# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
These pipeline stages are used in creating the StandardPipelineRenderer,
the default renderer for standard widget backends.
[createbg] => [overlays] => [iccprof] => [flipswap] => [rotate] => [output]
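A minimal sketch of how these stages could be chained by hand, following the
same pattern used by the mini-pipelines in Overlays2 below (here 'viewer' is
assumed to be an existing ginga viewer, 'logger' its logger, and the 'state'
and 'whence' values are normally supplied by the renderer):

    from ginga.util import pipeline

    stages = [CreateBg(viewer), Overlays(viewer), ICCProf(viewer),
              FlipSwap(viewer), Rotate(viewer), Output(viewer)]
    pipe = pipeline.Pipeline(logger, stages)
    pipe.name = 'standard-render'
    pipe.set(state=state, whence=0.0)
    pipe.run_from(pipe[0])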
"""
import time
import numpy as np
from ginga import trcalc
from .base import Stage, StageError
class CreateBg(Stage):
_stagename = 'viewer-createbg'
def __init__(self, viewer):
super(CreateBg, self).__init__()
self.viewer = viewer
self.dtype = np.uint8
def run(self, prev_stage):
if prev_stage is not None:
raise StageError("'{}' in wrong location".format(self._stagename))
if self._bypass:
self.pipeline.send(res_np=None)
return
state = self.pipeline.get('state')
win_wd, win_ht = state.win_dim
# calc minimum size of pixel image we will generate
# necessary to fit the window in the desired size
# Make a square from the scaled cutout, with room to rotate
slop = 20
side = int(np.sqrt(win_wd**2 + win_ht**2) + slop)
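        # e.g. a 1000x800 window gives sqrt(1000**2 + 800**2) ~= 1280.6, so
        # side = int(1280.6 + 20) = 1300, which is large enough to contain
        # the window at any rotation angle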
wd = ht = side
# Find center of new array
ncx, ncy = wd // 2, ht // 2
depth = len(state.order)
# make backing image with the background color
r, g, b = self.viewer.get_bg()
res_np = trcalc.make_filled_array((ht, wd, depth), self.dtype,
state.order, r, g, b, 1.0)
self.pipeline.set(org_dim=(wd, ht), org_off=(ncx, ncy))
self.pipeline.send(res_np=res_np)
class ICCProf(Stage):
"""Convert the given RGB data from the input ICC profile
to the output ICC profile.
"""
_stagename = 'viewer-icc-profiler'
def __init__(self, viewer):
super(ICCProf, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
self.verify_2d(data)
from ginga.util import rgb_cms
working_profile = rgb_cms.working_profile
t_ = self.viewer.get_settings()
output_profile = t_.get('icc_output_profile', None)
if self._bypass or None in [working_profile, output_profile]:
self.pipeline.set(icc_output_profile=working_profile)
self.pipeline.send(res_np=data)
return
# color profiling will not work with other types
data = data.astype(np.uint8)
alpha = None
ht, wd, dp = data.shape
if dp > 3:
# color profile conversion does not handle an alpha layer
alpha = data[:, :, 3]
data = data[:, :, 0:3]
# get rest of necessary conversion parameters
to_intent = t_.get('icc_output_intent', 'perceptual')
proofprof_name = t_.get('icc_proof_profile', None)
proof_intent = t_.get('icc_proof_intent', 'perceptual')
use_black_pt = t_.get('icc_black_point_compensation', False)
try:
data = rgb_cms.convert_profile_fromto(data,
working_profile,
output_profile,
to_intent=to_intent,
proof_name=proofprof_name,
proof_intent=proof_intent,
use_black_pt=use_black_pt,
logger=self.logger)
self.logger.debug("Converted from '%s' to '%s' profile" % (
working_profile, output_profile))
except Exception as e:
self.logger.warning("Error converting output from working profile: %s" % (str(e)))
# TODO: maybe should have a traceback here
self.logger.info("Output left unprofiled")
if alpha is not None:
data = trcalc.add_alpha(data, alpha)
self.pipeline.set(icc_output_profile=output_profile)
self.pipeline.send(res_np=data)
class FlipSwap(Stage):
_stagename = 'viewer-flipswap'
def __init__(self, viewer):
super(FlipSwap, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
self.verify_2d(data)
xoff, yoff = self.pipeline.get('org_off')
if not self._bypass:
flip_x, flip_y, swap_xy = self.viewer.get_transforms()
ht, wd = data.shape[:2]
# Do transforms as necessary
data = trcalc.transform(data, flip_x=flip_x, flip_y=flip_y,
swap_xy=swap_xy)
if flip_y:
yoff = ht - yoff
if flip_x:
xoff = wd - xoff
if swap_xy:
xoff, yoff = yoff, xoff
self.pipeline.set(off=(xoff, yoff))
self.pipeline.send(res_np=data)
class Rotate(Stage):
_stagename = 'viewer-rotate'
def __init__(self, viewer):
super(Rotate, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
self.verify_2d(data)
if not self._bypass:
rot_deg = self.viewer.get_rotation()
if not np.isclose(rot_deg, 0.0):
data = np.copy(data)
#data = np.ascontiguousarray(data)
data = trcalc.rotate_clip(data, -rot_deg, out=data,
logger=self.logger)
# apply other transforms
if self.viewer._invert_y:
# Flip Y for natural Y-axis inversion between FITS coords
# and screen coords
data = np.flipud(data)
# dimensions may have changed in transformations
ht, wd = data.shape[:2]
xoff, yoff = self.pipeline.get('off')
state = self.pipeline.get('state')
ctr_x, ctr_y = state.ctr
dst_x, dst_y = ctr_x - xoff, ctr_y - (ht - yoff)
self.pipeline.set(dst=(dst_x, dst_y))
self.logger.debug("ctr=%d,%d off=%d,%d dst=%d,%d cutout=%dx%d" % (
ctr_x, ctr_y, xoff, yoff, dst_x, dst_y, wd, ht))
win_wd, win_ht = state.win_dim
self.logger.debug("win=%d,%d coverage=%d,%d" % (
win_wd, win_ht, dst_x + wd, dst_y + ht))
self.pipeline.send(res_np=data)
class Output(Stage):
_stagename = 'viewer-output'
def __init__(self, viewer):
super(Output, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
## assert (len(data.shape) == 3 and data.dtype == np.uint8 and
## data.shape[2] in [3, 4]), \
## StageError("Expecting a RGB[A] image in final stage")
self.verify_2d(data)
state = self.pipeline.get('state')
out_order = state.order
if not self._bypass:
ht, wd = data.shape[:2]
state = self.pipeline.get('state')
win_wd, win_ht = state.win_dim
if wd < win_wd or ht < win_ht:
raise StageError("pipeline output doesn't cover window")
# now cut out the size that we need
dst_x, dst_y = self.pipeline.get('dst')
if dst_x > 0 or dst_y > 0:
raise StageError("pipeline calculated dst is not correct")
x1, y1 = abs(dst_x), abs(dst_y)
data = data[y1:y1 + win_ht, x1:x1 + win_wd]
# reorder image for renderer's desired format
dst_order = self.viewer.renderer.get_rgb_order()
data = trcalc.reorder_image(dst_order, data, state.order)
data = np.ascontiguousarray(data)
out_order = dst_order
self.pipeline.set(out_order=out_order)
self.pipeline.send(res_np=data)
class Overlays(Stage):
_stagename = 'viewer-image-overlays'
def __init__(self, viewer):
super(Overlays, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
bgarr = self.pipeline.get_data(prev_stage)
self.verify_2d(bgarr)
dstarr = np.copy(bgarr)
self._rgbarr2 = dstarr
self.pipeline.set(dstarr=dstarr)
whence = self.pipeline.get('whence')
p_canvas = self.viewer.get_private_canvas()
self._overlay_images(p_canvas, whence=whence)
self.pipeline.send(res_np=dstarr)
def _overlay_images(self, canvas, whence=0.0):
if not hasattr(canvas, 'objects'):
return
for obj in canvas.get_objects():
if hasattr(obj, 'prepare_image'):
obj.prepare_image(self.viewer, whence)
elif obj.is_compound() and (obj != canvas):
self._overlay_images(obj, whence=whence)
def _common_draw(self, cvs_img, cache, whence):
# internal common drawing phase for all images
image = cvs_img.image
if image is None:
return
dstarr = self._rgbarr2
if (whence <= 0.0) or (cache.cutout is None) or (not cvs_img.optimize):
# get extent of our data coverage in the window
# TODO: get rid of padding by fixing get_draw_rect() which
# doesn't quite get the coverage right at high magnifications
pad = 1.0
pts = np.asarray(self.viewer.get_draw_rect()).T
xmin = int(np.min(pts[0])) - pad
ymin = int(np.min(pts[1])) - pad
xmax = int(np.ceil(np.max(pts[0]))) + pad
ymax = int(np.ceil(np.max(pts[1]))) + pad
# get destination location in data_coords
dst_x, dst_y = cvs_img.crdmap.to_data((cvs_img.x, cvs_img.y))
a1, b1, a2, b2 = 0, 0, cvs_img.image.width - 1, cvs_img.image.height - 1
# calculate the cutout that we can make and scale to merge
# onto the final image--by only cutting out what is necessary
# this speeds scaling greatly at zoomed in sizes
((dst_x, dst_y), (a1, b1), (a2, b2)) = \
trcalc.calc_image_merge_clip((xmin, ymin), (xmax, ymax),
(dst_x, dst_y),
(a1, b1), (a2, b2))
# is image completely off the screen?
if (a2 - a1 <= 0) or (b2 - b1 <= 0):
# no overlay needed
cache.cutout = None
return
# cutout and scale the piece appropriately by the viewer scale
scale_x, scale_y = self.viewer.get_scale_xy()
# scale additionally by our scale
_scale_x, _scale_y = (scale_x * cvs_img.scale_x,
scale_y * cvs_img.scale_y)
interp = cvs_img.interpolation
if interp is None:
t_ = self.viewer.get_settings()
interp = t_.get('interpolation', 'basic')
# previous choice might not be available if preferences
# were saved when opencv was being used (and not used now);
# if so, silently default to "basic"
if interp not in trcalc.interpolation_methods:
interp = 'basic'
res = image.get_scaled_cutout2((a1, b1), (a2, b2),
(_scale_x, _scale_y),
method=interp)
data = res.data
if cvs_img.flipy:
data = np.flipud(data)
cache.cutout = data
# calculate our offset from the pan position
pan_x, pan_y = self.viewer.get_pan()
pan_off = self.viewer.data_off
pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
off_x, off_y = dst_x - pan_x, dst_y - pan_y
# scale offset
off_x *= scale_x
off_y *= scale_y
# dst position in the pre-transformed array should be calculated
# from the center of the array plus offsets
ht, wd, dp = dstarr.shape
cvs_x = int(np.round(wd / 2.0 + off_x))
cvs_y = int(np.round(ht / 2.0 + off_y))
cache.cvs_pos = (cvs_x, cvs_y)
def _prepare_image(self, cvs_img, cache, whence):
if whence > 2.3 and cache.rgbarr is not None:
return
dstarr = self._rgbarr2
t1 = t2 = time.time()
self._common_draw(cvs_img, cache, whence)
if cache.cutout is None:
return
cache.rgbarr = cache.cutout
t2 = time.time()
state = self.pipeline.get('state')
dst_order = state.order
image_order = cvs_img.image.get_order()
# composite the image into the destination array at the
# calculated position
trcalc.overlay_image(dstarr, cache.cvs_pos, cache.rgbarr,
dst_order=dst_order, src_order=image_order,
alpha=cvs_img.alpha, fill=True, flipy=False)
cache.drawn = True
t3 = time.time()
self.logger.debug("draw: t2=%.4f t3=%.4f total=%.4f" % (
t2 - t1, t3 - t2, t3 - t1))
def _prepare_norm_image(self, cvs_img, cache, whence):
if whence > 2.3 and cache.rgbarr is not None:
return
dstarr = self._rgbarr2
t1 = t2 = t3 = t4 = time.time()
self._common_draw(cvs_img, cache, whence)
if cache.cutout is None:
return
t2 = time.time()
if cvs_img.rgbmap is not None:
rgbmap = cvs_img.rgbmap
else:
rgbmap = self.viewer.get_rgbmap()
image_order = cvs_img.image.get_order()
if (whence <= 0.0) or (not cvs_img.optimize):
# if image has an alpha channel, then strip it off and save
# it until it is recombined later with the colorized output
# this saves us having to deal with an alpha band in the
# cuts leveling and RGB mapping routines
img_arr = cache.cutout
if 'A' not in image_order:
cache.alpha = None
else:
# normalize alpha array to the final output range
mn, mx = trcalc.get_minmax_dtype(img_arr.dtype)
a_idx = image_order.index('A')
cache.alpha = (img_arr[..., a_idx] / mx *
rgbmap.maxc).astype(rgbmap.dtype)
cache.cutout = img_arr[..., 0:a_idx]
if (whence <= 1.0) or (cache.prergb is None) or (not cvs_img.optimize):
# apply visual changes prior to color mapping (cut levels, etc)
vmax = rgbmap.get_hash_size() - 1
newdata = self._apply_visuals(cvs_img, cache.cutout, 0, vmax)
# result becomes an index array fed to the RGB mapper
if not np.issubdtype(newdata.dtype, np.dtype('uint')):
newdata = newdata.astype(np.uint)
idx = newdata
self.logger.debug("shape of index is %s" % (str(idx.shape)))
cache.prergb = idx
t3 = time.time()
state = self.pipeline.get('state')
dst_order = state.order
if (whence <= 2.0) or (cache.rgbarr is None) or (not cvs_img.optimize):
# get RGB mapped array
rgbobj = rgbmap.get_rgbarray(cache.prergb, order=dst_order,
image_order=image_order)
cache.rgbarr = rgbobj.get_array(dst_order)
if cache.alpha is not None and 'A' in dst_order:
a_idx = dst_order.index('A')
cache.rgbarr[..., a_idx] = cache.alpha
t4 = time.time()
# composite the image into the destination array at the
# calculated position
trcalc.overlay_image(dstarr, cache.cvs_pos, cache.rgbarr,
dst_order=dst_order, src_order=dst_order,
alpha=cvs_img.alpha, fill=True, flipy=False)
cache.drawn = True
t5 = time.time()
self.logger.debug("draw: t2=%.4f t3=%.4f t4=%.4f t5=%.4f total=%.4f" % (
t2 - t1, t3 - t2, t4 - t3, t5 - t4, t5 - t1))
def _apply_visuals(self, cvs_img, data, vmin, vmax):
if cvs_img.autocuts is not None:
autocuts = cvs_img.autocuts
else:
autocuts = self.viewer.autocuts
# Apply cut levels
if cvs_img.cuts is not None:
loval, hival = cvs_img.cuts
else:
loval, hival = self.viewer.t_['cuts']
newdata = autocuts.cut_levels(data, loval, hival,
vmin=vmin, vmax=vmax)
return newdata
##########################
class Overlays2(Stage):
_stagename = 'viewer-image-overlays'
def __init__(self, viewer):
super(Overlays2, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
bgarr = self.pipeline.get_data(prev_stage)
self.verify_2d(bgarr)
dstarr = np.copy(bgarr)
self.pipeline.set(dstarr=dstarr)
whence = self.pipeline.get('whence')
p_canvas = self.viewer.get_private_canvas()
self._overlay_images(p_canvas, whence=whence)
self.pipeline.send(res_np=dstarr)
def _overlay_images(self, canvas, whence=0.0):
if not hasattr(canvas, 'objects'):
return
for obj in canvas.get_objects():
if hasattr(obj, 'prepare_image'):
obj.prepare_image(self.viewer, whence)
elif obj.is_compound() and (obj != canvas):
self._overlay_images(obj, whence=whence)
def _prepare_image(self, cvs_img, cache, whence):
from ginga.util import pipeline
pipe = cache.get('minipipe', None)
if pipe is None:
stages = [Clip(self.viewer),
Merge(self.viewer)]
pipe = pipeline.Pipeline(self.logger, stages)
pipe.name = 'image-overlays'
cache.minipipe = pipe
state = self.pipeline.get('state')
pipe.set(whence=whence, cvs_img=cvs_img, state=state,
dstarr=self.pipeline.get('dstarr'))
if whence <= 0:
pipe.run_from(pipe[0])
return
if not cache.visible:
return
pipe.run_from(pipe[1])
def _prepare_norm_image(self, cvs_img, cache, whence):
from ginga.util import pipeline
pipe = cache.get('minipipe', None)
if pipe is None:
stages = [Clip(self.viewer),
Cuts(self.viewer),
RGBMap(self.viewer),
Merge(self.viewer)]
pipe = pipeline.Pipeline(self.logger, stages)
pipe.name = 'image-overlays'
cache.minipipe = pipe
state = self.pipeline.get('state')
pipe.set(whence=whence, cvs_img=cvs_img, state=state,
dstarr=self.pipeline.get('dstarr'))
if whence <= 0:
pipe.run_from(pipe[0])
return
if not cache.visible:
return
elif whence <= 1:
pipe.run_from(pipe[1])
elif whence <= 2:
pipe.run_from(pipe[2])
else:
pipe.run_from(pipe[3])
class Clip(Stage):
_stagename = 'viewer-clip'
def __init__(self, viewer):
super(Clip, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
#assert prev_stage is None, StageError("'viewclip' in wrong location")
cvs_img = self.pipeline.get('cvs_img')
cache = cvs_img.get_cache(self.viewer)
image = cvs_img.get_image()
if image is None:
self.pipeline.send(res_np=None)
return
data_np = image.get_data()
self.verify_2d(data_np)
# get extent of our data coverage in the window
# TODO: get rid of padding by fixing get_draw_rect() which
# doesn't quite get the coverage right at high magnifications
pad = 1.0
pts = np.asarray(self.viewer.get_draw_rect()).T
xmin = int(np.min(pts[0])) - pad
ymin = int(np.min(pts[1])) - pad
xmax = int(np.ceil(np.max(pts[0]))) + pad
ymax = int(np.ceil(np.max(pts[1]))) + pad
# get destination location in data_coords
img = cvs_img
dst_x, dst_y = img.crdmap.to_data((img.x, img.y))
ht, wd = data_np.shape[:2]
# TODO: think we need to apply scaling factors to wd/ht
# BEFORE we calculate merge clip
a1, b1, a2, b2 = 0, 0, wd - 1, ht - 1
# calculate the cutout that we can make and scale to merge
# onto the final image--by only cutting out what is necessary
# this speeds scaling greatly at zoomed in sizes
((dst_x, dst_y), (a1, b1), (a2, b2)) = \
trcalc.calc_image_merge_clip((xmin, ymin), (xmax, ymax),
(dst_x, dst_y),
(a1, b1), (a2, b2))
# is image completely off the screen?
if (a2 - a1 <= 0) or (b2 - b1 <= 0):
# no overlay needed
self.pipeline.send(res_np=None)
cache.visible = False
self.pipeline.stop()
return
cache.visible = True
# cutout and scale the piece appropriately by the viewer scale
scale_x, scale_y = self.viewer.get_scale_xy()
# scale additionally by scale specified in canvas image
_scale_x, _scale_y = (scale_x * img.scale_x,
scale_y * img.scale_y)
interp = img.interpolation
if interp is None:
t_ = self.viewer.get_settings()
interp = t_.get('interpolation', 'basic')
if interp not in trcalc.interpolation_methods:
interp = 'basic'
data, scales = trcalc.get_scaled_cutout_basic(data_np, a1, b1, a2, b2,
_scale_x, _scale_y,
interpolation=interp,
logger=self.logger)
if img.flipy:
data = np.flipud(data)
# calculate our offset from the pan position
pan_x, pan_y = self.viewer.get_pan()
pan_off = self.viewer.data_off
pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
off_x, off_y = dst_x - pan_x, dst_y - pan_y
# scale offset
off_x *= scale_x
off_y *= scale_y
self.pipeline.set(offset=(off_x, off_y))
## if cvs_img.rgbmap is not None:
## rgbmap = cvs_img.rgbmap
## else:
rgbmap = self.viewer.get_rgbmap()
state = self.pipeline.get('state')
image_order = image.get_order()
## if image_order != state.order:
## # reorder image channels for pipeline
## data = trcalc.reorder_image(state.order, data, image_order)
if 'A' not in image_order:
alpha = None
else:
# if image has an alpha channel, then strip it off and save
# it until it is recombined later with the colorized output
# this saves us having to deal with an alpha band in the
# cuts leveling and RGB mapping routines
# normalize alpha array to the final output range
mn, mx = trcalc.get_minmax_dtype(data.dtype)
a_idx = image_order.index('A')
alpha = (data[..., a_idx] / mx *
rgbmap.maxc).astype(rgbmap.dtype)
data = data[..., 0:a_idx]
ht, wd, dp = data.shape
if dp == 1:
data = data.reshape((ht, wd))
self.pipeline.set(alpha=alpha)
self.pipeline.send(res_np=data)
class Merge(Stage):
_stagename = 'viewer-merge-overlay'
def __init__(self, viewer):
super(Merge, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
rgbarr = self.pipeline.get_data(prev_stage)
if rgbarr is None:
# nothing to merge
return
self.verify_2d(rgbarr)
cvs_img = self.pipeline.get('cvs_img')
off_x, off_y = self.pipeline.get('offset')
dstarr = self.pipeline.get('dstarr')
state = self.pipeline.get('state')
# dst position in the pre-transformed array should be calculated
# from the center of the array plus offsets
ht, wd, dp = dstarr.shape
cvs_x = int(np.round(wd / 2.0 + off_x))
cvs_y = int(np.round(ht / 2.0 + off_y))
cvs_pos = (cvs_x, cvs_y)
dst_order = state.order
image_order = state.order
## alpha = self.pipeline.get('alpha')
## if alpha is not None:
## rgbarr[..., -1] = alpha
# composite the image into the destination array at the
# calculated position
trcalc.overlay_image(dstarr, cvs_pos, rgbarr,
dst_order=dst_order, src_order=image_order,
                             # NOTE: these are actually not used because rgbarr
                             # contains an alpha channel
alpha=cvs_img.alpha, fill=True,
flipy=False) # cvs_img.flipy
cache = cvs_img.get_cache(self.viewer)
cache.drawn = True
#self.pipeline.send(res_np=None)
class Cuts(Stage):
_stagename = 'viewer-cut-levels'
def __init__(self, viewer):
super(Cuts, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
if data is None:
self.pipeline.send(res_np=None)
return
self.verify_2d(data)
cvs_img = self.pipeline.get('cvs_img')
if cvs_img.rgbmap is not None:
rgbmap = cvs_img.rgbmap
else:
rgbmap = self.viewer.get_rgbmap()
vmin = 0
vmax = rgbmap.get_hash_size() - 1
if cvs_img.autocuts is not None:
autocuts = cvs_img.autocuts
else:
autocuts = self.viewer.autocuts
# Apply cut levels
if cvs_img.cuts is not None:
loval, hival = cvs_img.cuts
else:
loval, hival = self.viewer.t_['cuts']
res_np = autocuts.cut_levels(data, loval, hival,
vmin=vmin, vmax=vmax)
# NOTE: optimization to prevent multiple coercions in
# RGBMap
if not np.issubdtype(res_np.dtype, np.uint):
res_np = res_np.astype(np.uint)
self.pipeline.send(res_np=res_np)
class RGBMap(Stage):
_stagename = 'viewer-rgb-mapper'
def __init__(self, viewer):
super(RGBMap, self).__init__()
self.viewer = viewer
def run(self, prev_stage):
data = self.pipeline.get_data(prev_stage)
if data is None:
self.pipeline.send(res_np=None)
return
self.verify_2d(data)
cvs_img = self.pipeline.get('cvs_img')
state = self.pipeline.get('state')
if cvs_img.rgbmap is not None:
rgbmap = cvs_img.rgbmap
else:
rgbmap = self.viewer.get_rgbmap()
# See NOTE in Cuts
## if not np.issubdtype(data.dtype, np.uint):
## data = data.astype(np.uint)
# get RGB mapped array
image_order = trcalc.guess_order(data.shape)
rgbobj = rgbmap.get_rgbarray(data, order=state.order,
image_order=image_order)
res_np = rgbobj.get_array(state.order)
alpha = self.pipeline.get('alpha')
if alpha is not None:
res_np[..., -1] = alpha
self.pipeline.send(res_np=res_np)
############################################
#
# Author: Luca Cinquini
#
############################################
"""
Abstract
--------
The wps module of the OWSLib package provides client-side functionality for invoking a remote Web Processing Service (WPS) server.
Disclaimer
----------
PLEASE NOTE: the owslib wps module should be considered to be in a beta state: it has been tested against only a handful of WPS services (deployed by the USGS, BADC and PML).
More extensive testing is needed and feedback is appreciated.
Usage
-----
The module can be used to execute three types of requests against a remote WPS endpoint (a short usage sketch follows this list):
a) "GetCapabilities"
- use the method wps.getcapabilities(xml=None)
- the optional keyword argument "xml" may be used to avoid a real live request, and instead read the WPS capabilities document from a cached XML file
b) "DescribeProcess"
- use the method wps.describeprocess(identifier, xml=None)
- identifier is the process identifier, retrieved from the list obtained from a previous "GetCapabilities" invocation
- the optional keyword argument "xml" may be used to avoid a real live request, and instead read the WPS process description document from a cached XML file
c) "Execute"
- use the method wps.execute(identifier, inputs, output=None, request=None, response=None),
which submits the job to the remote WPS server and returns a WPSExecution object that can be used to periodically check the job status until completion
(or error)
- the optional keyword argument "request" may be used to avoid re-building the request XML from input arguments, and instead submit a request from a
pre-made XML file
- alternatively, an "Execute" request can be built from input arguments by supplying the "identifier", "inputs" and "output" arguments to the execute() method.
- "identifier" is the mandatory process identifier
- "inputs" is a dictionary of (key,value) pairs where:
- key is a named input parameter
- value is either a string, or any python object that supports a getXml() method
In particular, a few classes are included in the package to support a FeatureCollection input:
- "WFSFeatureCollection" can be used in conjunction with "WFSQuery" to define a FEATURE_COLLECTION retrieved from a live WFS server.
- "GMLMultiPolygonFeatureCollection" can be used to define one or more polygons of (latitude, longitude) points.
- "output" is an optional output identifier to be included in the ResponseForm section of the request.
- the optional keyword argument "response" mey be used to avoid submitting a real live request, and instead reading the WPS execution response document
from a cached XML file (for debugging or testing purposes)
- the convenience module function monitorExecution() can be used to periodically check the status of a remote running job, and eventually download the output
either to a named file, or to a file specified by the server.
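A minimal usage sketch (the endpoint URL, process identifier and input value below are
illustrative only, and the import assumes this module is installed as bcube_owslib.wps):
    from bcube_owslib.wps import WebProcessingService, monitorExecution
    # connect and fetch the capabilities document from the server
    wps = WebProcessingService('http://example.com/wps')
    print([process.identifier for process in wps.processes])
    # describe one process, then execute it with a single LiteralData input
    wps.describeprocess('some.process.identifier')
    execution = wps.execute('some.process.identifier',
                            [('SOME_INPUT', 'some value')],
                            output='OUTPUT')
    # poll the job until completion, then download its output
    monitorExecution(execution)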
Examples
--------
The files examples/wps-usgs-script.py, examples/wps-pml-script-1.py and examples/wps-pml-script-2.py contain real-world usage examples
that submit "GetCapabilities", "DescribeProcess" and "Execute" requests to the live USGS and PML servers. To run:
cd examples
python wps-usgs-script.py
python wps-pml-script-1.py
python wps-pml-script-2.py
The file wps-client.py contains a command-line client that can be used to submit a "GetCapabilities", "DescribeProcess" or "Execute"
request to an arbitrary WPS server. For example, you can run it as follows:
cd examples
To print out usage and example invocations: wps-client.py -help
To execute a (fake) WPS invocation:
wps-client.py -v -u http://cida.usgs.gov/climate/gdp/process/WebProcessingService -r GetCapabilities -x ../tests/USGSCapabilities.xml
The directory tests/ includes several doctest-style files wps_*.txt that show how to interactively submit a
"GetCapabilities", "DescribeProcess" or "Execute" request, without making a live request but rather parsing the response of cached XML response documents. To run:
cd tests
python -m doctest wps_*.txt
(or python -m doctest -v wps_*.txt for verbose output)
Also, the directory tests/ contains several examples of well-formed "Execute" requests:
- The files wps_USGSExecuteRequest*.xml contain requests that can be submitted to the live USGS WPS service.
- The files PMLExecuteRequest*.xml contain requests that can be submitted to the live PML WPS service.
"""
from __future__ import (absolute_import, division, print_function)
from bcube_owslib.etree import etree
from bcube_owslib.ows import DEFAULT_OWS_NAMESPACE, ServiceIdentification, ServiceProvider, OperationsMetadata
from time import sleep
from bcube_owslib.util import (testXMLValue, build_get_url, dump, getTypedValue,
getNamespace, element_to_string, nspath, openURL, nspath_eval, log)
from xml.dom.minidom import parseString
from bcube_owslib.namespaces import Namespaces
# namespace definition
n = Namespaces()
# These static namespaces are DEPRECATED. Please don't use them.
# No great way of printing a message since they are at the file level
WPS_DEFAULT_NAMESPACE = n.get_namespace("wps")
WFS_NAMESPACE = n.get_namespace("wfs")
OGC_NAMESPACE = n.get_namespace("ogc")
GML_NAMESPACE = n.get_namespace("gml")
DRAW_NAMESPACE = n.get_namespace("draw")
GML_SCHEMA_LOCATION = "http://schemas.opengis.net/gml/3.1.1/base/feature.xsd"
DRAW_SCHEMA_LOCATION = 'http://cida.usgs.gov/climate/derivative/xsd/draw.xsd'
WPS_DEFAULT_SCHEMA_LOCATION = 'http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd'
WPS_DEFAULT_VERSION = '1.0.0'
def get_namespaces():
ns = n.get_namespaces(["ogc","wfs","wps","gml","xsi","xlink"])
ns[None] = n.get_namespace("wps")
ns["ows"] = DEFAULT_OWS_NAMESPACE
return ns
namespaces = get_namespaces()
class IWebProcessingService():
"""
Abstract interface for an OGC Web Processing Service (WPS).
"""
url = property("""URL for the remote WPS server (string).""")
def getcapabilities(**kw):
"""
Makes a GetCapabilities request to the remote WPS server,
returns an XML document wrapped in a python file-like object.
"""
def describeprocess(**kw):
"""
Makes a DescribeProcess request to the remote WPS server,
returns a Process object containing all the process metadata.
"""
def execute(**kw):
"""
Submits an Execute request to the remote WPS server,
returns a WPSExecution object, which can be used to monitor the status of the job, and ultimately retrieve the result.
"""
class IComplexData():
"""
Abstract interface representing complex input object for a WPS request.
"""
def getXml(self):
"""
Method that returns the object data as an XML snippet,
to be inserted into the WPS request document sent to the server.
"""
class WebProcessingService(object):
"""
Class that contains client-side functionality for invoking an OGC Web Processing Service (WPS).
Implements IWebProcessingService.
"""
def __init__(self, url, version=WPS_DEFAULT_VERSION, username=None, password=None, verbose=False, skip_caps=False):
"""
Initialization method resets the object status.
By default it will execute a GetCapabilities invocation to the remote service,
which can be skipped by using skip_caps=True.
"""
# fields passed in from object initializer
self.url = url
self.username = username
self.password = password
self.version = version
self.verbose = verbose
# fields populated by method invocations
self._capabilities = None
self.identification = None
self.provider = None
self.operations=[]
self.processes=[]
if not skip_caps:
self.getcapabilities()
def getcapabilities(self, xml=None):
"""
Method that requests a capabilities document from the remote WPS server and populates this object's metadata.
keyword argument xml: local XML GetCapabilities document, prevents actual HTTP invocation.
"""
# read capabilities document
reader = WPSCapabilitiesReader(version=self.version, verbose=self.verbose)
if xml:
# read from stored XML file
self._capabilities = reader.readFromString(xml)
else:
self._capabilities = reader.readFromUrl(self.url, username=self.username, password=self.password)
log.debug(element_to_string(self._capabilities))
        # populate the capabilities metadata objects from the XML tree
self._parseCapabilitiesMetadata(self._capabilities)
def describeprocess(self, identifier, xml=None):
"""
Requests a process document from a WPS service and populates the process metadata.
Returns the process object.
"""
# read capabilities document
reader = WPSDescribeProcessReader(version=self.version, verbose=self.verbose)
if xml:
# read from stored XML file
rootElement = reader.readFromString(xml)
else:
# read from server
rootElement = reader.readFromUrl(self.url, identifier)
log.info(element_to_string(rootElement))
# build metadata objects
return self._parseProcessMetadata(rootElement)
def execute(self, identifier, inputs, output=None, request=None, response=None):
"""
Submits a WPS process execution request.
Returns a WPSExecution object, which can be used to monitor the status of the job, and ultimately retrieve the result.
identifier: the requested process identifier
inputs: list of process inputs as (key, value) tuples (where value is either a string for LiteralData, or an object for ComplexData)
output: optional identifier for process output reference (if not provided, output will be embedded in the response)
request: optional pre-built XML request document, prevents building of request from other arguments
response: optional pre-built XML response document, prevents submission of request to live WPS server
"""
# instantiate a WPSExecution object
log.info('Executing WPS request...')
execution = WPSExecution(version=self.version, url=self.url, username=self.username, password=self.password, verbose=self.verbose)
# build XML request from parameters
if request is None:
requestElement = execution.buildRequest(identifier, inputs, output)
request = etree.tostring( requestElement )
execution.request = request
log.debug(request)
# submit the request to the live server
if response is None:
response = execution.submitRequest(request)
else:
response = etree.fromstring(response)
log.debug(etree.tostring(response))
# parse response
execution.parseResponse(response)
return execution
def _parseProcessMetadata(self, rootElement):
"""
        Method to parse a <ProcessDescriptions> XML element and return the constructed Process object
"""
processDescriptionElement = rootElement.find( 'ProcessDescription' )
process = Process(processDescriptionElement, verbose=self.verbose)
        # override an existing process in the object metadata, if it exists already
found = False
for n, p in enumerate(self.processes):
if p.identifier==process.identifier:
self.processes[n]=process
found = True
# otherwise add it
if not found:
self.processes.append(process)
return process
def _parseCapabilitiesMetadata(self, root):
''' Sets up capabilities metadata objects '''
# use the WPS namespace defined in the document root
wpsns = getNamespace(root)
# loop over children WITHOUT requiring a specific namespace
for element in root:
            # this element's namespace
ns = getNamespace(element)
# <ows:ServiceIdentification> metadata
if element.tag.endswith('ServiceIdentification'):
self.identification=ServiceIdentification(element, namespace=ns)
if self.verbose==True:
dump(self.identification)
# <ows:ServiceProvider> metadata
elif element.tag.endswith('ServiceProvider'):
self.provider=ServiceProvider(element, namespace=ns)
if self.verbose==True:
dump(self.provider)
# <ns0:OperationsMetadata xmlns:ns0="http://www.opengeospatial.net/ows">
# <ns0:Operation name="GetCapabilities">
# <ns0:DCP>
# <ns0:HTTP>
# <ns0:Get xlink:href="http://ceda-wps2.badc.rl.ac.uk/wps?" xmlns:xlink="http://www.w3.org/1999/xlink" />
# </ns0:HTTP>
# </ns0:DCP>
# </ns0:Operation>
# ........
# </ns0:OperationsMetadata>
elif element.tag.endswith('OperationsMetadata'):
for child in element.findall( nspath('Operation', ns=ns) ):
self.operations.append( OperationsMetadata(child, namespace=ns) )
if self.verbose==True:
dump(self.operations[-1])
# <wps:ProcessOfferings>
# <wps:Process ns0:processVersion="1.0.0">
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles</ows:Identifier>
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles</ows:Title>
# </wps:Process>
# ......
# </wps:ProcessOfferings>
elif element.tag.endswith('ProcessOfferings'):
for child in element.findall( nspath('Process', ns=ns) ):
p = Process(child, verbose=self.verbose)
self.processes.append(p)
if self.verbose==True:
dump(self.processes[-1])
class WPSReader(object):
"""
Superclass for reading a WPS document into a lxml.etree infoset.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False):
self.version = version
self.verbose = verbose
def _readFromUrl(self, url, data, method='Get', username=None, password=None):
"""
Method to get and parse a WPS document, returning an elementtree instance.
url: WPS service base url.
data: GET: dictionary of HTTP (key, value) parameter pairs, POST: XML document to post
username, password: optional user credentials
"""
if method == 'Get':
# full HTTP request url
request_url = build_get_url(url, data)
log.debug(request_url)
# split URL into base url and query string to use utility function
spliturl=request_url.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username=username, password=password)
return etree.fromstring(u.read())
elif method == 'Post':
u = openURL(url, data, method='Post', username = username, password = password)
return etree.fromstring(u.read())
else:
raise Exception("Unrecognized HTTP method: %s" % method)
def readFromString(self, string):
"""
Method to read a WPS GetCapabilities document from an XML string.
"""
if not isinstance(string, str):
raise ValueError("Input must be of type string, not %s" % type(string))
return etree.fromstring(string)
class WPSCapabilitiesReader(WPSReader):
"""
Utility class that reads and parses a WPS GetCapabilities document into a lxml.etree infoset.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False):
# superclass initializer
super(WPSCapabilitiesReader,self).__init__(version=version, verbose=verbose)
def readFromUrl(self, url, username=None, password=None):
"""
Method to get and parse a WPS capabilities document, returning an elementtree instance.
url: WPS service base url, to which is appended the HTTP parameters: service, version, and request.
username, password: optional user credentials
"""
return self._readFromUrl(url,
{'service':'WPS', 'request':'GetCapabilities', 'version':self.version},
username=username, password=password)
class WPSDescribeProcessReader(WPSReader):
"""
Class that reads and parses a WPS DescribeProcess document into a etree infoset
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False):
# superclass initializer
super(WPSDescribeProcessReader,self).__init__(version=version, verbose=verbose)
def readFromUrl(self, url, identifier, username=None, password=None):
"""
Reads a WPS DescribeProcess document from a remote service and returns the XML etree object
url: WPS service base url, to which is appended the HTTP parameters: 'service', 'version', and 'request', and 'identifier'.
"""
return self._readFromUrl(url,
{'service':'WPS', 'request':'DescribeProcess', 'version':self.version, 'identifier':identifier},
username=username, password=password)
class WPSExecuteReader(WPSReader):
"""
Class that reads and parses a WPS Execute response document into a etree infoset
"""
def __init__(self, verbose=False):
# superclass initializer
super(WPSExecuteReader,self).__init__(verbose=verbose)
def readFromUrl(self, url, data={}, method='Get', username=None, password=None):
"""
Reads a WPS status document from a remote service and returns the XML etree object.
url: the URL to submit the GET/POST request to.
"""
return self._readFromUrl(url, data, method, username=username, password=password)
class WPSExecution():
"""
Class that represents a single WPS process executed on a remote WPS service.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, url=None, username=None, password=None, verbose=False):
# initialize fields
self.url = url
self.version = version
self.username = username
self.password = password
self.verbose = verbose
# request document
self.request = None
# last response document
self.response = None
# status fields retrieved from the response documents
self.process = None
self.serviceInstance = None
self.status = None
self.percentCompleted = 0
self.statusMessage = None
self.errors = []
self.statusLocation = None
self.dataInputs=[]
self.processOutputs=[]
def buildRequest(self, identifier, inputs=[], output=None):
"""
Method to build a WPS process request.
identifier: the requested process identifier
inputs: array of input arguments for the process.
- LiteralData inputs are expressed as simple (key,value) tuples where key is the input identifier, value is the value
- ComplexData inputs are express as (key, object) tuples, where key is the input identifier,
and the object must contain a 'getXml()' method that returns an XML infoset to be included in the WPS request
output: optional identifier if process output is to be returned as a hyperlink reference
"""
#<wps:Execute xmlns:wps="http://www.opengis.net/wps/1.0.0"
# xmlns:ows="http://www.opengis.net/ows/1.1"
# xmlns:xlink="http://www.w3.org/1999/xlink"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
# service="WPS"
# version="1.0.0"
# xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd">
root = etree.Element(nspath_eval('wps:Execute', namespaces))
root.set('service', 'WPS')
root.set('version', WPS_DEFAULT_VERSION)
root.set(nspath_eval('xsi:schemaLocation', namespaces), '%s %s' % (namespaces['wps'], WPS_DEFAULT_SCHEMA_LOCATION) )
# <ows:Identifier>gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm</ows:Identifier>
identifierElement = etree.SubElement(root, nspath_eval('ows:Identifier', namespaces))
identifierElement.text = identifier
# <wps:DataInputs>
dataInputsElement = etree.SubElement(root, nspath_eval('wps:DataInputs', namespaces))
for (key,val) in inputs:
inputElement = etree.SubElement(dataInputsElement, nspath_eval('wps:Input', namespaces))
identifierElement = etree.SubElement(inputElement, nspath_eval('ows:Identifier', namespaces))
identifierElement.text = key
# Literal data
# <wps:Input>
# <ows:Identifier>DATASET_URI</ows:Identifier>
# <wps:Data>
# <wps:LiteralData>dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/conus_grid.w_meta.ncml</wps:LiteralData>
# </wps:Data>
# </wps:Input>
if isinstance(val, str):
dataElement = etree.SubElement(inputElement, nspath_eval('wps:Data', namespaces))
literalDataElement = etree.SubElement(dataElement, nspath_eval('wps:LiteralData', namespaces))
literalDataElement.text = val
# Complex data
# <wps:Input>
# <ows:Identifier>FEATURE_COLLECTION</ows:Identifier>
# <wps:Reference xlink:href="http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs">
# <wps:Body>
# <wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" service="WFS" version="1.1.0" outputFormat="text/xml; subtype=gml/3.1.1" xsi:schemaLocation="http://www.opengis.net/wfs ../wfs/1.1.0/WFS.xsd">
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
# </wfs:GetFeature>
# </wps:Body>
# </wps:Reference>
# </wps:Input>
else:
inputElement.append( val.getXml() )
# <wps:ResponseForm>
# <wps:ResponseDocument storeExecuteResponse="true" status="true">
# <wps:Output asReference="true">
# <ows:Identifier>OUTPUT</ows:Identifier>
# </wps:Output>
# </wps:ResponseDocument>
# </wps:ResponseForm>
if output is not None:
responseFormElement = etree.SubElement(root, nspath_eval('wps:ResponseForm', namespaces))
responseDocumentElement = etree.SubElement(responseFormElement, nspath_eval('wps:ResponseDocument', namespaces),
attrib={'storeExecuteResponse':'true', 'status':'true'} )
if isinstance(output, str):
self._add_output(responseDocumentElement, output, asReference=True)
elif isinstance(output, list):
for (identifier,as_reference) in output:
self._add_output(responseDocumentElement, identifier, asReference=as_reference)
else:
raise Exception('output parameter is neither string nor list. output=%s' % output)
return root
def _add_output(self, element, identifier, asReference=False):
outputElement = etree.SubElement(element, nspath_eval('wps:Output', namespaces),
attrib={'asReference':str(asReference).lower()} )
        outputIdentifierElement = etree.SubElement(outputElement, nspath_eval('ows:Identifier', namespaces))
        outputIdentifierElement.text = identifier
# wait for 60 seconds by default
def checkStatus(self, url=None, response=None, sleepSecs=60):
"""
Method to check the status of a job execution.
        In the process, this method will update the object 'response' attribute.
url: optional 'statusLocation' URL retrieved from a previous WPS Execute response document.
If not provided, the current 'statusLocation' URL will be used.
sleepSecs: number of seconds to sleep before returning control to the caller.
"""
reader = WPSExecuteReader(verbose=self.verbose)
if response is None:
# override status location
if url is not None:
self.statusLocation = url
log.info('\nChecking execution status... (location=%s)' % self.statusLocation)
response = reader.readFromUrl(self.statusLocation, username=self.username, password=self.password)
else:
response = reader.readFromString(response)
# store latest response
self.response = etree.tostring(response)
log.debug(self.response)
self.parseResponse(response)
# sleep given number of seconds
if self.isComplete()==False:
log.info('Sleeping %d seconds...' % sleepSecs)
sleep(sleepSecs)
def getStatus(self):
return self.status
def isComplete(self):
if (self.status=='ProcessSucceeded' or self.status=='ProcessFailed' or self.status=='Exception'):
return True
elif (self.status=='ProcessStarted'):
return False
elif (self.status=='ProcessAccepted' or self.status=='ProcessPaused'):
return False
else:
raise Exception('Unknown process execution status: %s' % self.status)
def isSucceded(self):
if self.status=='ProcessSucceeded':
return True
else:
return False
def isNotComplete(self):
return not self.isComplete()
def getOutput(self, filepath=None):
"""
Method to write the outputs of a WPS process to a file:
either retrieves the referenced files from the server, or writes out the content of response embedded output.
filepath: optional path to the output file, otherwise a file will be created in the local directory with the name assigned by the server,
or default name 'wps.out' for embedded output.
"""
if self.isSucceded():
content = ''
for output in self.processOutputs:
output_content = output.retrieveData(self.username, self.password)
# ExecuteResponse contains reference to server-side output
                if output_content != "":
content = content + output_content
if filepath is None:
filepath = output.fileName
                # ExecuteResponse contains embedded output
if len(output.data)>0:
if filepath is None:
filepath = 'wps.out'
for data in output.data:
content = content + data
# write out content
            if content != '':
out = open(filepath, 'wb')
out.write(content)
out.close()
log.info('Output written to file: %s' %filepath)
else:
raise Exception("Execution not successfully completed: status=%s" % self.status)
def submitRequest(self, request):
"""
Submits a WPS Execute document to a remote service, returns the XML response document from the server.
This method will save the request document and the first returned response document.
request: the XML request document to be submitted as POST to the server.
"""
self.request = request
reader = WPSExecuteReader(verbose=self.verbose)
response = reader.readFromUrl(self.url, request, method='Post', username=self.username, password=self.password)
self.response = response
return response
'''
if response is None:
# override status location
if url is not None:
self.statusLocation = url
else:
response = reader.readFromString(response)
'''
def parseResponse(self, response):
"""
Method to parse a WPS response document
"""
rootTag = response.tag.split('}')[1]
# <ns0:ExecuteResponse>
if rootTag == 'ExecuteResponse':
self._parseExecuteResponse(response)
# <ows:ExceptionReport>
elif rootTag == 'ExceptionReport':
self._parseExceptionReport(response)
else:
log.debug('Unknown Response')
# log status, errors
log.info('Execution status=%s' % self.status)
log.info('Percent completed=%s' % self.percentCompleted)
log.info('Status message=%s' % self.statusMessage)
for error in self.errors:
dump(error)
def _parseExceptionReport(self, root):
"""
Method to parse a WPS ExceptionReport document and populate this object's metadata.
"""
# set exception status, unless set already
if self.status is None:
self.status = "Exception"
for exceptionEl in root.findall( nspath('Exception', ns=namespaces['ows']) ):
self.errors.append( WPSException(exceptionEl) )
def _parseExecuteResponse(self, root):
"""
Method to parse a WPS ExecuteResponse response document and populate this object's metadata.
"""
# retrieve WPS namespace directly from root element
wpsns = getNamespace(root)
self.serviceInstance = root.get( 'serviceInstance' )
self.statusLocation = root.get( 'statusLocation' )
# <ns0:Status creationTime="2011-11-09T14:19:50Z">
# <ns0:ProcessSucceeded>PyWPS Process v.net.path successfully calculated</ns0:ProcessSucceeded>
# </ns0:Status>
# OR
# <ns0:Status creationTime="2011-11-07T08:26:44.359-06:00">
# <ns0:ProcessFailed>
# <ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1">
# <ows:Exception>
# <ows:ExceptionText>Attribute null not found in feature collection</ows:ExceptionText>
# </ows:Exception>
# </ows:ExceptionReport>
# </ns0:ProcessFailed>
# </ns0:Status>
statusEl = root.find( nspath('Status/*', ns=wpsns) )
self.status = statusEl.tag.split('}')[1]
# get progress info
try:
percentCompleted = int(statusEl.get('percentCompleted'))
self.percentCompleted = percentCompleted
except:
pass
# get status message
self.statusMessage = statusEl.text
# exceptions ?
for element in statusEl:
if element.tag.endswith('ExceptionReport'):
self._parseExceptionReport(element)
self.process = Process(root.find(nspath('Process', ns=wpsns)), verbose=self.verbose)
#<wps:DataInputs xmlns:wps="http://www.opengis.net/wps/1.0.0"
# xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xlink="http://www.w3.org/1999/xlink">
for inputElement in root.findall( nspath('DataInputs/Input', ns=wpsns) ):
self.dataInputs.append( Input(inputElement) )
if self.verbose==True:
dump(self.dataInputs[-1])
# <ns:ProcessOutputs>
# xmlns:ns="http://www.opengis.net/wps/1.0.0"
for outputElement in root.findall( nspath('ProcessOutputs/Output', ns=wpsns) ):
self.processOutputs.append( Output(outputElement) )
if self.verbose==True:
dump(self.processOutputs[-1])
class ComplexData(object):
"""
Class that represents a ComplexData element in a WPS document
"""
def __init__(self, mimeType=None, encoding=None, schema=None):
self.mimeType = mimeType
self.encoding = encoding
self.schema = schema
class InputOutput(object):
"""
Superclass of a WPS input or output data object.
"""
def __init__(self, element):
# loop over sub-elements without requiring a specific namespace
for subElement in element:
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">SUMMARIZE_TIMESTEP</ows:Identifier>
if subElement.tag.endswith('Identifier'):
self.identifier = testXMLValue( subElement )
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">Summarize Timestep</ows:Title>
elif subElement.tag.endswith('Title'):
self.title = testXMLValue( subElement )
# <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">If selected, processing output will include columns with summarized statistics for all feature attribute values for each timestep</ows:Abstract>
elif subElement.tag.endswith('Abstract'):
self.abstract = testXMLValue( subElement )
self.allowedValues = []
self.supportedValues = []
self.defaultValue = None
self.dataType = None
self.anyValue = False
def _parseData(self, element):
"""
Method to parse a "Data" element
"""
# <ns0:Data>
# <ns0:ComplexData mimeType="text/plain">
# 7504912.93758151 -764109.175074507,7750849.82379226 -22141.8611641468,8561828.42371234 -897195.923493867,7724946.16844165 -602984.014261927
# </ns0:ComplexData>
# </ns0:Data>
#nspath('Data', ns=WPS_NAMESPACE)
complexDataElement = element.find( nspath('ComplexData', ns=getNamespace(element)) )
if complexDataElement is not None:
self.dataType = "ComplexData"
def _parseLiteralData(self, element, literalElementName):
"""
Method to parse the LiteralData element.
"""
# <LiteralData>
# <ows:DataType ows:reference="xs:string" xmlns:ows="http://www.opengis.net/ows/1.1" />
# <ows:AllowedValues xmlns:ows="http://www.opengis.net/ows/1.1">
# <ows:Value>COMMA</ows:Value>
# <ows:Value>TAB</ows:Value>
# <ows:Value>SPACE</ows:Value>
# </ows:AllowedValues>
# <DefaultValue>COMMA</DefaultValue>
# </LiteralData>
# <LiteralData>
# <ows:DataType ows:reference="xs:anyURI" xmlns:ows="http://www.opengis.net/ows/1.1" />
# <ows:AnyValue xmlns:ows="http://www.opengis.net/ows/1.1" />
# </LiteralData>
literalDataElement = element.find( literalElementName )
if literalDataElement is not None:
self.dataType = 'LiteralData'
for subElement in literalDataElement:
subns = getNamespace(subElement)
if subElement.tag.endswith('DataType'):
self.dataType = subElement.get( nspath("reference", ns=subns) ).split(':')[1]
elif subElement.tag.endswith('AllowedValues'):
for value in subElement.findall( nspath('Value', ns=subns) ):
self.allowedValues.append( getTypedValue(self.dataType, value.text) )
elif subElement.tag.endswith('DefaultValue'):
self.defaultValue = getTypedValue(self.dataType, subElement.text)
elif subElement.tag.endswith('AnyValue'):
self.anyValue = True
def _parseComplexData(self, element, complexDataElementName):
"""
Method to parse a ComplexData or ComplexOutput element.
"""
# <ComplexData>
# <Default>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
# </Format>
# </Default>
# <Supported>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
# </Format>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.1.1/feature.xsd</Schema>
# </Format>
# </Supported>
# </ComplexData>
# OR
# <ComplexOutput defaultEncoding="UTF-8" defaultFormat="text/XML" defaultSchema="NONE">
# <SupportedComplexData>
# <Format>text/XML</Format>
# <Encoding>UTF-8</Encoding>
# <Schema>NONE</Schema>
# </SupportedComplexData>
# </ComplexOutput>
complexDataElement = element.find( complexDataElementName )
if complexDataElement is not None:
self.dataType = "ComplexData"
for supportedComlexDataElement in complexDataElement.findall( 'SupportedComplexData' ):
self.supportedValues.append( ComplexData( mimeType=testXMLValue( supportedComlexDataElement.find( 'Format' ) ),
encoding=testXMLValue( supportedComlexDataElement.find( 'Encoding' ) ),
schema=testXMLValue( supportedComlexDataElement.find( 'Schema' ) )
)
)
for formatElement in complexDataElement.findall( 'Supported/Format'):
self.supportedValues.append( ComplexData( mimeType=testXMLValue( formatElement.find( 'MimeType' ) ),
encoding=testXMLValue( formatElement.find( 'Encoding' ) ),
schema=testXMLValue( formatElement.find( 'Schema' ) )
)
)
defaultFormatElement = complexDataElement.find( 'Default/Format' )
if defaultFormatElement is not None:
self.defaultValue = ComplexData( mimeType=testXMLValue( defaultFormatElement.find( 'MimeType' ) ),
encoding=testXMLValue( defaultFormatElement.find( 'Encoding' ) ),
schema=testXMLValue( defaultFormatElement.find( 'Schema' ) )
)
class Input(InputOutput):
"""
Class that represents a WPS process input.
"""
def __init__(self, inputElement):
# superclass initializer
super(Input,self).__init__(inputElement)
# <Input maxOccurs="1" minOccurs="0">
# OR
# <MinimumOccurs>1</MinimumOccurs>
self.minOccurs = -1
if inputElement.get("minOccurs") is not None:
self.minOccurs = int( inputElement.get("minOccurs") )
if inputElement.find('MinimumOccurs') is not None:
self.minOccurs = int( testXMLValue( inputElement.find('MinimumOccurs') ) )
self.maxOccurs = -1
if inputElement.get("maxOccurs") is not None:
self.maxOccurs = int( inputElement.get("maxOccurs") )
if inputElement.find('MaximumOccurs') is not None:
self.maxOccurs = int( testXMLValue( inputElement.find('MaximumOccurs') ) )
# <LiteralData>
self._parseLiteralData(inputElement, 'LiteralData')
# <ComplexData>
self._parseComplexData(inputElement, 'ComplexData')
class Output(InputOutput):
"""
Class that represents a WPS process output.
"""
def __init__(self, outputElement):
# superclass initializer
super(Output,self).__init__(outputElement)
self.reference = None
self.mimeType = None
self.data = []
self.fileName = None
self.filePath = None
# extract wps namespace from outputElement itself
wpsns = getNamespace(outputElement)
# <ns:Reference encoding="UTF-8" mimeType="text/csv"
# href="http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e" />
referenceElement = outputElement.find( nspath('Reference', ns=wpsns) )
if referenceElement is not None:
self.reference = referenceElement.get('href')
self.mimeType = referenceElement.get('mimeType')
# <LiteralOutput>
self._parseLiteralData(outputElement, 'LiteralOutput')
# <ComplexData> or <ComplexOutput>
self._parseComplexData(outputElement, 'ComplexOutput')
# <Data>
# <ns0:Data>
# <ns0:ComplexData mimeType="text/plain">
# 7504912.93758151 -764109.175074507,7750849.82379226 -22141.8611641468,8561828.42371234 -897195.923493867,7724946.16844165 -602984.014261927
# </ns0:ComplexData>
# </ns0:Data>
# OR:
# <ns0:Data>
# <ns0:ComplexData encoding="UTF-8" mimeType="text/xml" schema="http://schemas.opengis.net/gml/2.1.2/feature.xsd">
# <ns3:FeatureCollection xsi:schemaLocation="http://ogr.maptools.org/ output_0n7ij9D.xsd" xmlns:ns3="http://ogr.maptools.org/">
# <gml:boundedBy xmlns:gml="http://www.opengis.net/gml">
# <gml:Box>
# <gml:coord><gml:X>-960123.1421801626</gml:X><gml:Y>4665723.56559387</gml:Y></gml:coord>
# <gml:coord><gml:X>-101288.6510608822</gml:X><gml:Y>5108200.011823481</gml:Y></gml:coord>
# </gml:Box>
# </gml:boundedBy>
# <gml:featureMember xmlns:gml="http://www.opengis.net/gml">
# <ns3:output fid="F0">
# <ns3:geometryProperty><gml:LineString><gml:coordinates>-960123.142180162365548,4665723.565593870356679,0 -960123.142180162365548,4665723.565593870356679,0 -960123.142180162598379,4665723.565593870356679,0 -960123.142180162598379,4665723.565593870356679,0 -711230.141176006174646,4710278.48552671354264,0 -711230.141176006174646,4710278.48552671354264,0 -623656.677859728806652,4848552.374973464757204,0 -623656.677859728806652,4848552.374973464757204,0 -410100.337491964863148,4923834.82589447684586,0 -410100.337491964863148,4923834.82589447684586,0 -101288.651060882242746,5108200.011823480948806,0 -101288.651060882242746,5108200.011823480948806,0 -101288.651060882257298,5108200.011823480948806,0 -101288.651060882257298,5108200.011823480948806,0</gml:coordinates></gml:LineString></ns3:geometryProperty>
# <ns3:cat>1</ns3:cat>
# <ns3:id>1</ns3:id>
# <ns3:fcat>0</ns3:fcat>
# <ns3:tcat>0</ns3:tcat>
# <ns3:sp>0</ns3:sp>
# <ns3:cost>1002619.181</ns3:cost>
# <ns3:fdist>0</ns3:fdist>
# <ns3:tdist>0</ns3:tdist>
# </ns3:output>
# </gml:featureMember>
# </ns3:FeatureCollection>
# </ns0:ComplexData>
# </ns0:Data>
dataElement = outputElement.find( nspath('Data', ns=wpsns) )
if dataElement is not None:
complexDataElement = dataElement.find( nspath('ComplexData', ns=wpsns) )
if complexDataElement is not None:
self.dataType = "ComplexData"
self.mimeType = complexDataElement.get('mimeType')
                if complexDataElement.text is not None and complexDataElement.text.strip() != '':
self.data.append(complexDataElement.text.strip())
for child in complexDataElement:
self.data.append(etree.tostring(child))
literalDataElement = dataElement.find( nspath('LiteralData', ns=wpsns) )
if literalDataElement is not None:
self.dataType = literalDataElement.get('dataType')
                if literalDataElement.text is not None and literalDataElement.text.strip() != '':
self.data.append(literalDataElement.text.strip())
def retrieveData(self, username=None, password=None):
"""
Method to retrieve data from server-side reference:
returns "" if the reference is not known.
username, password: credentials to access the remote WPS server
"""
url = self.reference
if url is None:
return ""
# a) 'http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e'
# b) 'http://rsg.pml.ac.uk/wps/wpsoutputs/outputImage-11294Bd6l2a.tif'
log.info('Output URL=%s' % url)
if '?' in url:
spliturl=url.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username = username, password = password)
# extract output filepath from URL query string
self.fileName = spliturl[1].split('=')[1]
else:
u = openURL(url, '', method='Get', username = username, password = password)
# extract output filepath from base URL
self.fileName = url.split('/')[-1]
return u.read()
def writeToDisk(self, path=None, username=None, password=None):
"""
Method to write an output of a WPS process to disk:
        it either retrieves the referenced file from the server, or writes out the content of the response's embedded output.
        path: optional path prefix for the output file; otherwise a file will be created in the local directory with the name assigned by the server.
username, password: credentials to access the remote WPS server
"""
# Check if ExecuteResponse contains reference to server-side output
content = self.retrieveData(username, password)
        # ExecuteResponse contains embedded output
        if content == "" and len(self.data) > 0:
self.fileName = self.identifier
for data in self.data:
content = content + data
# write out content
        if content != "":
if self.fileName == "":
self.fileName = self.identifier
self.filePath = path + self.fileName
out = open(self.filePath, 'wb')
out.write(content)
out.close()
log.info('Output written to file: %s' %self.filePath)
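# Illustrative usage sketch (hedged, kept as comments so nothing runs at import time):
# after a successful execution, each Output in execution.processOutputs can be
# persisted with writeToDisk(). 'execution' is assumed to be a completed
# WPSExecution instance and '/tmp/' is just a placeholder path prefix.
#
#     for output in execution.processOutputs:
#         # embedded data is written directly; referenced data is fetched first
#         output.writeToDisk(path='/tmp/', username=None, password=None)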
class WPSException:
"""
Class representing an exception raised by a WPS.
"""
def __init__(self, root):
self.code = root.attrib.get("exceptionCode", None)
self.locator = root.attrib.get("locator", None)
textEl = root.find( nspath('ExceptionText', ns=getNamespace(root)) )
if textEl is not None:
self.text = textEl.text
else:
self.text = ""
class Process(object):
"""
Class that represents a WPS process.
"""
def __init__(self, elem, verbose=False):
""" Initialization method extracts all available metadata from an XML document (passed in as etree object) """
# <ns0:ProcessDescriptions service="WPS" version="1.0.0"
# xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsDescribeProcess_response.xsd"
# xml:lang="en-US" xmlns:ns0="http://www.opengis.net/wps/1.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
# OR:
# <ns0:Process ns0:processVersion="1.0.0">
self._root = elem
self.verbose = verbose
wpsns = getNamespace(elem)
# <ProcessDescription statusSupported="true" storeSupported="true" ns0:processVersion="1.0.0">
self.processVersion = elem.get( nspath('processVersion', ns=wpsns) )
        self.statusSupported = elem.get("statusSupported", "").lower() in ("true", "1")
        self.storeSupported = elem.get("storeSupported", "").lower() in ("true", "1")
for child in elem:
# this element's namespace
ns = getNamespace(child)
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm</ows:Identifier>
if child.tag.endswith('Identifier'):
self.identifier = testXMLValue( child )
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">Feature Weighted Grid Statistics</ows:Title>
elif child.tag.endswith('Title'):
self.title = testXMLValue( child )
# <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">This algorithm generates area weighted statistics of a gridded dataset for a set of vector polygon features. Using the bounding-box that encloses the feature data and the time range, if provided, a subset of the gridded dataset is requested from the remote gridded data server. Polygon representations are generated for cells in the retrieved grid. The polygon grid-cell representations are then projected to the feature data coordinate reference system. The grid-cells are used to calculate per grid-cell feature coverage fractions. Area-weighted statistics are then calculated for each feature using the grid values and fractions as weights. If the gridded dataset has a time range the last step is repeated for each time step within the time range or all time steps if a time range was not supplied.</ows:Abstract>
elif child.tag.endswith('Abstract'):
self.abstract = testXMLValue( child )
if self.verbose==True:
dump(self)
# <DataInputs>
self.dataInputs = []
for inputElement in elem.findall( 'DataInputs/Input' ):
self.dataInputs.append( Input(inputElement) )
if self.verbose==True:
dump(self.dataInputs[-1], prefix='\tInput: ')
# <ProcessOutputs>
self.processOutputs = []
for outputElement in elem.findall( 'ProcessOutputs/Output' ):
self.processOutputs.append( Output(outputElement) )
if self.verbose==True:
dump(self.processOutputs[-1], prefix='\tOutput: ')
class FeatureCollection():
'''
Base class to represent a Feature Collection used as input to a WPS request.
The method getXml() is invoked by the WPS execute() method to build the WPS request.
All subclasses must implement the getXml() method to provide their specific XML.
Implements IComplexData.
'''
def __init__(self):
pass
def getXml(self):
raise NotImplementedError
class WFSFeatureCollection(FeatureCollection):
'''
FeatureCollection specified by a WFS query.
    The query portion of the XML is provided by the wfsQuery object passed to the constructor, via its getXml() method.
'''
def __init__(self, wfsUrl, wfsQuery):
'''
wfsUrl: the WFS service URL
example: wfsUrl = "http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs"
wfsQuery : a WFS query instance
'''
self.url = wfsUrl
self.query = wfsQuery
# <wps:Reference xlink:href="http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs">
# <wps:Body>
# <wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" service="WFS" version="1.1.0" outputFormat="text/xml; subtype=gml/3.1.1" xsi:schemaLocation="http://www.opengis.net/wfs ../wfs/1.1.0/WFS.xsd">
# .......
# </wfs:GetFeature>
# </wps:Body>
# </wps:Reference>
def getXml(self):
root = etree.Element(nspath_eval('wps:Reference', namespaces), attrib = { nspath_eval("xlink:href",namespaces) : self.url} )
bodyElement = etree.SubElement(root, nspath_eval('wps:Body', namespaces))
getFeatureElement = etree.SubElement(bodyElement, nspath_eval('wfs:GetFeature', namespaces),
attrib = { "service":"WFS",
"version":"1.1.0",
"outputFormat":"text/xml; subtype=gml/3.1.1",
nspath_eval("xsi:schemaLocation",namespaces):"%s %s" % (namespaces['wfs'], '../wfs/1.1.0/WFS.xsd')})
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
getFeatureElement.append( self.query.getXml() )
return root
class WFSQuery():
'''
Class representing a WFS query, for insertion into a WFSFeatureCollection instance.
Implements IComplexData.
'''
    def __init__(self, typeName, propertyNames=None, filters=None):
        self.typeName = typeName
        self.propertyNames = propertyNames if propertyNames is not None else []
        self.filters = filters if filters is not None else []
def getXml(self):
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
queryElement = etree.Element(nspath_eval('wfs:Query', namespaces), attrib = { "typeName":self.typeName })
for propertyName in self.propertyNames:
propertyNameElement = etree.SubElement(queryElement, nspath_eval('wfs:PropertyName', namespaces))
propertyNameElement.text = propertyName
if len(self.filters)>0:
filterElement = etree.SubElement(queryElement, nspath_eval('ogc:Filter', namespaces))
            for filter_id in self.filters:
                etree.SubElement(filterElement, nspath_eval('ogc:GmlObjectId', namespaces),
                                 attrib={nspath_eval('gml:id', namespaces): filter_id})
return queryElement
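# Illustrative sketch (hedged, comments only): combining WFSQuery with
# WFSFeatureCollection to build the <wps:Reference> input of a WPS execute request.
# The URL, typeName and feature id below are placeholder values taken from the
# comments above.
#
#     query = WFSQuery("sample:CONUS_States",
#                      propertyNames=["the_geom", "STATE"],
#                      filters=["CONUS_States.508"])
#     featureCollection = WFSFeatureCollection(
#         "http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs", query)
#     referenceXml = featureCollection.getXml()  # etree Element embedded by execute()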
class GMLMultiPolygonFeatureCollection(FeatureCollection):
'''
Class that represents a FeatureCollection defined as a GML multi-polygon.
'''
def __init__(self, polygons):
'''
Initializer accepts an array of polygons, where each polygon is an array of (lat,lon) tuples.
Example: polygons = [ [(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418), (-101.2363, 39.5273), (-102.8184, 39.5273)],
[(-92.8184, 39.5273), (-92.8184, 37.418), (-91.2363, 37.418), (-91.2363, 39.5273), (-92.8184, 39.5273)] ]
'''
self.polygons = polygons
def getXml(self):
'''
<wps:Data>
<wps:ComplexData mimeType="text/xml" encoding="UTF-8"
schema="http://schemas.opengis.net/gml/3.1.1/base/feature.xsd">
<gml:featureMembers xmlns:ogc="http://www.opengis.net/ogc"
xmlns:draw="gov.usgs.cida.gdp.draw" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ows="http://www.opengis.net/ows" xmlns:gml="http://www.opengis.net/gml"
xmlns:xlink="http://www.w3.org/1999/xlink"
xsi:schemaLocation="gov.usgs.cida.gdp.draw http://cida.usgs.gov/climate/derivative/xsd/draw.xsd">
<gml:box gml:id="box.1">
<gml:the_geom>
<gml:MultiPolygon srsDimension="2"
srsName="http://www.opengis.net/gml/srs/epsg.xml#4326">
<gml:polygonMember>
<gml:Polygon>
<gml:exterior>
<gml:LinearRing>
<gml:posList>-102.8184 39.5273 -102.8184 37.418 -101.2363 37.418 -101.2363 39.5273 -102.8184 39.5273</gml:posList>
</gml:LinearRing>
</gml:exterior>
</gml:Polygon>
</gml:polygonMember>
</gml:MultiPolygon>
</gml:the_geom>
<gml:ID>0</gml:ID>
</gml:box>
</gml:featureMembers>
</wps:ComplexData>
</wps:Data>
'''
dataElement = etree.Element(nspath_eval('wps:Data', namespaces))
complexDataElement = etree.SubElement(dataElement, nspath_eval('wps:ComplexData', namespaces),
attrib={"mimeType":"text/xml", "encoding":"UTF-8", "schema":GML_SCHEMA_LOCATION} )
featureMembersElement = etree.SubElement(complexDataElement, nspath_eval('gml:featureMembers', namespaces),
attrib={ nspath_eval("xsi:schemaLocation",namespaces):"%s %s" % (DRAW_NAMESPACE, DRAW_SCHEMA_LOCATION)})
boxElement = etree.SubElement(featureMembersElement, nspath_eval('gml:box', namespaces), attrib={ nspath_eval("gml:id",namespaces):"box.1" })
geomElement = etree.SubElement(boxElement, nspath_eval('gml:the_geom', namespaces))
multiPolygonElement = etree.SubElement(geomElement, nspath_eval('gml:MultiPolygon', namespaces),
attrib={"srsDimension":"2", "srsName":"http://www.opengis.net/gml/srs/epsg.xml#4326"} )
for polygon in self.polygons:
polygonMemberElement = etree.SubElement(multiPolygonElement, nspath_eval('gml:polygonMember', namespaces))
polygonElement = etree.SubElement(polygonMemberElement, nspath_eval('gml:Polygon', namespaces))
exteriorElement = etree.SubElement(polygonElement, nspath_eval('gml:exterior', namespaces))
linearRingElement = etree.SubElement(exteriorElement, nspath_eval('gml:LinearRing', namespaces))
posListElement = etree.SubElement(linearRingElement, nspath_eval('gml:posList', namespaces))
posListElement.text = ' '.join(["%s %s" % (x, y) for x, y in polygon[:] ])
idElement = etree.SubElement(boxElement, nspath_eval('gml:ID', namespaces))
idElement.text = "0"
return dataElement
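# Illustrative sketch (hedged, comments only): building a
# GMLMultiPolygonFeatureCollection from the coordinate tuples shown in the
# initializer docstring; getXml() returns the <wps:Data> element embedded in the
# execute request.
#
#     polygons = [[(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418),
#                  (-101.2363, 39.5273), (-102.8184, 39.5273)]]
#     featureCollection = GMLMultiPolygonFeatureCollection(polygons)
#     dataXml = featureCollection.getXml()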
def monitorExecution(execution, sleepSecs=3, download=False, filepath=None):
'''
    Convenience method to monitor the status of a WPS execution until it completes (successfully or not),
    and write the output to a file after a successful job completion.
    execution: WPSExecution instance
    sleepSecs: number of seconds to sleep between checkStatus invocations
    download: True to download the output when the process terminates, False otherwise
    filepath: optional path to the output file (if download=True); otherwise the filepath will be inferred from the response document
'''
    while not execution.isComplete():
execution.checkStatus(sleepSecs=sleepSecs)
log.info('Execution status: %s' % execution.status)
if execution.isSucceded():
if download:
execution.getOutput(filepath=filepath)
else:
for output in execution.processOutputs:
if output.reference is not None:
log.info('Output URL=%s' % output.reference)
else:
for ex in execution.errors:
log.error('Error: code=%s, locator=%s, text=%s' % (ex.code, ex.locator, ex.text))
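# Illustrative end-to-end sketch (hedged, comments only): execute a process and
# monitor it with monitorExecution(). WebProcessingService is assumed to be defined
# earlier in this module; the endpoint URL, process identifier and inputs list are
# placeholders (inputs would be a list of (identifier, value) pairs).
#
#     wps = WebProcessingService('http://cida.usgs.gov/climate/gdp/process/WebProcessingService')
#     execution = wps.execute(
#         'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm', inputs)
#     monitorExecution(execution, sleepSecs=5, download=True)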
def printValue(value):
'''
Utility method to format a value for printing.
'''
# ComplexData type
if isinstance(value, ComplexData):
return "mimeType=%s, encoding=%s, schema=%s" % (value.mimeType, value.encoding, value.schema)
# other type
else:
return value
def printInputOutput(value, indent=''):
'''
Utility method to inspect an input/output element.
'''
# InputOutput fields
print('%s identifier=%s, title=%s, abstract=%s, data type=%s' % (indent, value.identifier, value.title, value.abstract, value.dataType))
for val in value.allowedValues:
print('%s Allowed Value: %s' % (indent, printValue(val)))
if value.anyValue:
print(' Any value allowed')
for val in value.supportedValues:
print('%s Supported Value: %s' % (indent, printValue(val)))
print('%s Default Value: %s ' % (indent, printValue(value.defaultValue)))
# Input fields
if isinstance(value, Input):
print('%s minOccurs=%d, maxOccurs=%d' % (indent, value.minOccurs, value.maxOccurs))
# Output fields
if isinstance(value, Output):
print('%s reference=%s, mimeType=%s' % (indent, value.reference, value.mimeType))
for datum in value.data:
print('%s Data Value: %s' % (indent, printValue(datum)))
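# Illustrative sketch (hedged, comments only): inspecting a process description with
# printInputOutput(). 'wps' is assumed to be a WebProcessingService instance defined
# earlier in this module; the identifier is a placeholder.
#
#     process = wps.describeprocess(
#         'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm')
#     for process_input in process.dataInputs:
#         printInputOutput(process_input, indent='  ')
#     for process_output in process.processOutputs:
#         printInputOutput(process_output, indent='  ')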
|
|
# coding=utf-8
"""
#### Grants
* Normal usage
```
GRANT REPLICATION CLIENT on *.* TO 'user'@'hostname' IDENTIFIED BY
'password';
```
* For innodb engine status
```
GRANT SUPER ON *.* TO 'user'@'hostname' IDENTIFIED BY
'password';
```
* For innodb engine status on MySQL versions 5.1.24+
```
GRANT PROCESS ON *.* TO 'user'@'hostname' IDENTIFIED BY
'password';
```
#### Dependencies
* MySQLdb
"""
import diamond.collector
from diamond.collector import str_to_bool
import re
import time
try:
import MySQLdb
from MySQLdb import MySQLError
except ImportError:
MySQLdb = None
MySQLError = ValueError
class MySQLCollector(diamond.collector.Collector):
_GAUGE_KEYS = [
'Innodb_buffer_pool_pages_data', 'Innodb_buffer_pool_pages_dirty',
'Innodb_buffer_pool_pages_free',
'Innodb_buffer_pool_pages_misc', 'Innodb_buffer_pool_pages_total',
'Innodb_data_pending_fsyncs', 'Innodb_data_pending_reads',
'Innodb_data_pending_writes',
'Innodb_os_log_pending_fsyncs', 'Innodb_os_log_pending_writes',
'Innodb_page_size',
'Innodb_row_lock_current_waits', 'Innodb_row_lock_time',
'Innodb_row_lock_time_avg',
'Innodb_row_lock_time_max',
'Key_blocks_unused', 'Last_query_cost', 'Max_used_connections',
'Open_files', 'Open_streams', 'Open_table_definitions', 'Open_tables',
'Qcache_free_blocks', 'Qcache_free_memory',
'Qcache_queries_in_cache', 'Qcache_total_blocks',
'Seconds_Behind_Master',
'Slave_open_temp_tables',
'Threads_cached', 'Threads_connected', 'Threads_created',
'Threads_running',
# innodb status non counter keys
'Innodb_bp_created_per_sec',
'Innodb_bp_pages_evicted_no_access_per_sec',
'Innodb_bp_pages_not_young_per_sec',
'Innodb_bp_pages_read_ahead_per_sec', 'Innodb_bp_pages_young_per_sec',
'Innodb_bp_reads_per_sec', 'Innodb_bp_written_per_sec',
'Innodb_bp_add_alloc', 'Innodb_bp_db_pages',
'Innodb_bp_dictionary_alloc', 'Innodb_bp_free_buffers',
'Innodb_bp_hit_rate', 'Innodb_bp_io_cur_pages',
'Innodb_bp_io_sum_pages', 'Innodb_bp_io_unzip_cur_pages',
'Innodb_bp_io_unzip_sum_pages', 'Innodb_bp_lru_len',
'Innodb_bp_modified_pages', 'Innodb_bp_not_young_hit_rate',
'Innodb_bp_old_db_pages', 'Innodb_bp_pending_pages',
'Innodb_bp_pending_writes_flush_list', 'Innodb_bp_pending_writes_lru',
'Innodb_bp_pending_writes_single_page', 'Innodb_bp_size',
'Innodb_bp_total_alloc', 'Innodb_bp_unzip_lru_len',
'Innodb_bp_young_hit_rate',
'Innodb_hash_searches_per_sec',
'Innodb_io_syncs_per_sec',
'Innodb_log_io_per_sec',
'Innodb_non_hash_searches_per_sec',
'Innodb_per_sec_avg',
'Innodb_reads_per_sec',
'Innodb_rows_deleted_per_sec', 'Innodb_rows_inserted_per_sec',
'Innodb_rows_read_per_sec', 'Innodb_rows_updated_per_sec',
'Innodb_sem_spins_per_wait_mutex', 'Innodb_sem_spins_per_wait_rw_excl',
'Innodb_sem_spins_per_wait_rw_shared',
'Innodb_writes_per_sec',
'Innodb_bytes_per_read',
'Innodb_hash_node_heap', 'Innodb_hash_table_size',
'Innodb_hash_used_cells',
'Innodb_ibuf_free_list_len', 'Innodb_ibuf_seg_size', 'Innodb_ibuf_size',
'Innodb_io_ibuf_logs', 'Innodb_io_ibuf_reads', 'Innodb_io_ibuf_syncs',
'Innodb_io_pending_flush_bp', 'Innodb_io_pending_flush_log',
        'Innodb_io_pending_reads', 'Innodb_io_pending_writes',
'Innodb_log_pending_checkpoint_writes', 'Innodb_log_pending_log_writes',
'Innodb_row_queries_inside', 'Innodb_row_queries_queue',
'Innodb_trx_history_list_length', 'Innodb_trx_total_lock_structs',
'Innodb_status_process_time', ]
_IGNORE_KEYS = [
'Master_Port', 'Master_Server_Id',
'Last_Errno', 'Last_IO_Errno', 'Last_SQL_Errno', ]
innodb_status_keys = {
'Innodb_bp_total_alloc,' +
'Innodb_bp_add_alloc':
'Total memory allocated (\d+)\; in additional pool allocated (\d+)',
'Innodb_bp_reads_per_sec,' +
'Innodb_bp_created_per_sec,' +
'Innodb_bp_written_per_sec':
'(^\d+.\d+) reads/s, (\d+.\d+) creates/s, (\d+.\d+) writes/s',
'Innodb_io_ibuf_reads,Innodb_io_ibuf_logs,Innodb_io_ibuf_syncs':
' ibuf aio reads: (\d+), log i/o\'s: (\d+), sync i/o\'s: (\d+)',
'Innodb_log_pending_log_writes,Innodb_log_pending_checkpoint_writes':
'(\d+) pending log writes, (\d+) pending chkp writes',
'Innodb_hash_searches_per_sec,Innodb_non_hash_searches_per_sec':
'(\d+.\d+) hash searches/s, (\d+.\d+) non-hash searches/s',
'Innodb_row_queries_inside,Innodb_row_queries_queue':
'(\d+) queries inside InnoDB, (\d+) queries in queue',
'Innodb_trx_total_lock_structs':
'(\d+) lock struct\(s\), ' +
'heap size (\d+), ' +
'(\d+) row lock\(s\), ' +
'undo log entries (\d+)',
'Innodb_log_io_total,Innodb_log_io_per_sec':
'(\d+) log i\/o\'s done, (\d+.\d+) log i\/o\'s\/second',
'Innodb_io_os_file_reads,Innodb_io_os_file_writes,' +
'Innodb_io_os_file_fsyncs':
'(\d+) OS file reads, (\d+) OS file writes, (\d+) OS fsyncs',
'Innodb_rows_inserted_per_sec,Innodb_rows_updated_per_sec,' +
'Innodb_rows_deleted_per_sec,Innodb_rows_read_per_sec':
'(\d+.\d+) inserts\/s, ' +
'(\d+.\d+) updates\/s, ' +
'(\d+.\d+) deletes\/s, ' +
'(\d+.\d+) reads\/s',
'Innodb_reads_per_sec,Innodb_bytes_per_read,Innodb_io_syncs_per_sec,' +
'Innodb_writes_per_sec':
'(\d+.\d+) reads\/s, (\d+) avg bytes\/read, (\d+.\d+) writes\/s, ' +
'(\d+.\d+) fsyncs\/s',
'Innodb_bp_pages_young_per_sec,Innodb_bp_pages_not_young_per_sec':
'(\d+.\d+) youngs\/s, (\d+.\d+) non-youngs\/s',
'Innodb_bp_hit_rate,Innodb_bp_young_hit_rate,' +
'Innodb_bp_not_young_hit_rate':
'Buffer pool hit rate (\d+) \/ \d+, ' +
'young-making rate (\d+) \/ \d+ not (\d+) \/ \d+',
'Innodb_bp_size':
'Buffer pool size (\d+)',
'Innodb_bp_db_pages':
'Database pages (\d+)',
'Innodb_bp_dictionary_alloc':
'Dictionary memory allocated (\d+)',
'Innodb_bp_free_buffers':
'Free buffers (\d+)',
'Innodb_hash_table_size,Innodb_hash_node_heap':
'Hash table size (\d+), node heap has (\d+) buffer\(s\)',
'Innodb_trx_history_list_length':
'History list length (\d+)',
'Innodb_bp_io_sum_pages,Innodb_bp_io_cur_pages,' +
'Innodb_bp_io_unzip_sum_pages,Innodb_bp_io_unzip_cur_pages':
'I\/O sum\[(\d+)\]:cur\[(\d+)\], unzip sum\[(\d+)\]:cur\[(\d+)\]',
'Innodb_ibuf_size,Innodb_ibuf_free_list_len,Innodb_ibuf_seg_size,' +
'Innodb_ibuf_merges':
'Ibuf: size (\d+), free list len (\d+), seg size (\d+), (\d+) ' +
'merges',
'Innodb_bp_lru_len,Innodb_bp_unzip_lru_len':
'LRU len: (\d+), unzip_LRU len: (\d+)',
'Innodb_bp_modified_pages':
'Modified db pages (\d+)',
'Innodb_sem_mutex_spin_waits,Innodb_sem_mutex_rounds,' +
'Innodb_sem_mutex_os_waits':
'Mutex spin waits (\d+), rounds (\d+), OS waits (\d+)',
'Innodb_rows_inserted,Innodb_rows_updated,Innodb_rows_deleted,' +
'Innodb_rows_read':
'Number of rows inserted (\d+), updated (\d+), deleted (\d+), ' +
'read (\d+)',
'Innodb_bp_old_db_pages':
'Old database pages (\d+)',
'Innodb_sem_os_reservation_count,' +
'Innodb_sem_os_signal_count':
'OS WAIT ARRAY INFO: reservation count (\d+), signal count (\d+)',
'Innodb_bp_pages_young,Innodb_bp_pages_not_young':
'Pages made young (\d+), not young (\d+)',
'Innodb_bp_pages_read,Innodb_bp_pages_created,Innodb_bp_pages_written':
'Pages read (\d+), created (\d+), written (\d+)',
'Innodb_bp_pages_read_ahead_per_sec,' +
'Innodb_bp_pages_evicted_no_access_per_sec,' +
'Innodb_status_bp_pages_random_read_ahead':
'Pages read ahead (\d+.\d+)/s, ' +
'evicted without access (\d+.\d+)\/s, ' +
'Random read ahead (\d+.\d+)/s',
'Innodb_io_pending_flush_log,Innodb_io_pending_flush_bp':
'Pending flushes \(fsync\) log: (\d+); buffer pool: (\d+)',
'Innodb_io_pending_reads,Innodb_io_pending_writes':
'Pending normal aio reads: (\d+) \[\d+, \d+, \d+, \d+\], aio ' +
'writes: (\d+) \[\d+, \d+, \d+, \d+\]',
'Innodb_bp_pending_writes_lru,Innodb_bp_pending_writes_flush_list,' +
'Innodb_bp_pending_writes_single_page':
'Pending writes: LRU (\d+), flush list (\d+), single page (\d+)',
'Innodb_per_sec_avg':
'Per second averages calculated from the last (\d+) seconds',
'Innodb_sem_rw_excl_spins,Innodb_sem_rw_excl_rounds,' +
'Innodb_sem_rw_excl_os_waits':
'RW-excl spins (\d+), rounds (\d+), OS waits (\d+)',
'Innodb_sem_shared_spins,Innodb_sem_shared_rounds,' +
'Innodb_sem_shared_os_waits':
'RW-shared spins (\d+), rounds (\d+), OS waits (\d+)',
'Innodb_sem_spins_per_wait_mutex,Innodb_sem_spins_per_wait_rw_shared,' +
'Innodb_sem_spins_per_wait_rw_excl':
'Spin rounds per wait: (\d+.\d+) mutex, (\d+.\d+) RW-shared, ' +
'(\d+.\d+) RW-excl',
'Innodb_main_thd_log_flush_writes':
'srv_master_thread log flush and writes: (\d+)',
'Innodb_main_thd_loops_one_sec,Innodb_main_thd_loops_sleeps,' +
'Innodb_main_thd_loops_ten_sec,Innodb_main_thd_loops_background,' +
'Innodb_main_thd_loops_flush':
'srv_master_thread loops: (\d+) 1_second, (\d+) sleeps, (\d+) ' +
'10_second, (\d+) background, (\d+) flush',
'Innodb_ibuf_inserts,Innodb_ibuf_merged_recs,Innodb_ibuf_merges':
'(\d+) inserts, (\d+) merged recs, (\d+) merges',
}
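    # Hedged illustration of how these patterns are applied (the numbers are made up):
    # a SHOW ENGINE INNODB STATUS line such as
    #     "12 queries inside InnoDB, 3 queries in queue"
    # matches the 'Innodb_row_queries_inside,Innodb_row_queries_queue' entry above,
    # so the collector records Innodb_row_queries_inside=12 and
    # Innodb_row_queries_queue=3 (both listed as gauges in _GAUGE_KEYS).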
innodb_status_match = {}
def __init__(self, *args, **kwargs):
super(MySQLCollector, self).__init__(*args, **kwargs)
for key in self.innodb_status_keys:
self.innodb_status_keys[key] = re.compile(
self.innodb_status_keys[key])
def process_config(self):
super(MySQLCollector, self).process_config()
        if not isinstance(self.config['hosts'], list):
self.config['hosts'] = [self.config['hosts']]
# Move legacy config format to new format
if 'host' in self.config:
hoststr = "%s:%s@%s:%s/%s" % (
self.config['user'],
self.config['passwd'],
self.config['host'],
self.config['port'],
self.config['db'],
)
self.config['hosts'].append(hoststr)
# Normalize some config vars
self.config['master'] = str_to_bool(self.config['master'])
self.config['slave'] = str_to_bool(self.config['slave'])
self.config['innodb'] = str_to_bool(self.config['innodb'])
self.db = None
def get_default_config_help(self):
config_help = super(MySQLCollector, self).get_default_config_help()
config_help.update({
'publish':
"Which rows of '[SHOW GLOBAL STATUS](http://dev.mysql." +
"com/doc/refman/5.1/en/show-status.html)' you would " +
"like to publish. Leave unset to publish all",
'slave': 'Collect SHOW SLAVE STATUS',
'master': 'Collect SHOW MASTER STATUS',
'innodb': 'Collect SHOW ENGINE INNODB STATUS',
            'hosts': 'List of hosts to collect from. Format is ' +
                     'yourusername:yourpassword@host:port/db[/nickname]. ' +
                     'Use db "None" to avoid connecting to a particular db'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MySQLCollector, self).get_default_config()
config.update({
'path': 'mysql',
# Connection settings
'hosts': [],
# Which rows of 'SHOW GLOBAL STATUS' you would like to publish.
# http://dev.mysql.com/doc/refman/5.1/en/show-status.html
# Leave unset to publish all
# 'publish': '',
'slave': False,
'master': False,
'innodb': False,
})
return config
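    # Hedged example of a collector configuration (names and credentials are
    # placeholders; the exact file syntax depends on your diamond setup). The hosts
    # entry follows the user:password@host:port/db[/nickname] format described in
    # get_default_config_help():
    #
    #     enabled = True
    #     hosts = scott:tiger@localhost:3306/None/primary
    #     master = True
    #     slave = True
    #     innodb = True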
def get_db_stats(self, query):
cursor = self.db.cursor(cursorclass=MySQLdb.cursors.DictCursor)
try:
cursor.execute(query)
return cursor.fetchall()
except MySQLError as e:
            self.log.error('MySQLCollector could not get db stats: %s', e)
return ()
def connect(self, params):
try:
self.db = MySQLdb.connect(**params)
self.log.debug('MySQLCollector: Connected to database.')
except MySQLError as e:
            self.log.error('MySQLCollector could not connect to database: %s', e)
return False
return True
def disconnect(self):
self.db.close()
def get_db_global_status(self):
return self.get_db_stats('SHOW GLOBAL STATUS')
def get_db_master_status(self):
return self.get_db_stats('SHOW MASTER STATUS')
def get_db_slave_status(self):
return self.get_db_stats('SHOW SLAVE STATUS')
def get_db_innodb_status(self):
return self.get_db_stats('SHOW ENGINE INNODB STATUS')
def get_stats(self, params):
metrics = {'status': {}}
if not self.connect(params):
return metrics
rows = self.get_db_global_status()
for row in rows:
try:
metrics['status'][row['Variable_name']] = float(row['Value'])
            except (ValueError, TypeError):
                pass
if self.config['master']:
metrics['master'] = {}
try:
rows = self.get_db_master_status()
for row_master in rows:
for key, value in row_master.items():
if key in self._IGNORE_KEYS:
continue
try:
metrics['master'][key] = float(row_master[key])
                        except (ValueError, TypeError):
                            pass
            except Exception:
                self.log.error('MySQLCollector: could not get master status')
if self.config['slave']:
metrics['slave'] = {}
try:
rows = self.get_db_slave_status()
for row_slave in rows:
for key, value in row_slave.items():
if key in self._IGNORE_KEYS:
continue
try:
metrics['slave'][key] = float(row_slave[key])
                        except (ValueError, TypeError):
                            pass
            except Exception:
                self.log.error('MySQLCollector: could not get slave status')
if self.config['innodb']:
metrics['innodb'] = {}
innodb_status_timer = time.time()
try:
rows = self.get_db_innodb_status()
innodb_status_output = rows[0]
                todo = list(self.innodb_status_keys.keys())
for line in innodb_status_output['Status'].split('\n'):
for key in todo:
match = self.innodb_status_keys[key].match(line)
if match is not None:
todo.remove(key)
match_index = 1
for key_index in key.split(','):
try:
value = float(match.group(match_index))
# store value
                                    if key_index in metrics['innodb']:
self.log.debug("MySQLCollector: %s " +
"already defined, " +
"ignoring new value",
key_index)
else:
metrics['innodb'][key_index] = value
match_index += 1
except IndexError:
self.log.debug(
"MySQLCollector: Cannot find value " +
"in innodb status for %s", key_index)
for key in todo:
self.log.debug("MySQLCollector: %s regexp not matched " +
"in innodb status", key)
except Exception as innodb_status_error:
                self.log.error('MySQLCollector: could not get engine innodb ' +
'status, check user permissions: %s',
innodb_status_error)
Innodb_status_process_time = time.time() - innodb_status_timer
self.log.debug("MySQLCollector: innodb status process time: %f",
Innodb_status_process_time)
subkey = "Innodb_status_process_time"
metrics['innodb'][subkey] = Innodb_status_process_time
self.disconnect()
return metrics
def _publish_stats(self, nickname, metrics):
for key in metrics:
for metric_name in metrics[key]:
metric_value = metrics[key][metric_name]
                if not isinstance(metric_value, float):
continue
if metric_name not in self._GAUGE_KEYS:
metric_value = self.derivative(nickname + metric_name,
metric_value)
if key == 'status':
if (('publish' not in self.config or
metric_name in self.config['publish'])):
self.publish(nickname + metric_name, metric_value)
else:
self.publish(nickname + metric_name, metric_value)
def collect(self):
if MySQLdb is None:
self.log.error('Unable to import MySQLdb')
return False
for host in self.config['hosts']:
matches = re.search(
'^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)', host)
if not matches:
self.log.error(
'Connection string not in required format, skipping: %s',
host)
continue
params = {'host': matches.group(3)}
try:
params['port'] = int(matches.group(4))
except ValueError:
params['port'] = 3306
params['db'] = matches.group(5)
params['user'] = matches.group(1)
params['passwd'] = matches.group(2)
nickname = matches.group(6)
if len(nickname):
nickname += '.'
if params['db'] == 'None':
del params['db']
try:
metrics = self.get_stats(params=params)
except Exception as e:
try:
self.disconnect()
except MySQLdb.ProgrammingError:
pass
self.log.error('Collection failed for %s %s', nickname, e)
continue
# Warn if publish contains an unknown variable
if 'publish' in self.config and metrics['status']:
for k in self.config['publish'].split():
if k not in metrics['status']:
self.log.error("No such key '%s' available, issue " +
"'show global status' for a full " +
"list", k)
self._publish_stats(nickname, metrics)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
import os
import shutil
import tempfile
import time
import unittest
from functools import reduce
from itertools import chain
import platform
from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.testing.streamingutils import PySparkStreamingTestCase
@unittest.skipIf(
"pypy" in platform.python_implementation().lower(),
"The tests fail in PyPy3 implementation for an unknown reason.",
)
class BasicOperationTests(PySparkStreamingTestCase):
def test_map(self):
"""Basic operation test for DStream.map."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.map(str)
expected = [list(map(str, x)) for x in input]
self._test_func(input, func, expected)
def test_flatMap(self):
"""Basic operation test for DStream.flatMap."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.flatMap(lambda x: (x, x * 2))
expected = [list(chain.from_iterable((map(lambda y: [y, y * 2], x)))) for x in input]
self._test_func(input, func, expected)
def test_filter(self):
"""Basic operation test for DStream.filter."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.filter(lambda x: x % 2 == 0)
expected = [[y for y in x if y % 2 == 0] for x in input]
self._test_func(input, func, expected)
def test_count(self):
"""Basic operation test for DStream.count."""
input = [range(5), range(10), range(20)]
def func(dstream):
return dstream.count()
expected = [[len(x)] for x in input]
self._test_func(input, func, expected)
def test_slice(self):
"""Basic operation test for DStream.slice."""
import datetime as dt
self.ssc = StreamingContext(self.sc, 1.0)
self.ssc.remember(4.0)
input = [[1], [2], [3], [4]]
stream = self.ssc.queueStream([self.sc.parallelize(d, 1) for d in input])
time_vals = []
def get_times(t, rdd):
if rdd and len(time_vals) < len(input):
time_vals.append(t)
stream.foreachRDD(get_times)
self.ssc.start()
self.wait_for(time_vals, 4)
begin_time = time_vals[0]
def get_sliced(begin_delta, end_delta):
begin = begin_time + dt.timedelta(seconds=begin_delta)
end = begin_time + dt.timedelta(seconds=end_delta)
rdds = stream.slice(begin, end)
result_list = [rdd.collect() for rdd in rdds]
return [r for result in result_list for r in result]
self.assertEqual(set([1]), set(get_sliced(0, 0)))
self.assertEqual(set([2, 3]), set(get_sliced(1, 2)))
self.assertEqual(set([2, 3, 4]), set(get_sliced(1, 4)))
self.assertEqual(set([1, 2, 3, 4]), set(get_sliced(0, 4)))
def test_reduce(self):
"""Basic operation test for DStream.reduce."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.reduce(operator.add)
expected = [[reduce(operator.add, x)] for x in input]
self._test_func(input, func, expected)
def test_reduceByKey(self):
"""Basic operation test for DStream.reduceByKey."""
input = [
[("a", 1), ("a", 1), ("b", 1), ("b", 1)],
[("", 1), ("", 1), ("", 1), ("", 1)],
[(1, 1), (1, 1), (2, 1), (2, 1), (3, 1)],
]
def func(dstream):
return dstream.reduceByKey(operator.add)
expected = [[("a", 2), ("b", 2)], [("", 4)], [(1, 2), (2, 2), (3, 1)]]
self._test_func(input, func, expected, sort=True)
def test_mapValues(self):
"""Basic operation test for DStream.mapValues."""
input = [
[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
[(0, 4), (1, 1), (2, 2), (3, 3)],
[(1, 1), (2, 1), (3, 1), (4, 1)],
]
def func(dstream):
return dstream.mapValues(lambda x: x + 10)
expected = [
[("a", 12), ("b", 12), ("c", 11), ("d", 11)],
[(0, 14), (1, 11), (2, 12), (3, 13)],
[(1, 11), (2, 11), (3, 11), (4, 11)],
]
self._test_func(input, func, expected, sort=True)
def test_flatMapValues(self):
"""Basic operation test for DStream.flatMapValues."""
input = [
[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
[(0, 4), (1, 1), (2, 1), (3, 1)],
[(1, 1), (2, 1), (3, 1), (4, 1)],
]
def func(dstream):
return dstream.flatMapValues(lambda x: (x, x + 10))
expected = [
[("a", 2), ("a", 12), ("b", 2), ("b", 12), ("c", 1), ("c", 11), ("d", 1), ("d", 11)],
[(0, 4), (0, 14), (1, 1), (1, 11), (2, 1), (2, 11), (3, 1), (3, 11)],
[(1, 1), (1, 11), (2, 1), (2, 11), (3, 1), (3, 11), (4, 1), (4, 11)],
]
self._test_func(input, func, expected)
def test_glom(self):
"""Basic operation test for DStream.glom."""
input = [range(1, 5), range(5, 9), range(9, 13)]
rdds = [self.sc.parallelize(r, 2) for r in input]
def func(dstream):
return dstream.glom()
expected = [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]
self._test_func(rdds, func, expected)
def test_mapPartitions(self):
"""Basic operation test for DStream.mapPartitions."""
input = [range(1, 5), range(5, 9), range(9, 13)]
rdds = [self.sc.parallelize(r, 2) for r in input]
def func(dstream):
def f(iterator):
yield sum(iterator)
return dstream.mapPartitions(f)
expected = [[3, 7], [11, 15], [19, 23]]
self._test_func(rdds, func, expected)
def test_countByValue(self):
"""Basic operation test for DStream.countByValue."""
input = [list(range(1, 5)) * 2, list(range(5, 7)) + list(range(5, 9)), ["a", "a", "b", ""]]
def func(dstream):
return dstream.countByValue()
expected = [
[(1, 2), (2, 2), (3, 2), (4, 2)],
[(5, 2), (6, 2), (7, 1), (8, 1)],
[("a", 2), ("b", 1), ("", 1)],
]
self._test_func(input, func, expected, sort=True)
def test_groupByKey(self):
"""Basic operation test for DStream.groupByKey."""
input = [
[(1, 1), (2, 1), (3, 1), (4, 1)],
[(1, 1), (1, 1), (1, 1), (2, 1), (2, 1), (3, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1), ("", 1)],
]
def func(dstream):
return dstream.groupByKey().mapValues(list)
expected = [
[(1, [1]), (2, [1]), (3, [1]), (4, [1])],
[(1, [1, 1, 1]), (2, [1, 1]), (3, [1])],
[("a", [1, 1]), ("b", [1]), ("", [1, 1, 1])],
]
self._test_func(input, func, expected, sort=True)
def test_combineByKey(self):
"""Basic operation test for DStream.combineByKey."""
input = [
[(1, 1), (2, 1), (3, 1), (4, 1)],
[(1, 1), (1, 1), (1, 1), (2, 1), (2, 1), (3, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1), ("", 1)],
]
def func(dstream):
def add(a, b):
return a + str(b)
return dstream.combineByKey(str, add, add)
expected = [
[(1, "1"), (2, "1"), (3, "1"), (4, "1")],
[(1, "111"), (2, "11"), (3, "1")],
[("a", "11"), ("b", "1"), ("", "111")],
]
self._test_func(input, func, expected, sort=True)
def test_repartition(self):
input = [range(1, 5), range(5, 9)]
rdds = [self.sc.parallelize(r, 2) for r in input]
def func(dstream):
return dstream.repartition(1).glom()
expected = [[[1, 2, 3, 4]], [[5, 6, 7, 8]]]
self._test_func(rdds, func, expected)
def test_union(self):
input1 = [range(3), range(5), range(6)]
input2 = [range(3, 6), range(5, 6)]
def func(d1, d2):
return d1.union(d2)
expected = [list(range(6)), list(range(6)), list(range(6))]
self._test_func(input1, func, expected, input2=input2)
def test_cogroup(self):
input = [
[(1, 1), (2, 1), (3, 1)],
[(1, 1), (1, 1), (1, 1), (2, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1)],
]
input2 = [[(1, 2)], [(4, 1)], [("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 2)]]
def func(d1, d2):
return d1.cogroup(d2).mapValues(lambda vs: tuple(map(list, vs)))
expected = [
[(1, ([1], [2])), (2, ([1], [])), (3, ([1], []))],
[(1, ([1, 1, 1], [])), (2, ([1], [])), (4, ([], [1]))],
[("a", ([1, 1], [1, 1])), ("b", ([1], [1])), ("", ([1, 1], [1, 2]))],
]
self._test_func(input, func, expected, sort=True, input2=input2)
def test_join(self):
input = [[("a", 1), ("b", 2)]]
input2 = [[("b", 3), ("c", 4)]]
def func(a, b):
return a.join(b)
expected = [[("b", (2, 3))]]
self._test_func(input, func, expected, True, input2)
def test_left_outer_join(self):
input = [[("a", 1), ("b", 2)]]
input2 = [[("b", 3), ("c", 4)]]
def func(a, b):
return a.leftOuterJoin(b)
expected = [[("a", (1, None)), ("b", (2, 3))]]
self._test_func(input, func, expected, True, input2)
def test_right_outer_join(self):
input = [[("a", 1), ("b", 2)]]
input2 = [[("b", 3), ("c", 4)]]
def func(a, b):
return a.rightOuterJoin(b)
expected = [[("b", (2, 3)), ("c", (None, 4))]]
self._test_func(input, func, expected, True, input2)
def test_full_outer_join(self):
input = [[("a", 1), ("b", 2)]]
input2 = [[("b", 3), ("c", 4)]]
def func(a, b):
return a.fullOuterJoin(b)
expected = [[("a", (1, None)), ("b", (2, 3)), ("c", (None, 4))]]
self._test_func(input, func, expected, True, input2)
def test_update_state_by_key(self):
def updater(vs, s):
if not s:
s = []
s.extend(vs)
return s
input = [[("k", i)] for i in range(5)]
def func(dstream):
return dstream.updateStateByKey(updater)
expected = [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3], [0, 1, 2, 3, 4]]
expected = [[("k", v)] for v in expected]
self._test_func(input, func, expected)
def test_update_state_by_key_initial_rdd(self):
def updater(vs, s):
if not s:
s = []
s.extend(vs)
return s
initial = [("k", [0, 1])]
initial = self.sc.parallelize(initial, 1)
input = [[("k", i)] for i in range(2, 5)]
def func(dstream):
return dstream.updateStateByKey(updater, initialRDD=initial)
expected = [[0, 1, 2], [0, 1, 2, 3], [0, 1, 2, 3, 4]]
expected = [[("k", v)] for v in expected]
self._test_func(input, func, expected)
def test_failed_func(self):
# Test failure in
# TransformFunction.apply(rdd: Option[RDD[_]], time: Time)
input = [self.sc.parallelize([d], 1) for d in range(4)]
input_stream = self.ssc.queueStream(input)
def failed_func(i):
raise ValueError("This is a special error")
input_stream.map(failed_func).pprint()
self.ssc.start()
try:
self.ssc.awaitTerminationOrTimeout(10)
except BaseException:
import traceback
failure = traceback.format_exc()
self.assertTrue("This is a special error" in failure)
return
self.fail("a failed func should throw an error")
def test_failed_func2(self):
# Test failure in
# TransformFunction.apply(rdd: Option[RDD[_]], rdd2: Option[RDD[_]], time: Time)
input = [self.sc.parallelize([d], 1) for d in range(4)]
input_stream1 = self.ssc.queueStream(input)
input_stream2 = self.ssc.queueStream(input)
def failed_func(rdd1, rdd2):
raise ValueError("This is a special error")
input_stream1.transformWith(failed_func, input_stream2, True).pprint()
self.ssc.start()
try:
self.ssc.awaitTerminationOrTimeout(10)
except BaseException:
import traceback
failure = traceback.format_exc()
self.assertTrue("This is a special error" in failure)
return
self.fail("a failed func should throw an error")
def test_failed_func_with_reseting_failure(self):
input = [self.sc.parallelize([d], 1) for d in range(4)]
input_stream = self.ssc.queueStream(input)
def failed_func(i):
if i == 1:
# Make it fail in the second batch
raise ValueError("This is a special error")
else:
return i
# We should be able to see the results of the 3rd and 4th batches even if the second batch
# fails
expected = [[0], [2], [3]]
self.assertEqual(expected, self._collect(input_stream.map(failed_func), 3))
try:
self.ssc.awaitTerminationOrTimeout(10)
except BaseException:
import traceback
failure = traceback.format_exc()
self.assertTrue("This is a special error" in failure)
return
self.fail("a failed func should throw an error")
@unittest.skipIf(
"pypy" in platform.python_implementation().lower(),
"The tests fail in PyPy3 implementation for an unknown reason.",
)
class WindowFunctionTests(PySparkStreamingTestCase):
timeout = 15
def test_window(self):
input = [range(1), range(2), range(3), range(4), range(5)]
def func(dstream):
return dstream.window(1.5, 0.5).count()
expected = [[1], [3], [6], [9], [12], [9], [5]]
self._test_func(input, func, expected)
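    # A short note on the window arithmetic (hedged reading of the test setup): a
    # window of 1.5s sliding every 0.5s covers up to three consecutive batches and
    # emits once per slide, so the counts over batches of sizes 1..5 are
    # 1, 1+2, 1+2+3, 2+3+4, 3+4+5, 4+5, 5, i.e. the expected list used above and in
    # test_count_by_window below.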
def test_count_by_window(self):
input = [range(1), range(2), range(3), range(4), range(5)]
def func(dstream):
return dstream.countByWindow(1.5, 0.5)
expected = [[1], [3], [6], [9], [12], [9], [5]]
self._test_func(input, func, expected)
def test_count_by_window_large(self):
input = [range(1), range(2), range(3), range(4), range(5), range(6)]
def func(dstream):
return dstream.countByWindow(2.5, 0.5)
expected = [[1], [3], [6], [10], [15], [20], [18], [15], [11], [6]]
self._test_func(input, func, expected)
def test_count_by_value_and_window(self):
input = [range(1), range(2), range(3), range(4), range(5), range(6)]
def func(dstream):
return dstream.countByValueAndWindow(2.5, 0.5)
expected = [
[(0, 1)],
[(0, 2), (1, 1)],
[(0, 3), (1, 2), (2, 1)],
[(0, 4), (1, 3), (2, 2), (3, 1)],
[(0, 5), (1, 4), (2, 3), (3, 2), (4, 1)],
[(0, 5), (1, 5), (2, 4), (3, 3), (4, 2), (5, 1)],
[(0, 4), (1, 4), (2, 4), (3, 3), (4, 2), (5, 1)],
[(0, 3), (1, 3), (2, 3), (3, 3), (4, 2), (5, 1)],
[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 1)],
[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1)],
]
self._test_func(input, func, expected)
def test_group_by_key_and_window(self):
input = [[("a", i)] for i in range(5)]
def func(dstream):
return dstream.groupByKeyAndWindow(1.5, 0.5).mapValues(list)
expected = [
[("a", [0])],
[("a", [0, 1])],
[("a", [0, 1, 2])],
[("a", [1, 2, 3])],
[("a", [2, 3, 4])],
[("a", [3, 4])],
[("a", [4])],
]
self._test_func(input, func, expected)
def test_reduce_by_invalid_window(self):
input1 = [range(3), range(5), range(1), range(6)]
d1 = self.ssc.queueStream(input1)
self.assertRaises(ValueError, lambda: d1.reduceByKeyAndWindow(None, None, 0.1, 0.1))
self.assertRaises(ValueError, lambda: d1.reduceByKeyAndWindow(None, None, 1, 0.1))
def test_reduce_by_key_and_window_with_none_invFunc(self):
input = [range(1), range(2), range(3), range(4), range(5), range(6)]
def func(dstream):
return (
dstream.map(lambda x: (x, 1))
.reduceByKeyAndWindow(operator.add, None, 5, 1)
.filter(lambda kv: kv[1] > 0)
.count()
)
expected = [[2], [4], [6], [6], [6], [6]]
self._test_func(input, func, expected)
@unittest.skipIf(
"pypy" in platform.python_implementation().lower(),
"The tests fail in PyPy3 implementation for an unknown reason.",
)
class CheckpointTests(unittest.TestCase):
setupCalled = False
@staticmethod
def tearDownClass():
# Clean up in the JVM just in case there has been some issues in Python API
if SparkContext._jvm is not None:
jStreamingContextOption = (
SparkContext._jvm.org.apache.spark.streaming.StreamingContext.getActive()
)
if jStreamingContextOption.nonEmpty():
jStreamingContextOption.get().stop()
def setUp(self):
self.ssc = None
self.sc = None
self.cpd = None
def tearDown(self):
if self.ssc is not None:
self.ssc.stop(True)
if self.sc is not None:
self.sc.stop()
if self.cpd is not None:
shutil.rmtree(self.cpd)
def test_transform_function_serializer_failure(self):
inputd = tempfile.mkdtemp()
self.cpd = tempfile.mkdtemp("test_transform_function_serializer_failure")
def setup():
conf = SparkConf().set("spark.default.parallelism", 1)
sc = SparkContext(conf=conf)
ssc = StreamingContext(sc, 0.5)
# A function that cannot be serialized
def process(time, rdd):
sc.parallelize(range(1, 10))
ssc.textFileStream(inputd).foreachRDD(process)
return ssc
self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
try:
self.ssc.start()
except BaseException:
import traceback
failure = traceback.format_exc()
self.assertTrue(
"It appears that you are attempting to reference SparkContext" in failure
)
return
self.fail("using SparkContext in process should fail because it's not Serializable")
def test_get_or_create_and_get_active_or_create(self):
inputd = tempfile.mkdtemp()
outputd = tempfile.mkdtemp() + "/"
def updater(vs, s):
return sum(vs, s or 0)
def setup():
conf = SparkConf().set("spark.default.parallelism", 1)
sc = SparkContext(conf=conf)
ssc = StreamingContext(sc, 2)
dstream = ssc.textFileStream(inputd).map(lambda x: (x, 1))
wc = dstream.updateStateByKey(updater)
wc.map(lambda x: "%s,%d" % x).saveAsTextFiles(outputd + "test")
wc.checkpoint(2)
self.setupCalled = True
return ssc
# Verify that getOrCreate() calls setup() in absence of checkpoint files
self.cpd = tempfile.mkdtemp("test_streaming_cps")
self.setupCalled = False
self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
self.assertTrue(self.setupCalled)
self.ssc.start()
def check_output(n):
while not os.listdir(outputd):
if self.ssc.awaitTerminationOrTimeout(0.5):
raise RuntimeError("ssc stopped")
time.sleep(1) # make sure mtime is larger than the previous one
with open(os.path.join(inputd, str(n)), "w") as f:
f.writelines(["%d\n" % i for i in range(10)])
while True:
if self.ssc.awaitTerminationOrTimeout(0.5):
raise RuntimeError("ssc stopped")
p = os.path.join(outputd, max(os.listdir(outputd)))
if "_SUCCESS" not in os.listdir(p):
# not finished
continue
ordd = self.ssc.sparkContext.textFile(p).map(lambda line: line.split(","))
d = ordd.values().map(int).collect()
if not d:
continue
self.assertEqual(10, len(d))
s = set(d)
self.assertEqual(1, len(s))
m = s.pop()
if n > m:
continue
self.assertEqual(n, m)
break
check_output(1)
check_output(2)
# Verify the getOrCreate() recovers from checkpoint files
self.ssc.stop(True, True)
time.sleep(1)
self.setupCalled = False
self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
self.assertFalse(self.setupCalled)
self.ssc.start()
check_output(3)
# Verify that getOrCreate() uses existing SparkContext
self.ssc.stop(True, True)
time.sleep(1)
self.sc = SparkContext(conf=SparkConf())
self.setupCalled = False
self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
self.assertFalse(self.setupCalled)
self.assertTrue(self.ssc.sparkContext == self.sc)
# Verify the getActiveOrCreate() recovers from checkpoint files
self.ssc.stop(True, True)
time.sleep(1)
self.setupCalled = False
self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
self.assertFalse(self.setupCalled)
self.ssc.start()
check_output(4)
# Verify that getActiveOrCreate() returns active context
self.setupCalled = False
self.assertEqual(StreamingContext.getActiveOrCreate(self.cpd, setup), self.ssc)
self.assertFalse(self.setupCalled)
# Verify that getActiveOrCreate() uses existing SparkContext
self.ssc.stop(True, True)
time.sleep(1)
self.sc = SparkContext(conf=SparkConf())
self.setupCalled = False
self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
self.assertFalse(self.setupCalled)
self.assertTrue(self.ssc.sparkContext == self.sc)
# Verify that getActiveOrCreate() calls setup() in absence of checkpoint files
self.ssc.stop(True, True)
shutil.rmtree(self.cpd) # delete checkpoint directory
time.sleep(1)
self.setupCalled = False
self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
self.assertTrue(self.setupCalled)
# Stop everything
self.ssc.stop(True, True)
if __name__ == "__main__":
from pyspark.streaming.tests.test_dstream import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EnvironmentMap'
db.create_table(u'bsdfs_environmentmap', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.UserProfile'])),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
('tonemap_scale', self.gf('django.db.models.fields.FloatField')()),
('tonemap_white', self.gf('django.db.models.fields.FloatField')()),
))
db.send_create_signal(u'bsdfs', ['EnvironmentMap'])
# Adding model 'ShapeBsdfLabel_mf'
db.create_table(u'bsdfs_shapebsdflabel_mf', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.UserProfile'])),
('mturk_assignment', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, on_delete=models.SET_NULL, to=orm['mturk.MtAssignment'])),
('sandbox', self.gf('django.db.models.fields.BooleanField')(default=False)),
('invalid', self.gf('django.db.models.fields.BooleanField')(default=False)),
('quality_method', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('time_ms', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('time_active_ms', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('reward', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=4, blank=True)),
('admin_score', self.gf('django.db.models.fields.IntegerField')(default=0)),
('edit_dict', self.gf('django.db.models.fields.TextField')(blank=True)),
('edit_sum', self.gf('django.db.models.fields.IntegerField')(default=0)),
('edit_nnz', self.gf('django.db.models.fields.IntegerField')(default=0)),
('envmap', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['bsdfs.EnvironmentMap'], null=True, blank=True)),
('image_blob', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
('give_up', self.gf('django.db.models.fields.BooleanField')(default=False)),
('give_up_msg', self.gf('django.db.models.fields.TextField')(blank=True)),
('color_correct', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('color_correct_score', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('gloss_correct', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('gloss_correct_score', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('init_method', self.gf('django.db.models.fields.CharField')(max_length=2)),
('color_L', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('color_a', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('color_b', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('shape', self.gf('django.db.models.fields.related.ForeignKey')(related_name='bsdfs_mf', to=orm['shapes.MaterialShape'])),
('bsdf_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
('alpha_index', self.gf('django.db.models.fields.IntegerField')()),
('specular', self.gf('django.db.models.fields.FloatField')()),
('color_sRGB', self.gf('django.db.models.fields.CharField')(max_length=6)),
))
db.send_create_signal(u'bsdfs', ['ShapeBsdfLabel_mf'])
# Adding model 'ShapeBsdfLabel_wd'
db.create_table(u'bsdfs_shapebsdflabel_wd', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.UserProfile'])),
('mturk_assignment', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, on_delete=models.SET_NULL, to=orm['mturk.MtAssignment'])),
('sandbox', self.gf('django.db.models.fields.BooleanField')(default=False)),
('invalid', self.gf('django.db.models.fields.BooleanField')(default=False)),
('quality_method', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('time_ms', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('time_active_ms', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('reward', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=4, blank=True)),
('admin_score', self.gf('django.db.models.fields.IntegerField')(default=0)),
('edit_dict', self.gf('django.db.models.fields.TextField')(blank=True)),
('edit_sum', self.gf('django.db.models.fields.IntegerField')(default=0)),
('edit_nnz', self.gf('django.db.models.fields.IntegerField')(default=0)),
('envmap', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['bsdfs.EnvironmentMap'], null=True, blank=True)),
('image_blob', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
('give_up', self.gf('django.db.models.fields.BooleanField')(default=False)),
('give_up_msg', self.gf('django.db.models.fields.TextField')(blank=True)),
('color_correct', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('color_correct_score', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('gloss_correct', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('gloss_correct_score', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('init_method', self.gf('django.db.models.fields.CharField')(max_length=2)),
('color_L', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('color_a', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('color_b', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('shape', self.gf('django.db.models.fields.related.ForeignKey')(related_name='bsdfs_wd', to=orm['shapes.MaterialShape'])),
('contrast', self.gf('django.db.models.fields.FloatField')()),
('doi', self.gf('django.db.models.fields.IntegerField')()),
('metallic', self.gf('django.db.models.fields.BooleanField')(default=False)),
('color', self.gf('django.db.models.fields.CharField')(max_length=7)),
))
db.send_create_signal(u'bsdfs', ['ShapeBsdfLabel_wd'])
# Adding model 'ShapeBsdfQuality'
db.create_table(u'bsdfs_shapebsdfquality', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.UserProfile'])),
('mturk_assignment', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, on_delete=models.SET_NULL, to=orm['mturk.MtAssignment'])),
('sandbox', self.gf('django.db.models.fields.BooleanField')(default=False)),
('invalid', self.gf('django.db.models.fields.BooleanField')(default=False)),
('quality_method', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('time_ms', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('time_active_ms', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('reward', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=8, decimal_places=4, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('color_correct', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('gloss_correct', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('canttell', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
))
db.send_create_signal(u'bsdfs', ['ShapeBsdfQuality'])
def backwards(self, orm):
# Deleting model 'EnvironmentMap'
db.delete_table(u'bsdfs_environmentmap')
# Deleting model 'ShapeBsdfLabel_mf'
db.delete_table(u'bsdfs_shapebsdflabel_mf')
# Deleting model 'ShapeBsdfLabel_wd'
db.delete_table(u'bsdfs_shapebsdflabel_wd')
# Deleting model 'ShapeBsdfQuality'
db.delete_table(u'bsdfs_shapebsdfquality')
models = {
u'accounts.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'always_approve': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'blocked_reason': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'exclude_from_aggregation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_worker_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'bsdfs.environmentmap': {
'Meta': {'ordering': "['-id']", 'object_name': 'EnvironmentMap'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'tonemap_scale': ('django.db.models.fields.FloatField', [], {}),
'tonemap_white': ('django.db.models.fields.FloatField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'bsdfs.shapebsdflabel_mf': {
'Meta': {'ordering': "['-edit_nnz', '-time_ms']", 'object_name': 'ShapeBsdfLabel_mf'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'admin_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'alpha_index': ('django.db.models.fields.IntegerField', [], {}),
'bsdf_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'color_L': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_a': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_b': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'color_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_sRGB': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'edit_dict': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edit_nnz': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'edit_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'envmap': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bsdfs.EnvironmentMap']", 'null': 'True', 'blank': 'True'}),
'give_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'give_up_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'gloss_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gloss_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_blob': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'init_method': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bsdfs_mf'", 'to': u"orm['shapes.MaterialShape']"}),
'specular': ('django.db.models.fields.FloatField', [], {}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'bsdfs.shapebsdflabel_wd': {
'Meta': {'ordering': "['-edit_nnz', '-time_ms']", 'object_name': 'ShapeBsdfLabel_wd'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'admin_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'color_L': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_a': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_b': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'color_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'color_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'contrast': ('django.db.models.fields.FloatField', [], {}),
'doi': ('django.db.models.fields.IntegerField', [], {}),
'edit_dict': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edit_nnz': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'edit_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'envmap': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bsdfs.EnvironmentMap']", 'null': 'True', 'blank': 'True'}),
'give_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'give_up_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'gloss_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gloss_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_blob': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'init_method': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metallic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bsdfs_wd'", 'to': u"orm['shapes.MaterialShape']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'bsdfs.shapebsdfquality': {
'Meta': {'object_name': 'ShapeBsdfQuality'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'canttell': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'color_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'gloss_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'licenses.license': {
'Meta': {'object_name': 'License'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'cc_attribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cc_no_deriv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cc_noncommercial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cc_share_alike': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creative_commons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'publishable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
},
u'mturk.experiment': {
'Meta': {'ordering': "['slug', 'variant']", 'unique_together': "(('slug', 'variant'),)", 'object_name': 'Experiment'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'completed_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'cubam_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'examples_group_attr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'has_tutorial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'module': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'new_hit_settings': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiments'", 'null': 'True', 'to': u"orm['mturk.ExperimentSettings']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'template_dir': ('django.db.models.fields.CharField', [], {'default': "'mturk/experiments'", 'max_length': '255'}),
'test_contents_per_assignment': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'mturk.experimentsettings': {
'Meta': {'object_name': 'ExperimentSettings'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'auto_add_hits': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_approval_delay': ('django.db.models.fields.IntegerField', [], {'default': '2592000'}),
'content_filter': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'experiment_settings_in'", 'to': u"orm['contenttypes.ContentType']"}),
'contents_per_hit': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '1800'}),
'feedback_bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'frame_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {'default': '2678400'}),
'max_active_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'max_total_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'min_output_consensus': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'num_outputs_max': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'out_content_attr': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'out_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_settings_out'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'out_count_ratio': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'qualifications': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'requirements': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'reward': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '4'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'mturk.experimenttestcontent': {
'Meta': {'ordering': "['-id']", 'object_name': 'ExperimentTestContent'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'test_contents'", 'to': u"orm['mturk.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'priority': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'})
},
u'mturk.mtassignment': {
'Meta': {'object_name': 'MtAssignment'},
'accept_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'action_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'approval_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'approve_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auto_approval_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'bonus_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'feedback': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'feedback_bonus_given': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feedback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['mturk.MtHit']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'manually_rejected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'num_test_contents': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_test_correct': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_test_incorrect': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'partially_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'post_meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reject_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rejection_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'screen_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'screen_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'submission_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'submit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'test_contents': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'assignments'", 'symmetrical': 'False', 'to': u"orm['mturk.ExperimentTestContent']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_load_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'wage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'worker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']", 'null': 'True', 'blank': 'True'})
},
u'mturk.mthit': {
'Meta': {'object_name': 'MtHit'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'all_submitted_assignments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'any_submitted_assignments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'compatible_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hit_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'hit_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hits'", 'to': u"orm['mturk.MtHitType']"}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'incompatible_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'max_assignments': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'num_assignments_available': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_assignments_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_assignments_pending': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_contents': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'out_count_ratio': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'review_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'mturk.mthittype': {
'Meta': {'object_name': 'MtHitType'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'auto_approval_delay': ('django.db.models.fields.IntegerField', [], {'default': '2592000'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '3600'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hit_types'", 'to': u"orm['mturk.Experiment']"}),
'experiment_settings': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hit_types'", 'to': u"orm['mturk.ExperimentSettings']"}),
'external_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'feedback_bonus': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'frame_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'default': "'0.01'", 'max_digits': '8', 'decimal_places': '4'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'normals.shaperectifiednormallabel': {
'Meta': {'ordering': "['-admin_score']", 'object_name': 'ShapeRectifiedNormalLabel'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'admin_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'automatic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canvas_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'canvas_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'correct_score': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'focal_pixels': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_rectified': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'num_vanishing_lines': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pos_x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pos_y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rectified_normals'", 'to': u"orm['shapes.MaterialShape']"}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'uvnb': ('django.db.models.fields.TextField', [], {})
},
u'photos.flickruser': {
'Meta': {'ordering': "['-id']", 'object_name': 'FlickrUser'},
'blacklisted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '127'})
},
u'photos.photo': {
'Meta': {'ordering': "['aspect_ratio', '-id']", 'object_name': 'Photo'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'aspect_ratio': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'exif': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'flickr_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'flickr_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.FlickrUser']"}),
'focal_y': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'fov': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_orig': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'inappropriate': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['licenses.License']"}),
'light_stack': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.PhotoLightStack']"}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'median_intrinsic_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nonperspective': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'num_intrinsic_comparisons': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_intrinsic_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_shapes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_vertices': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rotated': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'scene_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photos'", 'null': 'True', 'to': u"orm['photos.PhotoSceneCategory']"}),
'scene_category_correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'scene_category_correct_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'stylized': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'vanishing_length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'vanishing_lines': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'vanishing_points': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'whitebalanced': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'whitebalanced_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'photos.photolightstack': {
'Meta': {'ordering': "['-id']", 'object_name': 'PhotoLightStack'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'photos.photoscenecategory': {
'Meta': {'ordering': "['name']", 'object_name': 'PhotoSceneCategory'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '127'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['photos.PhotoSceneCategory']", 'null': 'True', 'blank': 'True'})
},
u'shapes.materialshape': {
'Meta': {'ordering': "['-num_vertices', '-time_ms']", 'object_name': 'MaterialShape'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'area': ('django.db.models.fields.FloatField', [], {}),
'bsdf_wd': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bsdfs.ShapeBsdfLabel_wd']", 'null': 'True', 'blank': 'True'}),
'correct': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'correct_score': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'dominant_b': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dominant_delta': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dominant_g': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dominant_r': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dominant_rgb0': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '7', 'blank': 'True'}),
'dominant_rgb1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '7', 'blank': 'True'}),
'dominant_rgb2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '7', 'blank': 'True'}),
'dominant_rgb3': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '7', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_bbox': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'image_crop': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'image_pbox': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'blank': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label_pos_x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'label_pos_y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeName']", 'null': 'True', 'blank': 'True'}),
'name_entropy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nice': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'nice_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'num_segments': ('django.db.models.fields.IntegerField', [], {}),
'num_triangles': ('django.db.models.fields.IntegerField', [], {}),
'num_vertices': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'pbox': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pbox_aspect_ratio': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'material_shapes'", 'to': u"orm['photos.Photo']"}),
'pixel_area': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'planar': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'planar_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'planar_score': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'rectified_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rectified_normal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['normals.ShapeRectifiedNormalLabel']", 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'segments': ('django.db.models.fields.TextField', [], {}),
'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'special_slug': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'submitted_shapes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'material_shapes'", 'symmetrical': 'False', 'to': u"orm['shapes.SubmittedShape']"}),
'substance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeSubstance']", 'null': 'True', 'blank': 'True'}),
'substance_entropy': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'substance_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'triangles': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'vertices': ('django.db.models.fields.TextField', [], {})
},
u'shapes.shapename': {
'Meta': {'ordering': "['-fail', 'name']", 'object_name': 'ShapeName'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '127'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeName']", 'null': 'True', 'blank': 'True'}),
'representative_shape': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.MaterialShape']", 'null': 'True', 'blank': 'True'})
},
u'shapes.shapesubstance': {
'Meta': {'ordering': "['-fail', 'name']", 'object_name': 'ShapeSubstance'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'substances'", 'null': 'True', 'to': u"orm['shapes.ShapeSubstanceGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '127'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.ShapeSubstance']", 'null': 'True', 'blank': 'True'}),
'representative_shape': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shapes.MaterialShape']", 'null': 'True', 'blank': 'True'})
},
u'shapes.shapesubstancegroup': {
'Meta': {'ordering': "['-id']", 'object_name': 'ShapeSubstanceGroup'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'names': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'substance_groups'", 'symmetrical': 'False', 'to': u"orm['shapes.ShapeName']"})
},
u'shapes.submittedshape': {
'Meta': {'ordering': "['-time_ms']", 'object_name': 'SubmittedShape'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mturk_assignment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['mturk.MtAssignment']"}),
'num_vertices': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submitted_shapes'", 'to': u"orm['photos.Photo']"}),
'quality_method': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reward': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'sandbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shape_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'time_active_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'time_ms': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.UserProfile']"}),
'vertices': ('django.db.models.fields.TextField', [], {'null': 'True'})
}
}
complete_apps = ['bsdfs']
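# Illustrative note (not part of the generated migration): under South, a frozen
# migration like this one would typically be applied with something along the
# lines of `./manage.py migrate bsdfs`; the exact invocation depends on the
# surrounding project setup.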
|
|
from spider import Document, FetchTask, Storage
from logbook import Logger
import getopt
import time
import sys
import os
import logging
logger = Logger('Spider')
fend = None
def reduce_report(row1, row2):
# Assuming row1 and row2 share the same keys
r = {}
for key in row1:
r[key] = row1[key] + row2[key]
return r
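# Illustrative example (not from the original source): reduce_report adds values
# key by key, so folding it over a list of per-URL reports yields session totals:
#   reduce_report({'count': 1, 'succeeded': 1}, {'count': 1, 'succeeded': 0})
#   => {'count': 2, 'succeeded': 1}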
# FIXME: Temporary
def fetch_unfetched_urls(limit, opts):
from spider.database import Database
with Database(opts["db_path"], logger=logger) as db:
curs = db.cursor
curs.execute("SELECT url FROM document WHERE timestamp IS NULL LIMIT ?", (int(limit),))
return [row[0] for row in curs.fetchall()]
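# For example, against a hypothetical database with three unfetched rows this
# would return something like ['http://a.example/', 'http://b.example/',
# 'http://c.example/'] (URLs whose timestamp column is still NULL).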
def fetch_url(args):
url, opts = args
import random
import thread
import contextlib
from spider.database import Database
# thread ID
tid = thread.get_ident()
with contextlib.nested(
Database(opts["db_path"], logger=logger),
open(opts["log_path"], "a")) as (db, log):
storage = Storage('file')
url_entry = db.fetch_document(url)
has_url = (url_entry is not None)
request_succeeded = 0
new_urls_count = 0
fetch_flag = True
if has_url:
# TODO: Re-fetch if the stored timestamp is too old
pass
if not fetch_flag:
logger.info("URL entry (%s) already exists. Skipping..." % url)
else:
task = FetchTask(url, logger=logger)
# FIXME: Revise semantics
if fend is not None:
task.proxy_factory = fend.proxy_factory
try:
url_entry = task.run(db, opts)
request_succeeded = 1
storage.save(url, url_entry, opts)
if has_url:
db.update_document(url_entry)
else:
db.insert_document(url_entry)
if 'url_patterns' in opts:
for url_pattern in opts["url_patterns"]:
urls = url_entry.extract_urls(url_pattern)
new_urls_count += len(urls)
db.insert_urls(urls)
logger.info("[%x] Found %d URLs in %s.\n" % (tid, new_urls_count, url))
if "process_content" in opts:
opts["process_content"](url_entry)
except Exception as e:
logger.exception(e)
db.delete_url(url)
finally:
sys.stdout.write('+' if request_succeeded != 0 else '-')
sys.stdout.flush()
# number of bytes of the fetched url_entry
fetched_size = len(url_entry.content) if url_entry is not None and url_entry.content is not None else 0
return {"count": 1,
"succeeded": request_succeeded,
'new_urls_count': new_urls_count,
'fetched_size': fetched_size,
}
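# Note (added for clarity): each fetch_url call returns a small per-URL report;
# MultiThreadingMode folds these with reduce_report() into the session summary
# that ReportMode.generate_report() prints.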
class Frontend(object):
def __init__(self, opts, logger=logging):
self.opts = opts
self.logger = logger
self.proxy_factory = None
# Default values
if 'log_path' not in opts:
self.opts['log_path'] = 'spider.log'
# shared across multiple threads
self.shared = {}
def run(self):
raise NotImplementedError()
class SingleMode(Frontend):
def __init__(self, opts, logger):
super(SingleMode, self).__init__(opts, logger)
def run(self):
start_time = time.time()
# FIXME: This is a temporary solution
self.opts['storage_dir'] = './storage'
report = fetch_url((self.opts["url"], self.opts))
# print an empty line after the execution
print()
end_time = time.time()
report["time_elapsed"] = end_time - start_time # in seconds
ReportMode.generate_report(self.opts["db_path"], report, self.opts)
class MultiThreadingMode(Frontend):
def __init__(self, opts, logger):
super(MultiThreadingMode, self).__init__(opts, logger)
# Only set up the proxy factory when a hallucination DB URI is configured
# (e.g. when running via a profile); plain -m mode has no such setting.
if 'hallucination_db_uri' in opts:
from hallucination import ProxyFactory
self.proxy_factory = ProxyFactory(
config=dict(db_uri=opts['hallucination_db_uri']),
logger=logger,
)
def __del__(self):
pass
def run(self):
from multiprocessing.pool import ThreadPool
start_time = time.time()
unfetched_urls = fetch_unfetched_urls(self.opts["n_urls"], self.opts)
pool = ThreadPool(self.opts["n_proc"])
result = pool.map(fetch_url, map(lambda u: (u, self.opts), unfetched_urls))
report = {}
if result != []:
report = reduce(reduce_report, result)
end_time = time.time()
report["time_elapsed"] = end_time - start_time # in seconds
ReportMode.generate_report(self.opts["db_path"], report, self.opts)
class CreateDBMode(Frontend):
def __init__(self, opts, logger):
super(CreateDBMode, self).__init__(opts, logger)
def run(self):
from spider.database import Database
with Database(self.opts["db_path"], logger=logger) as db:
with open("scheme.txt") as f:
# FIXME: This may break in some cases
for sql in f.read().split(";"):
db.execute(sql)
class ReportMode(Frontend):
def __init__(self, opts, logger):
super(ReportMode, self).__init__(opts, logger)
def run(self):
if "db_path" not in self.opts:
raise Exception("Database path is not specified.")
else:
ReportMode.generate_report(self.opts["db_path"])
@staticmethod
def human_readable_size(size):
if size < 1024:
return "%d bytes" % size
elif size < 1024**2:
return "%.02f KB" % (float(size) / 1024)
elif size < 1024**3:
return "%.02f MB" % (float(size) / 1024**2)
else:
return "%.02f GB" % (float(size) / 1024**3)
@staticmethod
def generate_report(db_path, session_report=None, opts=None):
"""Prints out a status report to standard output. This function may be called from outside this class."""
from spider.database import Database
with Database(db_path, logger=logger) as db:
url_count = db.url_count
fetched_url_count = db.fetched_url_count
if session_report is not None and "count" in session_report:
print()
print("-[ Spider Report: This session ]------------------------------------")
print(" Number of fetch requests sent out: %d" % session_report["count"])
print(" Number of successful fetches: %s" % session_report["succeeded"])
print(" Time elapsed: %.03f sec" % session_report["time_elapsed"])
print(" Fetching speed: %.03f pages/sec" % (session_report["succeeded"] / session_report["time_elapsed"]))
print(" Total size of fetched documents: %s" % ReportMode.human_readable_size(session_report['fetched_size']))
print(" Number of newly found URLs: %d" % session_report['new_urls_count'])
print()
print("-[ Spider Report: Overall summary ]------------------------------------")
print(" Total number of URLs: %d" % url_count)
print(" Number of fetched URLs: %d" % fetched_url_count)
if url_count > 0:
print(" Progress: %.02f%%" % (100.0 * fetched_url_count / url_count))
print(" Database file size: %s" % ReportMode.human_readable_size(os.path.getsize(db_path)))
class ProfileMode(Frontend):
def __init__(self, opts, logger):
super(ProfileMode, self).__init__(opts, logger)
# If there is nothing to fetch, exit
# Figure out # of URLs to fetch
# Figure out optimal # of threads
# Continuously run multithreading mode
profile = __import__(self.opts['profile'])
# TODO: Any better way to handle this?
self.opts['n_urls'] = profile.URLS
self.opts['n_proc'] = profile.THREADS
self.opts['db_path'] = profile.DB_URI
self.opts['url_patterns'] = profile.URL_PATTERNS
self.opts['storage_dir'] = profile.STORAGE_DIR
self.opts['process_content'] = profile.process_content
self.opts['hallucination_db_uri'] = profile.HALLUCINATION_DB_URI
self.opts['user_agent'] = profile.USER_AGENT
from spider.database import Database
with Database(self.opts['db_path'], logger=logger) as db:
db.insert_urls(profile.ENTRY_POINTS)
def run(self):
multimode = MultiThreadingMode(self.opts, logger)
multimode.run()
def parse_args(args):
optlist, args = getopt.getopt(args, "u:n:t:d:p:smag", ("url=", "create-db", "single", "multithreading", "generate-report", "auto"))
opts = {}
for o, a in optlist:
if o == '-n':
opts['n_urls'] = int(a)
elif o == '-t':
opts['n_proc'] = int(a)
elif o == '-d':
opts['db_path'] = a
elif o in ('-u', '--url'):
opts['url'] = a
elif o == '-p':
opts['run_mode'] = 'profile'
opts['profile'] = a
elif o == '--create-db':
opts['run_mode'] = 'create_db'
elif o in ('-s', '--single'):
opts['run_mode'] = 'single'
opts['n_urls'] = 1
elif o in ('-m', '--multithreading'):
opts['run_mode'] = 'multithreading'
elif o in ('-a', '--auto'):
opts['run_mode'] = 'auto'
elif o in ('-g', '--generate-report'):
opts['run_mode'] = 'generate_report'
return opts
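# Example (hypothetical invocation, added for clarity):
#   parse_args(['-m', '-d', 'spider.db', '-n', '100', '-t', '8'])
#   => {'run_mode': 'multithreading', 'db_path': 'spider.db', 'n_urls': 100, 'n_proc': 8}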
def validate_runtime_options(opts):
if 'run_mode' not in opts:
return (False, 'Run mode is not specified')
elif (opts['run_mode'] == 'create_db'):
if ('db_path' not in opts):
return (False, 'SQLite3 database path must be supplied (-d)')
else:
return (True, '')
elif (opts['run_mode'] == 'single') and ('db_path' in opts) and ('url' in opts):
return (True, '')
elif (opts['run_mode'] == 'multithreading'):
if ('db_path' not in opts):
return (False, 'SQLite3 database path must be supplied (-d)')
elif ('n_urls' not in opts):
return (False, 'Specify the number of URLs to fetch (-n)')
elif ('n_proc' not in opts):
return (False, 'Specify the number of threads (-t)')
else:
return (True, '')
elif (opts['run_mode'] == 'profile'):
if ('profile' not in opts):
return (False, 'Specify a profile to run (-p)')
else:
return (True, '')
return (False, 'Unclassified error')
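# For instance, validate_runtime_options({'run_mode': 'single'}) falls through to
# (False, 'Unclassified error') because neither 'db_path' nor 'url' is present.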
def main():
opts = parse_args(sys.argv[1:])
valid, message = validate_runtime_options(opts)
if valid:
run_mode = opts['run_mode']
fc = {
'create_db': CreateDBMode,
'single': SingleMode,
'multithreading': MultiThreadingMode,
'generate_report': ReportMode,
'profile': ProfileMode,
}
global fend
fend = fc[run_mode](opts, logger=logger)
fend.run()
else:
sys.stderr.write(message + '\n')
if __name__ == '__main__':
main()
|
|
import os
import environ
import oscar
env = environ.Env()
# Path helper
location = lambda x: os.path.join(
os.path.dirname(os.path.realpath(__file__)), x)
DEBUG = env.bool('DEBUG', default=True)
ALLOWED_HOSTS = [
'latest.oscarcommerce.com',
'master.oscarcommerce.com',
'localhost',
'127.0.0.1',
]
# This is needed for the hosted version of the sandbox
ADMINS = (
('David Winterbottom', '[email protected]'),
('Michael van Tellingen', '[email protected]'),
)
EMAIL_SUBJECT_PREFIX = '[Oscar sandbox] '
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MANAGERS = ADMINS
# Use a Sqlite database by default
DATABASES = {
'default': {
'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.environ.get('DATABASE_NAME', location('db.sqlite')),
'USER': os.environ.get('DATABASE_USER', None),
'PASSWORD': os.environ.get('DATABASE_PASSWORD', None),
'HOST': os.environ.get('DATABASE_HOST', None),
'PORT': os.environ.get('DATABASE_PORT', None),
'ATOMIC_REQUESTS': True
}
}
CACHES = {
'default': env.cache(default='locmemcache://'),
}
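# Note (assumption based on django-environ defaults): env.cache() reads the
# CACHE_URL environment variable, so e.g. CACHE_URL=redis://127.0.0.1:6379/1
# would switch the default cache away from local memory.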
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
USE_TZ = True
TIME_ZONE = 'Europe/London'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# Includes all languages that have >50% coverage in Transifex
# Taken from Django's default setting for LANGUAGES
gettext_noop = lambda s: s
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('en-gb', gettext_noop('British English')),
('el', gettext_noop('Greek')),
('es', gettext_noop('Spanish')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('it', gettext_noop('Italian')),
('ko', gettext_noop('Korean')),
('nl', gettext_noop('Dutch')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('uk', gettext_noop('Ukrainian')),
('zh-cn', gettext_noop('Simplified Chinese')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("public/media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATIC_ROOT = location('public/static')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_DIRS = (
location('static/'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
location('templates'),
oscar.OSCAR_MAIN_TEMPLATE_DIR,
],
'OPTIONS': {
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
],
'debug': DEBUG,
}
}
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
# Allow languages to be selected
'django.middleware.locale.LocaleMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.middleware.common.CommonMiddleware',
# Ensure a valid basket is added to the request instance for every request
'oscar.apps.basket.middleware.BasketMiddleware',
]
ROOT_URLCONF = 'urls'
# A sample logging configuration. Everything is logged to the console by
# default; a few noisy loggers are routed to the 'null' handler instead.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s',
},
'simple': {
'format': '[%(asctime)s] %(message)s'
},
},
'root': {
'level': 'DEBUG',
'handlers': ['console'],
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'oscar': {
'level': 'DEBUG',
'propagate': True,
},
'oscar.catalogue.import': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
'oscar.alerts': {
'handlers': ['null'],
'level': 'INFO',
'propagate': False,
},
# Django loggers
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True,
},
'django.db.backends': {
'level': 'WARNING',
'propagate': True,
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
# Third party
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sorl.thumbnail': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
},
}
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django_extensions',
# Debug toolbar + extensions
'debug_toolbar',
'apps.gateway', # For allowing dashboard access
'widget_tweaks',
] + oscar.get_core_apps()
# Add Oscar's custom auth backend so users can sign in using their email
# address.
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 9,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
]
LOGIN_REDIRECT_URL = '/'
APPEND_SLASH = True
# ====================
# Messages contrib app
# ====================
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Haystack settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': location('whoosh_index'),
},
}
# Here's a sample Haystack config if using Solr (which is recommended)
#HAYSTACK_CONNECTIONS = {
# 'default': {
# 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
# 'URL': u'http://127.0.0.1:8983/solr/oscar_latest/',
# 'INCLUDE_SPELLING': True
# },
#}
# =============
# Debug Toolbar
# =============
INTERNAL_IPS = ['127.0.0.1', '::1']
# ==============
# Oscar settings
# ==============
from oscar.defaults import *
# Meta
# ====
OSCAR_SHOP_TAGLINE = 'Sandbox'
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
OSCAR_ALLOW_ANON_CHECKOUT = True
# Order processing
# ================
# Sample order/line status settings. This is quite simplistic; it's likely you'll
# want to override the set_status method on the order object to do more
# sophisticated things (see the commented sketch after the status dicts below).
OSCAR_INITIAL_ORDER_STATUS = 'Pending'
OSCAR_INITIAL_LINE_STATUS = 'Pending'
# This dict defines the new order statuses that an order can move to
OSCAR_ORDER_STATUS_PIPELINE = {
'Pending': ('Being processed', 'Cancelled',),
'Being processed': ('Complete', 'Cancelled',),
'Cancelled': (),
'Complete': (),
}
# This dict defines the line statuses that will be set when an order's status
# is changed
OSCAR_ORDER_STATUS_CASCADE = {
'Being processed': 'Being processed',
'Cancelled': 'Cancelled',
'Complete': 'Shipped',
}
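# A minimal, commented sketch of the kind of set_status override mentioned
# above (hypothetical project layout; notify_customer is a made-up helper).
# Oscar's AbstractOrder.set_status validates transitions against
# OSCAR_ORDER_STATUS_PIPELINE and cascades line statuses, so the override only
# needs to add extra behaviour. A forked order app would also need to be
# registered via oscar.get_core_apps([...]) in INSTALLED_APPS.
#
# # myproject/order/models.py
# from oscar.apps.order.abstract_models import AbstractOrder
#
# class Order(AbstractOrder):
#     def set_status(self, new_status):
#         super(Order, self).set_status(new_status)
#         if new_status == 'Complete':
#             notify_customer(self)  # hypothetical helper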
# LESS/CSS
# ========
# We default to using CSS files, rather than the LESS files that generate them.
# If you want to develop Oscar's CSS, then set OSCAR_USE_LESS=True to enable the
# on-the-fly less processor.
OSCAR_USE_LESS = False
# Sentry
# ======
if env('SENTRY_DSN', default=None):
RAVEN_CONFIG = {'dsn': env('SENTRY_DSN', default=None)}
LOGGING['handlers']['sentry'] = {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
}
LOGGING['root']['handlers'].append('sentry')
INSTALLED_APPS.append('raven.contrib.django.raven_compat')
# Sorl
# ====
THUMBNAIL_DEBUG = DEBUG
THUMBNAIL_KEY_PREFIX = 'oscar-sandbox'
THUMBNAIL_KVSTORE = env(
'THUMBNAIL_KVSTORE',
default='sorl.thumbnail.kvstores.cached_db_kvstore.KVStore')
THUMBNAIL_REDIS_URL = env('THUMBNAIL_REDIS_URL', default=None)
# Django 1.6 switched to JSON session serialization for security reasons, but
# JSON cannot serialize model instances. We should resolve this by extending
# django/core/serializers/json.Serializer with a `dumps` function. Also
# in tests/config.py
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# Try and import local settings which can be used to override any of the above.
try:
from settings_local import *
except ImportError:
pass
|
|
# -*- coding: utf-8 -*-
"""
GraphicsView.py - Extension of QGraphicsView
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from PyQt4 import QtCore, QtGui, QtOpenGL, QtSvg
#from numpy import vstack
#import time
from Point import *
#from vector import *
import sys
#import debug
class GraphicsView(QtGui.QGraphicsView):
sigRangeChanged = QtCore.Signal(object, object)
sigMouseReleased = QtCore.Signal(object)
sigSceneMouseMoved = QtCore.Signal(object)
#sigRegionChanged = QtCore.Signal(object)
def __init__(self, parent=None, useOpenGL=True):
"""Re-implementation of QGraphicsView that removes scrollbars and allows unambiguous control of the
viewed coordinate range. Also automatically creates a QGraphicsScene and a central QGraphicsWidget
that is automatically scaled to the full view geometry.
By default, the view coordinate system matches the widget's pixel coordinates and
automatically updates when the view is resized. This can be overridden by setting
autoPixelRange=False. The exact visible range can be set with setRange().
The view can be panned using the middle mouse button and scaled using the right mouse button if
enabled via enableMouse()."""
self.closed = False
QtGui.QGraphicsView.__init__(self, parent)
if 'linux' in sys.platform: ## linux has bugs in opengl implementation
useOpenGL = False
self.useOpenGL(useOpenGL)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0,0,0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active,QtGui.QPalette.Base,brush)
brush = QtGui.QBrush(QtGui.QColor(0,0,0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive,QtGui.QPalette.Base,brush)
brush = QtGui.QBrush(QtGui.QColor(244,244,244))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled,QtGui.QPalette.Base,brush)
self.setPalette(palette)
#self.setProperty("cursor",QtCore.QVariant(QtCore.Qt.ArrowCursor))
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setFrameShape(QtGui.QFrame.NoFrame)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setTransformationAnchor(QtGui.QGraphicsView.NoAnchor)
self.setResizeAnchor(QtGui.QGraphicsView.AnchorViewCenter)
#self.setResizeAnchor(QtGui.QGraphicsView.NoAnchor)
self.setViewportUpdateMode(QtGui.QGraphicsView.SmartViewportUpdate)
self.setSceneRect(QtCore.QRectF(-1e10, -1e10, 2e10, 2e10))
#self.setSceneRect(1, 1, 0, 0) ## Set an empty (but non-zero) scene rect so that the view doesn't try to automatically update for us.
#self.setInteractive(False)
self.lockedViewports = []
self.lastMousePos = None
#self.setMouseTracking(False)
self.aspectLocked = False
self.yInverted = True
self.range = QtCore.QRectF(0, 0, 1, 1)
self.autoPixelRange = True
self.currentItem = None
self.clearMouse()
self.updateMatrix()
self.sceneObj = QtGui.QGraphicsScene()
self.setScene(self.sceneObj)
## by default we set up a central widget with a grid layout.
## this can be replaced if needed.
self.centralWidget = None
self.setCentralItem(QtGui.QGraphicsWidget())
self.centralLayout = QtGui.QGraphicsGridLayout()
self.centralWidget.setLayout(self.centralLayout)
self.mouseEnabled = False
self.scaleCenter = False ## should scaling center around view center (True) or mouse click (False)
self.clickAccepted = False
#def paintEvent(self, *args):
#prof = debug.Profiler('GraphicsView.paintEvent '+str(id(self)), disabled=True)
#QtGui.QGraphicsView.paintEvent(self, *args)
#prof.finish()
def close(self):
self.centralWidget = None
self.scene().clear()
#print " ", self.scene().itemCount()
self.currentItem = None
self.sceneObj = None
self.closed = True
def useOpenGL(self, b=True):
if b:
v = QtOpenGL.QGLWidget()
else:
v = QtGui.QWidget()
#v.setStyleSheet("background-color: #000000;")
self.setViewport(v)
def keyPressEvent(self, ev):
ev.ignore()
def setCentralItem(self, item):
"""Sets a QGraphicsWidget to automatically fill the entire view."""
if self.centralWidget is not None:
self.scene().removeItem(self.centralWidget)
self.centralWidget = item
self.sceneObj.addItem(item)
def addItem(self, *args):
return self.scene().addItem(*args)
def removeItem(self, *args):
return self.scene().removeItem(*args)
def enableMouse(self, b=True):
self.mouseEnabled = b
self.autoPixelRange = (not b)
def clearMouse(self):
self.mouseTrail = []
self.lastButtonReleased = None
def resizeEvent(self, ev):
if self.closed:
return
if self.autoPixelRange:
self.range = QtCore.QRectF(0, 0, self.size().width(), self.size().height())
self.setRange(self.range, padding=0, disableAutoPixel=False)
self.updateMatrix()
def updateMatrix(self, propagate=True):
#print "udpateMatrix:"
translate = Point(self.range.center())
if self.range.width() == 0 or self.range.height() == 0:
return
scale = Point(self.size().width()/self.range.width(), self.size().height()/self.range.height())
m = QtGui.QTransform()
## First center the viewport at 0
self.resetMatrix()
center = self.viewportTransform().inverted()[0].map(Point(self.width()/2., self.height()/2.))
if self.yInverted:
m.translate(center.x(), center.y())
#print " inverted; translate", center.x(), center.y()
else:
m.translate(center.x(), -center.y())
#print " not inverted; translate", center.x(), -center.y()
## Now scale and translate properly
if self.aspectLocked:
scale = Point(scale.min())
if not self.yInverted:
scale = scale * Point(1, -1)
m.scale(scale[0], scale[1])
#print " scale:", scale
st = translate
m.translate(-st[0], -st[1])
#print " translate:", st
self.setTransform(m)
self.currentScale = scale
#self.emit(QtCore.SIGNAL('viewChanged'), self.range)
self.sigRangeChanged.emit(self, self.range)
if propagate:
for v in self.lockedViewports:
v.setXRange(self.range, padding=0)
def visibleRange(self):
"""Return the boundaries of the view in scene coordinates"""
## easier to just return self.range ?
r = QtCore.QRectF(self.rect())
return self.viewportTransform().inverted()[0].mapRect(r)
def translate(self, dx, dy):
self.range.adjust(dx, dy, dx, dy)
self.updateMatrix()
def scale(self, sx, sy, center=None):
scale = [sx, sy]
if self.aspectLocked:
scale[0] = scale[1]
#adj = (self.range.width()*0.5*(1.0-(1.0/scale[0])), self.range.height()*0.5*(1.0-(1.0/scale[1])))
#print "======\n", scale, adj
#print self.range
#self.range.adjust(adj[0], adj[1], -adj[0], -adj[1])
#print self.range
if self.scaleCenter:
center = None
if center is None:
center = self.range.center()
w = self.range.width() / scale[0]
h = self.range.height() / scale[1]
self.range = QtCore.QRectF(center.x() - (center.x()-self.range.left()) / scale[0], center.y() - (center.y()-self.range.top()) /scale[1], w, h)
self.updateMatrix()
def setRange(self, newRect=None, padding=0.05, lockAspect=None, propagate=True, disableAutoPixel=True):
if disableAutoPixel:
self.autoPixelRange=False
if newRect is None:
newRect = self.visibleRange()
padding = 0
padding = Point(padding)
newRect = QtCore.QRectF(newRect)
pw = newRect.width() * padding[0]
ph = newRect.height() * padding[1]
self.range = newRect.adjusted(-pw, -ph, pw, ph)
#print "New Range:", self.range
self.centralWidget.setGeometry(self.range)
self.updateMatrix(propagate)
def scaleToImage(self, image):
"""Scales such that pixels in image are the same size as screen pixels. This may result in a significant performance increase."""
pxSize = image.pixelSize()
tl = image.sceneBoundingRect().topLeft()
w = self.size().width() * pxSize[0]
h = self.size().height() * pxSize[1]
range = QtCore.QRectF(tl.x(), tl.y(), w, h)
self.setRange(range, padding=0)
def lockXRange(self, v1):
if not v1 in self.lockedViewports:
self.lockedViewports.append(v1)
def setXRange(self, r, padding=0.05):
r1 = QtCore.QRectF(self.range)
r1.setLeft(r.left())
r1.setRight(r.right())
self.setRange(r1, padding=[padding, 0], propagate=False)
def setYRange(self, r, padding=0.05):
r1 = QtCore.QRectF(self.range)
r1.setTop(r.top())
r1.setBottom(r.bottom())
self.setRange(r1, padding=[0, padding], propagate=False)
def invertY(self, invert=True):
#if self.yInverted != invert:
#self.scale[1] *= -1.
self.yInverted = invert
self.updateMatrix()
def wheelEvent(self, ev):
QtGui.QGraphicsView.wheelEvent(self, ev)
if not self.mouseEnabled:
return
sc = 1.001 ** ev.delta()
#self.scale *= sc
#self.updateMatrix()
self.scale(sc, sc)
def setAspectLocked(self, s):
self.aspectLocked = s
#def mouseDoubleClickEvent(self, ev):
#QtGui.QGraphicsView.mouseDoubleClickEvent(self, ev)
#pass
### This function is here because interactive mode is disabled due to bugs.
#def graphicsSceneEvent(self, ev, pev=None, fev=None):
#ev1 = GraphicsSceneMouseEvent()
#ev1.setPos(QtCore.QPointF(ev.pos().x(), ev.pos().y()))
#ev1.setButtons(ev.buttons())
#ev1.setButton(ev.button())
#ev1.setModifiers(ev.modifiers())
#ev1.setScenePos(self.mapToScene(QtCore.QPoint(ev.pos())))
#if pev is not None:
#ev1.setLastPos(pev.pos())
#ev1.setLastScenePos(pev.scenePos())
#ev1.setLastScreenPos(pev.screenPos())
#if fev is not None:
#ev1.setButtonDownPos(fev.pos())
#ev1.setButtonDownScenePos(fev.scenePos())
#ev1.setButtonDownScreenPos(fev.screenPos())
#return ev1
def mousePressEvent(self, ev):
QtGui.QGraphicsView.mousePressEvent(self, ev)
#print "Press over:"
#for i in self.items(ev.pos()):
# print i.zValue(), int(i.acceptedMouseButtons()), i, i.scenePos()
#print "Event accepted:", ev.isAccepted()
#print "Grabber:", self.scene().mouseGrabberItem()
if not self.mouseEnabled:
return
self.lastMousePos = Point(ev.pos())
self.mousePressPos = ev.pos()
self.clickAccepted = ev.isAccepted()
if not self.clickAccepted:
self.scene().clearSelection()
return ## Everything below disabled for now..
#self.currentItem = None
#maxZ = None
#for i in self.items(ev.pos()):
#if maxZ is None or maxZ < i.zValue():
#self.currentItem = i
#maxZ = i.zValue()
#print "make event"
#self.pev = self.graphicsSceneEvent(ev)
#self.fev = self.pev
#if self.currentItem is not None:
#self.currentItem.mousePressEvent(self.pev)
##self.clearMouse()
##self.mouseTrail.append(Point(self.mapToScene(ev.pos())))
#self.emit(QtCore.SIGNAL("mousePressed(PyQt_PyObject)"), self.mouseTrail)
def mouseReleaseEvent(self, ev):
QtGui.QGraphicsView.mouseReleaseEvent(self, ev)
if not self.mouseEnabled:
return
#self.mouseTrail.append(Point(self.mapToScene(ev.pos())))
#self.emit(QtCore.SIGNAL("mouseReleased"), ev)
self.sigMouseReleased.emit(ev)
self.lastButtonReleased = ev.button()
return ## Everything below disabled for now..
##self.mouseTrail.append(Point(self.mapToScene(ev.pos())))
#self.emit(QtCore.SIGNAL("mouseReleased(PyQt_PyObject)"), self.mouseTrail)
#if self.currentItem is not None:
#pev = self.graphicsSceneEvent(ev, self.pev, self.fev)
#self.pev = pev
#self.currentItem.mouseReleaseEvent(pev)
#self.currentItem = None
def mouseMoveEvent(self, ev):
if self.lastMousePos is None:
self.lastMousePos = Point(ev.pos())
delta = Point(ev.pos()) - self.lastMousePos
self.lastMousePos = Point(ev.pos())
QtGui.QGraphicsView.mouseMoveEvent(self, ev)
if not self.mouseEnabled:
return
#self.emit(QtCore.SIGNAL("sceneMouseMoved(PyQt_PyObject)"), self.mapToScene(ev.pos()))
self.sigSceneMouseMoved.emit(self.mapToScene(ev.pos()))
#print "moved. Grabber:", self.scene().mouseGrabberItem()
if self.clickAccepted: ## Ignore event if an item in the scene has already claimed it.
return
if ev.buttons() == QtCore.Qt.RightButton:
delta = Point(clip(delta[0], -50, 50), clip(-delta[1], -50, 50))
scale = 1.01 ** delta
#if self.yInverted:
#scale[0] = 1. / scale[0]
self.scale(scale[0], scale[1], center=self.mapToScene(self.mousePressPos))
#self.emit(QtCore.SIGNAL('regionChanged(QRectF)'), self.range)
self.sigRangeChanged.emit(self, self.range)
elif ev.buttons() in [QtCore.Qt.MidButton, QtCore.Qt.LeftButton]: ## Allow panning by left or mid button.
tr = -delta / self.currentScale
self.translate(tr[0], tr[1])
#self.emit(QtCore.SIGNAL('regionChanged(QRectF)'), self.range)
self.sigRangeChanged.emit(self, self.range)
#return ## Everything below disabled for now..
##self.mouseTrail.append(Point(self.mapToScene(ev.pos())))
#if self.currentItem is not None:
#pev = self.graphicsSceneEvent(ev, self.pev, self.fev)
#self.pev = pev
#self.currentItem.mouseMoveEvent(pev)
def writeSvg(self, fileName=None):
if fileName is None:
fileName = str(QtGui.QFileDialog.getSaveFileName())
self.svg = QtSvg.QSvgGenerator()
self.svg.setFileName(fileName)
self.svg.setSize(self.size())
self.svg.setResolution(600)
painter = QtGui.QPainter(self.svg)
self.render(painter)
def writeImage(self, fileName=None):
if fileName is None:
fileName = str(QtGui.QFileDialog.getSaveFileName())
self.png = QtGui.QImage(self.size(), QtGui.QImage.Format_ARGB32)
painter = QtGui.QPainter(self.png)
rh = self.renderHints()
self.setRenderHints(QtGui.QPainter.Antialiasing)
self.render(painter)
self.setRenderHints(rh)
self.png.save(fileName)
def writePs(self, fileName=None):
if fileName is None:
fileName = str(QtGui.QFileDialog.getSaveFileName())
printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
printer.setOutputFileName(fileName)
painter = QtGui.QPainter(printer)
self.render(painter)
painter.end()
def dragEnterEvent(self, ev):
ev.ignore() ## not sure why, but for some reason this class likes to consume drag events
#def getFreehandLine(self):
## Wait for click
#self.clearMouse()
#while self.lastButtonReleased != QtCore.Qt.LeftButton:
#QtGui.qApp.sendPostedEvents()
#QtGui.qApp.processEvents()
#time.sleep(0.01)
#fl = vstack(self.mouseTrail)
#return fl
#def getClick(self):
#fl = self.getFreehandLine()
#return fl[-1]
#class GraphicsSceneMouseEvent(QtGui.QGraphicsSceneMouseEvent):
#"""Stand-in class for QGraphicsSceneMouseEvent"""
#def __init__(self):
#QtGui.QGraphicsSceneMouseEvent.__init__(self)
#def setPos(self, p):
#self.vpos = p
#def setButtons(self, p):
#self.vbuttons = p
#def setButton(self, p):
#self.vbutton = p
#def setModifiers(self, p):
#self.vmodifiers = p
#def setScenePos(self, p):
#self.vscenePos = p
#def setLastPos(self, p):
#self.vlastPos = p
#def setLastScenePos(self, p):
#self.vlastScenePos = p
#def setLastScreenPos(self, p):
#self.vlastScreenPos = p
#def setButtonDownPos(self, p):
#self.vbuttonDownPos = p
#def setButtonDownScenePos(self, p):
#self.vbuttonDownScenePos = p
#def setButtonDownScreenPos(self, p):
#self.vbuttonDownScreenPos = p
#def pos(self):
#return self.vpos
#def buttons(self):
#return self.vbuttons
#def button(self):
#return self.vbutton
#def modifiers(self):
#return self.vmodifiers
#def scenePos(self):
#return self.vscenePos
#def lastPos(self):
#return self.vlastPos
#def lastScenePos(self):
#return self.vlastScenePos
#def lastScreenPos(self):
#return self.vlastScreenPos
#def buttonDownPos(self):
#return self.vbuttonDownPos
#def buttonDownScenePos(self):
#return self.vbuttonDownScenePos
#def buttonDownScreenPos(self):
#return self.vbuttonDownScreenPos
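## A minimal usage sketch, assuming a working PyQt4 installation (the window
## size and range values are arbitrary). enableMouse(True) disables
## autoPixelRange, so the range set here is preserved, and right-button
## scaling / middle-button panning become available.
if __name__ == '__main__':
    app = QtGui.QApplication([])
    view = GraphicsView(useOpenGL=False)
    view.enableMouse(True)
    view.setRange(QtCore.QRectF(0, 0, 100, 100), padding=0)
    view.resize(400, 400)
    view.show()
    app.exec_()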
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.http import urlencode
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard import api
from openstack_dashboard.api import base
NOT_LAUNCHABLE_FORMATS = ['aki', 'ari']
class LaunchImage(tables.LinkAction):
name = "launch_image"
verbose_name = _("Launch Instance")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "compute:create"),)
def get_link_url(self, datum):
base_url = reverse(self.url)
if get_image_type(datum) == "image":
source_type = "image_id"
else:
source_type = "instance_snapshot_id"
params = urlencode({"source_type": source_type,
"source_id": self.table.get_object_id(datum)})
return "?".join([base_url, params])
def allowed(self, request, image=None):
if image and image.container_format not in NOT_LAUNCHABLE_FORMATS:
return image.status in ("active",)
return False
class LaunchImageNG(LaunchImage):
name = "launch_image_ng"
verbose_name = _("Launch")
url = "horizon:project:images:index"
classes = ("btn-launch", )
ajax = False
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(LaunchImage, self).__init__(attrs, **kwargs)
def get_link_url(self, datum):
imageId = self.table.get_object_id(datum)
url = reverse(self.url)
ngclick = "modal.openLaunchInstanceWizard(" \
"{successUrl: '%s', imageId: '%s'})" % (url, imageId)
self.attrs.update({
"ng-controller": "LaunchInstanceModalController as modal",
"ng-click": ngclick
})
return "javascript:void(0);"
class DeleteImage(tables.DeleteAction):
# NOTE: The bp/add-batchactions-help-text
# will add appropriate help text to some batch/delete actions.
help_text = _("Deleted images are not recoverable.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Image",
u"Delete Images",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Image",
u"Deleted Images",
count
)
policy_rules = (("image", "delete_image"),)
def allowed(self, request, image=None):
# Protected images can not be deleted.
if image and image.protected:
return False
if image:
return image.owner == request.user.tenant_id
# Return True to allow table-level bulk delete action to appear.
return True
def delete(self, request, obj_id):
api.glance.image_delete(request, obj_id)
class CreateImage(tables.LinkAction):
name = "create"
verbose_name = _("Create Image")
url = "horizon:project:images:images:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("image", "add_image"),)
class EditImage(tables.LinkAction):
name = "edit"
verbose_name = _("Edit Image")
url = "horizon:project:images:images:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("image", "modify_image"),)
def allowed(self, request, image=None):
if image:
return image.status in ("active",) and \
image.owner == request.user.tenant_id
# We don't have bulk editing, so if there isn't an image that's
# authorized, don't allow the action.
return False
class CreateVolumeFromImage(tables.LinkAction):
name = "create_volume_from_image"
verbose_name = _("Create Volume")
url = "horizon:project:volumes:volumes:create"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("volume", "volume:create"),)
def get_link_url(self, datum):
base_url = reverse(self.url)
params = urlencode({"image_id": self.table.get_object_id(datum)})
return "?".join([base_url, params])
def allowed(self, request, image=None):
if (image and image.container_format not in NOT_LAUNCHABLE_FORMATS
and base.is_service_enabled(request, 'volume')):
return image.status == "active"
return False
def filter_tenants():
return getattr(settings, 'IMAGES_LIST_FILTER_TENANTS', [])
@memoized
def filter_tenant_ids():
return map(lambda ft: ft['tenant'], filter_tenants())
class OwnerFilter(tables.FixedFilterAction):
def get_fixed_buttons(self):
def make_dict(text, tenant, icon):
return dict(text=text, value=tenant, icon=icon)
buttons = [make_dict(_('Project'), 'project', 'fa-home')]
for button_dict in filter_tenants():
new_dict = button_dict.copy()
new_dict['value'] = new_dict['tenant']
buttons.append(new_dict)
buttons.append(make_dict(_('Shared with Me'), 'shared',
'fa-share-square-o'))
buttons.append(make_dict(_('Public'), 'public', 'fa-group'))
return buttons
def categorize(self, table, images):
user_tenant_id = table.request.user.tenant_id
tenants = defaultdict(list)
for im in images:
categories = get_image_categories(im, user_tenant_id)
for category in categories:
tenants[category].append(im)
return tenants
def get_image_categories(im, user_tenant_id):
categories = []
if im.is_public:
categories.append('public')
if im.owner == user_tenant_id:
categories.append('project')
elif im.owner in filter_tenant_ids():
categories.append(im.owner)
elif not im.is_public:
categories.append('shared')
return categories
def get_image_name(image):
return getattr(image, "name", None) or image.id
def get_image_type(image):
return getattr(image, "properties", {}).get("image_type", "image")
def get_format(image):
format = getattr(image, "disk_format", "")
    # The "disk_format" attribute can actually be set to None,
    # which will raise an error if you call upper() on it.
if not format:
return format
if format == "raw":
if getattr(image, "container_format") == 'docker':
return pgettext_lazy("Image format for display in table",
u"Docker")
# Most image formats are untranslated acronyms, but raw is a word
# and should be translated
return pgettext_lazy("Image format for display in table", u"Raw")
return format.upper()
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, image_id):
image = api.glance.image_get(request, image_id)
return image
def load_cells(self, image=None):
super(UpdateRow, self).load_cells(image)
# Tag the row with the image category for client-side filtering.
image = self.datum
my_tenant_id = self.table.request.user.tenant_id
image_categories = get_image_categories(image, my_tenant_id)
for category in image_categories:
self.classes.append('category-' + category)
class ImagesTable(tables.DataTable):
STATUS_CHOICES = (
("active", True),
("saving", None),
("queued", None),
("pending_delete", None),
("killed", False),
("deleted", False),
)
STATUS_DISPLAY_CHOICES = (
("active", pgettext_lazy("Current status of an Image", u"Active")),
("saving", pgettext_lazy("Current status of an Image", u"Saving")),
("queued", pgettext_lazy("Current status of an Image", u"Queued")),
("pending_delete", pgettext_lazy("Current status of an Image",
u"Pending Delete")),
("killed", pgettext_lazy("Current status of an Image", u"Killed")),
("deleted", pgettext_lazy("Current status of an Image", u"Deleted")),
)
TYPE_CHOICES = (
("image", pgettext_lazy("Type of an image", u"Image")),
("snapshot", pgettext_lazy("Type of an image", u"Snapshot")),
)
name = tables.Column(get_image_name,
link="horizon:project:images:images:detail",
truncate=40,
verbose_name=_("Image Name"),)
image_type = tables.Column(get_image_type,
verbose_name=_("Type"),
display_choices=TYPE_CHOICES)
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
public = tables.Column("is_public",
verbose_name=_("Public"),
empty_value=False,
filters=(filters.yesno, filters.capfirst))
protected = tables.Column("protected",
verbose_name=_("Protected"),
empty_value=False,
filters=(filters.yesno, filters.capfirst))
disk_format = tables.Column(get_format, verbose_name=_("Format"))
size = tables.Column("size",
filters=(filters.filesizeformat,),
attrs=({"data-type": "size"}),
verbose_name=_("Size"))
class Meta(object):
name = "images"
row_class = UpdateRow
status_columns = ["status"]
verbose_name = _("Images")
table_actions = (OwnerFilter, CreateImage, DeleteImage,)
launch_actions = ()
if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', True):
launch_actions = (LaunchImage,) + launch_actions
if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', False):
launch_actions = (LaunchImageNG,) + launch_actions
row_actions = launch_actions + (CreateVolumeFromImage,
EditImage, DeleteImage,)
pagination_param = "image_marker"
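# A sketch of the optional IMAGES_LIST_FILTER_TENANTS setting consumed by
# filter_tenants() / OwnerFilter above (the tenant id below is a placeholder).
# Each entry supplies the 'tenant' id plus the 'text' and 'icon' used to
# render its fixed filter button:
#
# IMAGES_LIST_FILTER_TENANTS = [
#     {'tenant': '<tenant-id>', 'text': 'Official', 'icon': 'fa-check'},
# ]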
|
|
''' Document Localization using Recursive CNN
Maintainer : Khurram Javed
Email : [email protected] '''
import csv
import logging
import os
import xml.etree.ElementTree as ET
import numpy as np
from torchvision import transforms
import utils.utils as utils
# To include a new Dataset, inherit from Dataset and add all the Dataset specific parameters here.
# Goal : Remove any data specific parameters from the rest of the code
logger = logging.getLogger('iCARL')
class Dataset():
'''
    Base class to represent a Dataset
'''
def __init__(self, name):
self.name = name
self.data = []
self.labels = []
class SmartDoc(Dataset):
'''
    Class to include SmartDoc specific details
'''
def __init__(self, directory="data"):
super().__init__("smartdoc")
self.data = []
self.labels = []
for d in directory:
self.directory = d
self.train_transform = transforms.Compose([transforms.Resize([32, 32]),
transforms.ColorJitter(1.5, 1.5, 0.9, 0.5),
transforms.ToTensor()])
self.test_transform = transforms.Compose([transforms.Resize([32, 32]),
transforms.ToTensor()])
logger.info("Pass train/test data paths here")
self.classes_list = {}
file_names = []
print (self.directory, "gt.csv")
with open(os.path.join(self.directory, "gt.csv"), 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
import ast
for row in spamreader:
file_names.append(row[0])
self.data.append(os.path.join(self.directory, row[0]))
test = row[1].replace("array", "")
self.labels.append((ast.literal_eval(test)))
self.labels = np.array(self.labels)
self.labels = np.reshape(self.labels, (-1, 8))
logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
logger.debug("Data shape %s", str(len(self.data)))
self.myData = [self.data, self.labels]
class SmartDocDirectories(Dataset):
'''
    Class to include SmartDoc directory-structure specific details
'''
def __init__(self, directory="data"):
super().__init__("smartdoc")
self.data = []
self.labels = []
for folder in os.listdir(directory):
if (os.path.isdir(directory + "/" + folder)):
for file in os.listdir(directory + "/" + folder):
images_dir = directory + "/" + folder + "/" + file
if (os.path.isdir(images_dir)):
list_gt = []
tree = ET.parse(images_dir + "/" + file + ".gt")
root = tree.getroot()
for a in root.iter("frame"):
list_gt.append(a)
im_no = 0
for image in os.listdir(images_dir):
if image.endswith(".jpg"):
# print(im_no)
im_no += 1
# Now we have opened the file and GT. Write code to create multiple files and scale gt
list_of_points = {}
# img = cv2.imread(images_dir + "/" + image)
self.data.append(os.path.join(images_dir, image))
for point in list_gt[int(float(image[0:-4])) - 1].iter("point"):
myDict = point.attrib
list_of_points[myDict["name"]] = (
int(float(myDict['x'])), int(float(myDict['y'])))
ground_truth = np.asarray(
(list_of_points["tl"], list_of_points["tr"], list_of_points["br"],
list_of_points["bl"]))
ground_truth = utils.sort_gt(ground_truth)
self.labels.append(ground_truth)
self.labels = np.array(self.labels)
self.labels = np.reshape(self.labels, (-1, 8))
logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
logger.debug("Data shape %s", str(len(self.data)))
self.myData = []
for a in range(len(self.data)):
self.myData.append([self.data[a], self.labels[a]])
class SelfCollectedDataset(Dataset):
'''
    Class to include self-collected dataset specific details
'''
def __init__(self, directory="data"):
super().__init__("smartdoc")
self.data = []
self.labels = []
for image in os.listdir(directory):
# print (image)
if image.endswith("jpg") or image.endswith("JPG"):
if os.path.isfile(os.path.join(directory, image + ".csv")):
with open(os.path.join(directory, image + ".csv"), 'r') as csvfile:
spamwriter = csv.reader(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
img_path = os.path.join(directory, image)
gt = []
for row in spamwriter:
gt.append(row)
gt = np.array(gt).astype(np.float32)
ground_truth = utils.sort_gt(gt)
self.labels.append(ground_truth)
self.data.append(img_path)
self.labels = np.array(self.labels)
self.labels = np.reshape(self.labels, (-1, 8))
logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
logger.debug("Data shape %s", str(len(self.data)))
self.myData = []
for a in range(len(self.data)):
self.myData.append([self.data[a], self.labels[a]])
class SmartDocCorner(Dataset):
'''
    Class to include SmartDoc corner-dataset specific details
'''
def __init__(self, directory="data"):
super().__init__("smartdoc")
self.data = []
self.labels = []
for d in directory:
self.directory = d
self.train_transform = transforms.Compose([transforms.Resize([32, 32]),
transforms.ColorJitter(0.5, 0.5, 0.5, 0.5),
transforms.ToTensor()])
self.test_transform = transforms.Compose([transforms.Resize([32, 32]),
transforms.ToTensor()])
logger.info("Pass train/test data paths here")
self.classes_list = {}
file_names = []
with open(os.path.join(self.directory, "gt.csv"), 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
import ast
for row in spamreader:
file_names.append(row[0])
self.data.append(os.path.join(self.directory, row[0]))
test = row[1].replace("array", "")
self.labels.append((ast.literal_eval(test)))
self.labels = np.array(self.labels)
self.labels = np.reshape(self.labels, (-1, 2))
logger.debug("Ground Truth Shape: %s", str(self.labels.shape))
logger.debug("Data shape %s", str(len(self.data)))
self.myData = [self.data, self.labels]
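# A minimal usage sketch (the path is hypothetical). Note that SmartDoc and
# SmartDocCorner iterate over their ``directory`` argument, so they expect a
# list of directories each containing a gt.csv, while SmartDocDirectories and
# SelfCollectedDataset take a single directory path:
#
#     dataset = SmartDoc(directory=["data/train"])
#     image_paths, corner_labels = dataset.myData
#     print(len(image_paths), corner_labels.shape)  # labels are (N, 8) corner coordinates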
|
|
from __future__ import absolute_import, print_function, division
__author__ = 'Alistair Miles <[email protected]>'
# standard library dependencies
from xml.etree import ElementTree
from operator import attrgetter
import itertools
# internal dependencies
from petl.util import RowContainer
from petl.io.sources import read_source_from_arg
def fromxml(source, *args, **kwargs):
"""
Access data in an XML file. E.g.::
>>> from petl import fromxml, look
>>> data = \"""<table>
... <tr>
... <td>foo</td><td>bar</td>
... </tr>
... <tr>
... <td>a</td><td>1</td>
... </tr>
... <tr>
... <td>b</td><td>2</td>
... </tr>
... <tr>
... <td>c</td><td>2</td>
... </tr>
... </table>\"""
>>> with open('example1.xml', 'w') as f:
... f.write(data)
... f.close()
...
>>> table1 = fromxml('example1.xml', 'tr', 'td')
>>> look(table1)
+-------+-------+
| 'foo' | 'bar' |
+=======+=======+
| 'a' | '1' |
+-------+-------+
| 'b' | '2' |
+-------+-------+
| 'c' | '2' |
+-------+-------+
If the data values are stored in an attribute, provide the attribute name
as an extra positional argument, e.g.:
>>> data = \"""<table>
... <tr>
... <td v='foo'/><td v='bar'/>
... </tr>
... <tr>
... <td v='a'/><td v='1'/>
... </tr>
... <tr>
... <td v='b'/><td v='2'/>
... </tr>
... <tr>
... <td v='c'/><td v='2'/>
... </tr>
... </table>\"""
>>> with open('example2.xml', 'w') as f:
... f.write(data)
... f.close()
...
>>> table2 = fromxml('example2.xml', 'tr', 'td', 'v')
>>> look(table2)
+-------+-------+
| 'foo' | 'bar' |
+=======+=======+
| 'a' | '1' |
+-------+-------+
| 'b' | '2' |
+-------+-------+
| 'c' | '2' |
+-------+-------+
Data values can also be extracted by providing a mapping of field names
to element paths, e.g.::
>>> data = \"""<table>
... <row>
... <foo>a</foo><baz><bar v='1'/><bar v='3'/></baz>
... </row>
... <row>
... <foo>b</foo><baz><bar v='2'/></baz>
... </row>
... <row>
... <foo>c</foo><baz><bar v='2'/></baz>
... </row>
... </table>\"""
>>> with open('example3.xml', 'w') as f:
... f.write(data)
... f.close()
...
>>> table3 = fromxml('example3.xml', 'row', {'foo': 'foo', 'bar': ('baz/bar', 'v')})
>>> look(table3)
+-------+------------+
| 'foo' | 'bar' |
+=======+============+
| 'a' | ('1', '3') |
+-------+------------+
| 'b' | '2' |
+-------+------------+
| 'c' | '2' |
+-------+------------+
Note that the implementation is currently *not*
streaming, i.e., the whole document is loaded into memory.
Supports transparent reading from URLs, ``.gz`` and ``.bz2`` files.
.. versionadded:: 0.4
.. versionchanged:: 0.6
If multiple elements match a given field, all values are reported as a
tuple.
.. versionchanged:: 0.25
If there is more than one element name used for row values, a tuple
or list of paths can be provided, e.g.,
``fromxml('example.html', './/tr', ('th', 'td'))``.
"""
source = read_source_from_arg(source)
return XmlView(source, *args, **kwargs)
class XmlView(RowContainer):
def __init__(self, source, *args, **kwargs):
self.source = source
self.args = args
if len(args) == 2 and isinstance(args[1], (basestring, tuple, list)):
self.rmatch = args[0]
self.vmatch = args[1]
self.vdict = None
self.attr = None
elif len(args) == 2 and isinstance(args[1], dict):
self.rmatch = args[0]
self.vmatch = None
self.vdict = args[1]
self.attr = None
elif len(args) == 3:
self.rmatch = args[0]
self.vmatch = args[1]
self.vdict = None
self.attr = args[2]
else:
assert False, 'bad parameters'
self.missing = kwargs.get('missing', None)
def __iter__(self):
vmatch = self.vmatch
vdict = self.vdict
with self.source.open_('rb') as xmlf:
tree = ElementTree.parse(xmlf)
if not hasattr(tree, 'iterfind'):
# Python 2.6 compatibility
tree.iterfind = tree.findall
if vmatch is not None:
# simple case, all value paths are the same
for rowelm in tree.iterfind(self.rmatch):
if self.attr is None:
getv = attrgetter('text')
else:
getv = lambda e: e.get(self.attr)
if isinstance(vmatch, basestring):
# match only one path
velms = rowelm.findall(vmatch)
else:
# match multiple paths
velms = itertools.chain(*[rowelm.findall(enm)
for enm in vmatch])
yield tuple(getv(velm)
for velm in velms)
else:
# difficult case, deal with different paths for each field
# determine output header
fields = tuple(vdict.keys())
yield fields
# setup value getters
vmatches = dict()
vgetters = dict()
for f in fields:
vmatch = self.vdict[f]
if isinstance(vmatch, basestring):
# match element path
vmatches[f] = vmatch
vgetters[f] = element_text_getter(self.missing)
else:
# match element path and attribute name
vmatches[f] = vmatch[0]
attr = vmatch[1]
vgetters[f] = attribute_text_getter(attr, self.missing)
# determine data rows
for rowelm in tree.iterfind(self.rmatch):
yield tuple(vgetters[f](rowelm.findall(vmatches[f]))
for f in fields)
def element_text_getter(missing):
def _get(v):
if len(v) > 1:
return tuple(e.text for e in v)
elif len(v) == 1:
return v[0].text
else:
return missing
return _get
def attribute_text_getter(attr, missing):
def _get(v):
if len(v) > 1:
return tuple(e.get(attr) for e in v)
elif len(v) == 1:
return v[0].get(attr)
else:
return missing
return _get
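# A small supplementary note: the optional ``missing`` keyword (read in
# XmlView.__init__ above) is the value reported for a field whose element
# path matches nothing in a given row, e.g. (file as in the docstring
# examples):
#
#     >>> table4 = fromxml('example3.xml', 'row',
#     ...                  {'foo': 'foo', 'bar': ('baz/bar', 'v')}, missing='')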
|
|
from PIL import Image, ImageDraw
import yaml
import numpy as np
class Mesh:
"""
Mesh: Square mesher class for thermal analysis.
Each entry corresponds to a 2D array with one entry per pixel.
There are two data types, flags and double.
The mesher takes a number of squares in the x and y direction and creates
a square mesh from it.
Creates a bidirectional mapping from the integer X,Y 2D space to the 1D node list
that is used for solving the sparse matrix.
This uses a two-pass approach. The first pass counts the number of nodes
needed to describe the problem so that memory can be allocated in advance. The second
pass loads the mesh data structure with the problem. Then the matrix is solved, and
the solution data is loaded into the output, which could be the mesh, or could be
a new data structure.
TODO:
For this test code, especially the 2D test code, need to be able to specify the JSON
or otherwise initialize Matls, Layers, and Vias without using a JSON file.
Instead have the JSON or other calls inline.
TODO:
A nonzero boundary conductivity can be used instead of the flag self._isodeg.
TODO:
  It seems like a good idea to specify a layer as having a % copper coverage instead of detailed
  traces, except for holes. Then there is the problem of thermal relief, unattached via relief, etc.
TODO: Stackup editor for web.
Styles:
Altium - good drawing, alternating left/right labels, 3D look. Shows embedding.
Hyperlynx - to scale, cross section, distinctive colors. Shows embedding.
Polar - design tool for stackups, looks like Altium. Doesn't show embedding.
Whatever - http://3.bp.blogspot.com/-G71R0kDY0Jk/T2t8rIId46I/AAAAAAAAAls/wlLCATtc8Js/s1600/index2.png
Cadence - http://community.cadence.com/cadence_technology_forums/f/27/t/22556
Using IPC-2581 with Altium to do stackup editing:
http://bethesignal.com/wp/wp-content/uploads/2014/01/ECEN5224_Lecture-02_2014-01-27.pdf
SI Lecture - http://bethesignal.com/wp/wp-content/uploads/2014/01/ECEN5224_Lecture-02_2014-01-27.pdf
File format for stackup could be XML from IPC-2581B standard
TODO: 3D meshing
1. Figure out how many layers there will be in the analysis.
  The logical layers have more than one value per design or physical layer.
Input layer PNGs are mapped to boxels.
The loader first pass just counts to get the layer offsets.
The loader second pass loads the solver matrix.
The matrix solver runs.
The unloader loads the solution back into PNG files, matplotlib, or hdf5.
Each boxel is in a 2D XY array of boxels.
These arrays can be shipped to different computers to split up a problem.
material identifier
temperature from Spice solver
temperature from Trilinos solver
temperature from Numpy solver
node number
index into array of X, Y, and Z coordinates
layer number
Some physical layers have a
boundary condition flag
boundary condition temperature
boundary condition conductivity
  Ultimately there are six types of layers, combining a storage type (int/bitfield
  or float) with a role:
input
intermediate
output
The input is fabrication artwork, power inputs, etc.
Most of the input layers are bitfields, and it is possible to pack them efficiently.
Need access routines to make packing and unpacking easy.
The intermediate and output are mostly floats for numeric calculations.
The output could also be rendered as int/bitfield pictures.
Could use png->hdf5->packed data structures
"""
# In the 3D case, there will be an offset into the big matrix for each layer.
# There will be an offset into the layer for each occupied boxel X,Y.
# Referencing a cell above is done by calculating the layer above, getting
# the offset for the layer, checking to see that the boxel is occupied,
# then getting the layer-relative offset for the occupied X,Y, and adding it to the layer
# offset.
#
# The layer-relative offset-less matrices for a given bitmap input can be computed in parallel.
# They can be named or labelled with the checksum of the bitmap and cached.
# The layers, without the offsets, can be stored in a matrix market format (probably) or perhaps spatial sqlite?
# When the layer calculations are complete, the offsets can be calculated, then
# they can be brought together pairwise for parallel calculation of the
# coupling terms between the pairs.
#
def __init__(self, fn, lyr, matls):
self.config_js_fn= fn
with open (self.config_js_fn, "r") as jsonHandle:
jsonContents= jsonHandle.read()
config= yaml.load(jsonContents)
# TODO: This shouldn't be needed but the plot routine uses mesh.config
self.config= config
self.loadConfig(config)
self.nodeCount = 0
# Field name dictionary to map self.spicenodenum layer to string values
# The key is the spicenodename and the value is the spicenodenum.
self.spiceNodeName = {}
# Dictionary that maps a node name string to the X and Y coordinates in the mesh.
self.spiceNodeXName = {}
self.spiceNodeYName = {}
# Array where the index is the spicenodenum and the values are the x and y of in the mesh.
# This allows a sequence of ordered nodes in the spice raw file output to be loaded directly back into
# a layer in the mesh. Nodes start at 1. The first variable is time.
self.spiceNodeX = []
self.spiceNodeY = []
self.nodeX= []
self.nodeY= []
self.defineProblem(config['mesh'], lyr, matls)
self.mapMeshToSolutionMatrix(lyr)
# TODO: Doesn't make sense that the mesh doesn't have a copy of all of lyr.
# Refactor it out of other calls.
self.lyr= lyr
def setMeshSize(self, w, h):
"""
    setMeshSize(Mesh self, int w, int h)
Create a square mesh of size w by h.
The mesh data structure is in self.field, which holds double precision numbers,
and self.ifield, which holds integers.
"""
self.width = w
self.height = h
self.field = np.zeros((self.width, self.height, self.numdoublelayers), dtype = 'double')
self.ifield = np.zeros((self.width, self.height, self.numintlayers), dtype = 'int')
self.xr, self.yr= np.mgrid[0:self.width+1, 0:self.height+1]
def solveTemperatureNodeCount(self):
"""
solveTemperatureNodeCount(Mesh self)
Returns the count of cells in the square mesh.
"""
# The node count is one more than the maximum index.
return self.nodeCount
# Return -1 for hole locations or out-of-bounds.
def getNodeAtXY(self, x, y):
"""
getNodeAtXY(Mesh self, int x, int y)
Given an integer x and y argument, find the corresponding node number
"""
if (x < 0 or y < 0 or x >= self.width or y >= self.height):
return -1
    if (self.ifield[x, y, self._holeflag] == -1):
return -1
return self.ifield[x, y, self._holeflag]
def mapMeshToSolutionMatrix(self, lyr):
"""
mapMeshToSolutionMatrix(Mesh self, Layers lyr)
Based on the mesh, find the number of the different types of nodes
that will be in the matrix A. These numbers need to be known in
advance of constructing the matrix.
The input is the width and height of the mesh.
"""
# Problem must be at least 1 cell by 1 cell.
if self.width <= 0:
print "Error: Width:" + str(self.width)
if self.height <= 0:
print "Error: Height:" + str(self.height)
self.nodeCount= 0;
for xn in range(0,self.width):
for yn in range(0, self.height):
if (self.ifield[xn, yn, self._holeflag] >= 0):
self.ifield[xn, yn, self._holeflag]= self.nodeCount
self.nodeCount += 1
self.nodeXn = np.zeros(self.nodeCount, dtype = 'int')
self.nodeYn = np.zeros(self.nodeCount, dtype = 'int')
for xn in range(0,self.width):
for yn in range(0, self.height):
nodeThis= self.ifield[xn, yn, self._holeflag]
if (nodeThis >= 0):
self.nodeXn[nodeThis]= xn
self.nodeYn[nodeThis]= yn
# self.nodeCount = self.getNodeAtXY(self.width - 1, self.height - 1) + 1
print "Total number of independent nodes= ", self.nodeCount
def nodeLocation(self, node):
if node < 0 or node >= self.nodeCount:
      print "Node " + str(node) + " lookup is out-of-bounds from 0 to " + str(self.nodeCount)
return (self.nodeXn[node], self.nodeYn[node])
def getNodeAtXY(self, x, y):
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return -1
return self.ifield[x, y, self._holeflag]
# This can scale by using a PNG input instead of code
def defineScalableProblem(self, lyr, matls, x, y):
"""
defineScalableProblem(Layer lyr, Mesh mesh, Matls matls, int xsize, int ysize)
Create a sample test problem for thermal analysis that can scale
to a wide variety of sizes.
It initializes the mesh based on fractions of the size of the mesh.
The conductivities in the problem are based on the material properties
in the matls object.
"""
#
# TODO: Physical layers get used properly just below here.
# For the other uses,
# need to split use of lyr into the mesh layers, which are things like self._resis,
# and these layers should be initialized and managed in the mesh object here,
# not in the Layers class, where it is magically created now by messing with
# the object hash. Also there is the counting of the layers that happens there.
#
# The easy way to do this is to move the magic from Layers.py to this class,
# and change self.lyr.magic to self.magic, or in the cases where lyr is passed
# as a variable, change lyr.magic to self.magic.
#
fr4cond= matls.getProp('Core', 'conductivityXX')
fr4thick= lyr.getProp('core1', 'thickness')
fr4condUnits= matls.getUnits('conductivityXX')
fr4thickUnits= lyr.getUnits('thickness')
print "FR-4 Cond: " + str(fr4cond) + str(fr4condUnits)
print "FR-4 Thickness: " + str(fr4thick) + str(fr4thickUnits)
fr4res= 1.0/(fr4cond * fr4thick)
print "FR-4 Resistance per square: " + str(fr4res)
cucond= matls.getProp('Cu', 'conductivity')
cuthick= lyr.getProp('topside_cu', 'thickness')
cucondUnits= matls.getUnits('conductivity')
cuthickUnits= lyr.getUnits('thickness')
print "Cu Cond: " + str(cucond) + str(cucondUnits)
print "Cu Thickness: " + str(cuthick) + str(cuthickUnits)
cures= 1.0/(cucond * cuthick)
print "Cu Resistance per square: " + str(cures)
self.setMeshSize(x, y)
self.field[:, :, self._resis] = fr4res
# Heat source
hsx= 0.5
hsy= 0.5
hswidth= 0.25
hsheight= 0.25
heat= 10.0
srcl= round(self.width*(hsx-hswidth*0.5))
srcr= round(self.width*(hsx+hswidth*0.5))
srct= round(self.height*(hsy-hsheight*0.5))
srcb= round(self.height*(hsy+hsheight*0.5))
numHeatCells= (srcr - srcl)*(srcb-srct)
heatPerCell= heat/numHeatCells
print "Heat per cell = ", heatPerCell
self.field[srcl:srcr, srct:srcb, self._heat] = heatPerCell
self.field[srcl:srcr, srct:srcb, self._resis] = cures
# Boundary conditions
self.field[0, 0:self.height, self._isodeg] = 25.0
self.field[self.width-1, 0:self.height, self._isodeg] = 25.0
self.field[0:self.width, 0, self._isodeg] = 25.0
self.field[0:self.width, self.height-1, self._isodeg] = 25.0
self.ifield[0, 0:self.height, self._isoflag] = 1
self.ifield[self.width-1, 0:self.height, self._isoflag] = 1
self.ifield[0:self.width, 0, self._isoflag] = 1
self.ifield[0:self.width, self.height-1, self._isoflag] = 1
self.field[0, 0:self.height, self._boundCond] = cucond
self.field[self.width-1, 0:self.height, self._boundCond] = cucond
self.field[0:self.width, 0, self._boundCond] = cucond
self.field[0:self.width, self.height-1, self._boundCond] = cucond
# Thermal conductors
condwidth= 0.05
cond1l= round(self.width*hsx - self.width*condwidth*0.5)
cond1r= round(self.width*hsx + self.width*condwidth*0.5)
cond1t= round(self.height*hsy - self.height*condwidth*0.5)
cond1b= round(self.height*hsy + self.height*condwidth*0.5)
self.field[0:self.width, cond1t:cond1b, self._resis] = cures
self.field[cond1l:cond1r, 0:self.height, self._resis] = cures
# Holes
self.ifield[1, 1, self._holeflag]= -1
self.ifield[1, 1, self._isoflag]= 0
self.field[1, 1, self._heat]= 0.0
def definePNGProblem(self, fn, lyr, matls):
"""
Read a PNG file and load the data structure
pix from Image module is getting mapped into field and ifield 2D layer structs.
"""
heatPerCell= 48e-6
pngproblem = Image.open(fn, mode='r')
xysize= pngproblem.size
width= xysize[0]
height= xysize[1]
print "Width: " + str(width) + " Height: " + str(height)
fr4cond= matls.getProp('Core', 'conductivityXX')
fr4thick= lyr.getProp('core1', 'thickness')
fr4condUnits= matls.getUnits('conductivityXX')
fr4thickUnits= lyr.getUnits('thickness')
print "FR-4 Cond: " + str(fr4cond) + str(fr4condUnits)
print "FR-4 Thickness: " + str(fr4thick) + str(fr4thickUnits)
fr4res= 1.0/(fr4cond * fr4thick)
print "FR-4 Resistance per square: " + str(fr4res)
cucond= matls.getProp('Cu', 'conductivity')
cuthick= lyr.getProp('topside_cu', 'thickness')
cucondUnits= matls.getUnits('conductivity')
cuthickUnits= lyr.getUnits('thickness')
print "Cu Cond: " + str(cucond) + str(cucondUnits)
print "Cu Thickness: " + str(cuthick) + str(cuthickUnits)
cures= 1.0/(cucond * cuthick)
print "Cu Resistance per square: " + str(cures)
self.setMeshSize(width, height)
self.field[:, :, self._isodeg] = 25.0
self.field[:, :, self._resis] = fr4res
self.field[:, :, self._boundCond] = 0.0
pix = pngproblem.load()
copperCellCount=0
heatCellCount=0
isoCellCount=0
fr4CellCount=0
holeCellCount=0
for xn in range(0,width):
for tyn in range(0, height):
# Graphing package has +y up, png has it down
yn= height - 1 - tyn
if pix[xn,yn][0] == 255 and pix[xn,yn][1] == 0 and pix[xn,yn][2]== 0:
self.field[xn, tyn, self._resis] = cures
self.field[xn, tyn, self._heat] = heatPerCell
copperCellCount += 1
heatCellCount += 1
elif pix[xn,yn][0] == 0 and pix[xn,yn][1] == 255 and pix[xn,yn][2]== 0:
self.field[xn, tyn, self._resis] = cures
copperCellCount += 1
elif pix[xn,yn][0] == 0 and pix[xn,yn][1] == 0 and pix[xn,yn][2]== 255:
self.ifield[xn, tyn, self._isoflag] = 1
self.field[xn, tyn, self._resis] = cures
self.field[xn, tyn, self._isodeg] = 25.0
self.field[xn, tyn, self._boundCond] = cucond
isoCellCount += 1
copperCellCount += 1
elif pix[xn,yn][0] == 255 and pix[xn,yn][1] == 255 and pix[xn,yn][2]== 0:
self.field[xn, tyn, self._resis] = fr4res
fr4CellCount += 1
elif pix[xn,yn][0] == 255 and pix[xn,yn][1] == 255 and pix[xn,yn][2]== 255:
self.ifield[xn, tyn, self._holeflag] = -1
holeCellCount += 1
else:
print 'Unrecognized color: (' + str(pix[xn,yn][0]) + "," + str(pix[xn,yn][1]) + "," + str(pix[xn,yn][2]) + ') at: ' + str(xn) + ", " + str(yn)
print "Copper px: " + str(copperCellCount) + " Heat px: " + str(heatCellCount) + " Iso px: " + str(isoCellCount)
print "FR4 px: " + str(fr4CellCount) + " Hole px: " + str(holeCellCount)
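  # Colour legend used by definePNGProblem above (derived from the RGB checks
  # in the loop): red = copper carrying heat input, green = plain copper,
  # blue = isothermal boundary copper, yellow = bare FR-4, white = hole;
  # any other colour is reported as unrecognized.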
def defineTinyProblem(self, lyr, matls):
"""
defineTinyProblem(Layer lyr, Mesh mesh, Matls matls)
Create a tiny test problem.
"""
self.setMeshSize(3, 3)
self.field[:, :, self._resis] = 400.0
self.ifield[0:3, 0, self._isoflag] = 1
self.field[0:3, 0, self._boundCond] = 400.0
self.field[1, 1, self._heat] = 2.0
print "Mesh: " + str(self)
def defineProblem(self, config, lyr, matls):
foundProblem= False
for problem in config:
if problem['active'] == 1:
if (problem['type'] == "tiny"):
self.defineTinyProblem(lyr, matls)
foundProblem= True
if (problem['type'] == "png"):
self.definePNGProblem(problem['inputFile'], lyr, matls)
foundProblem= True
if (problem['type'] == "scalable"):
self.defineScalableProblem(lyr, matls, problem['xsize'], problem['ysize'])
foundProblem= True
if foundProblem == False:
print "Problem not specified or not found in configuration"
def loadConfig(self, config):
self.numdoublelayers= 0
self.numintlayers= 0
for lyr in config['simulation_layers']:
self.__dict__['_' + lyr['name']]= lyr['index']
if (lyr['type'] == 'double'):
self.numdoublelayers = self.numdoublelayers + 1
if (lyr['type'] == 'int'):
self.numintlayers = self.numintlayers + 1
print "Number of double layers = " + str(self.numdoublelayers)
print "Number of int layers = " + str(self.numintlayers)
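# A sketch of the configuration shape that loadConfig() and defineProblem()
# expect, inferred from the keys read above (the layer names come from the
# self._<name> fields used in this class; the index values and file names are
# illustrative). Exactly one 'mesh' entry should be active:
#
#   simulation_layers:
#     - {name: resis,     type: double, index: 0}
#     - {name: heat,      type: double, index: 1}
#     - {name: isodeg,    type: double, index: 2}
#     - {name: boundCond, type: double, index: 3}
#     - {name: isoflag,   type: int,    index: 0}
#     - {name: holeflag,  type: int,    index: 1}
#   mesh:
#     - {type: png,      active: 1, inputFile: problem.png}
#     - {type: scalable, active: 0, xsize: 100, ysize: 100}
#     - {type: tiny,     active: 0}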
|
|
"""
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from __future__ import unicode_literals
import logging
import re
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.encoding import force_text
from django.utils.http import same_origin
logger = logging.getLogger('django.request')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
CSRF_KEY_LENGTH = 32
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
return get_random_string(CSRF_KEY_LENGTH)
def get_token(request):
"""
Returns the CSRF token required for a POST form. The token is an
alphanumeric value. A new token is created if one is not already set.
A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
if "CSRF_COOKIE" not in request.META:
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
request.META["CSRF_COOKIE_USED"] = True
return request.META["CSRF_COOKIE"]
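# A minimal usage sketch (the view and template names are hypothetical).
# Calling get_token() inside a view marks the cookie as used, which makes
# process_response() below attach the CSRF cookie and a 'Vary: Cookie' header:
#
#     from django.middleware.csrf import get_token
#     from django.shortcuts import render
#
#     def payment_view(request):
#         token = get_token(request)
#         return render(request, 'payment.html', {'csrf_value': token})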
def rotate_token(request):
"""
Changes the CSRF token in use for a request - should be done on login
for security purposes.
"""
request.META.update({
"CSRF_COOKIE_USED": True,
"CSRF_COOKIE": _get_new_csrf_key(),
})
def _sanitize_token(token):
# Allow only alphanum
if len(token) > CSRF_KEY_LENGTH:
return _get_new_csrf_key()
token = re.sub('[^a-zA-Z0-9]+', '', force_text(token))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
return token
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
logger.warning('Forbidden (%s): %s', reason, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
csrf_token = _sanitize_token(
request.COOKIES[settings.CSRF_COOKIE_NAME])
# Use same token next time
request.META['CSRF_COOKIE'] = csrf_token
except KeyError:
csrf_token = None
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
# Assume that anything not defined as 'safe' by RFC2616 needs protection
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite.
# It comes after the creation of CSRF cookies, so that
# everything else continues to work exactly the same
# (e.g. cookies are sent, etc.), but before any
# branches that call reject().
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# nonce we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = force_text(
request.META.get('HTTP_REFERER'),
strings_only=True,
errors='replace'
)
if referer is None:
return self._reject(request, REASON_NO_REFERER)
# Here we generate a list of all acceptable HTTP referers,
# including the current host since that has been validated
# upstream.
good_hosts = list(settings.CSRF_TRUSTED_ORIGINS)
# Note that request.get_host() includes the port.
good_hosts.append(request.get_host())
good_referers = ['https://{0}/'.format(host) for host in good_hosts]
if not any(same_origin(referer, host) for host in good_referers):
reason = REASON_BAD_REFERER % referer
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
return self._reject(request, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
try:
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
except IOError:
# Handle a broken connection before we've completed reading
# the POST data. process_view shouldn't raise any
# exceptions, so we'll ignore and serve the user a 403
# (assuming they're still listening, which they probably
# aren't because of the error).
pass
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '')
if not constant_time_compare(request_csrf_token, csrf_token):
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
def process_response(self, request, response):
if getattr(response, 'csrf_processing_done', False):
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE,
httponly=settings.CSRF_COOKIE_HTTPONLY
)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_processing_done = True
return response
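# Hedged usage sketch (assumptions: Django's test client, a CSRF-protected URL "/submit/"
# and a page "/form/" whose view sets the CSRF cookie; all names are illustrative only).
# It exercises the X-CSRFToken header fallback used by process_view() above when the
# csrfmiddlewaretoken POST field is absent.
def _example_post_with_csrf_header():
    from django.test import Client

    client = Client(enforce_csrf_checks=True)
    client.get("/form/")  # primes the CSRF cookie
    token = client.cookies[settings.CSRF_COOKIE_NAME].value
    # CSRF_HEADER_NAME is already in META form, e.g. 'HTTP_X_CSRFTOKEN'.
    return client.post("/submit/", data={}, **{settings.CSRF_HEADER_NAME: token})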
|
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
import time
import uuid
import random
import warnings
import tempfile
import filelock
import multiprocessing
from packaging import version
from zoo.ray.process import session_execute, ProcessMonitor
from zoo.ray.utils import is_local
from zoo.ray.utils import resource_to_bytes
from zoo.ray.utils import get_parent_pid
def kill_redundant_log_monitors(redis_address):
"""
Killing redundant log_monitor.py processes.
If multiple ray nodes are started on the same machine,
there will be multiple ray log_monitor.py processes
monitoring the same log dir. As a result, the logs
will be replicated multiple times and forwarded to driver.
See issue https://github.com/ray-project/ray/issues/10392
"""
import psutil
import subprocess
log_monitor_processes = []
for proc in psutil.process_iter(["name", "cmdline"]):
try:
            # Avoid throwing an exception when listing lwsslauncher on macOS.
if proc.name() is None or proc.name() == "lwsslauncher":
continue
cmdline = subprocess.list2cmdline(proc.cmdline())
is_log_monitor = "log_monitor.py" in cmdline
is_same_redis = "--redis-address={}".format(redis_address)
if is_log_monitor and is_same_redis in cmdline:
log_monitor_processes.append(proc)
except (psutil.AccessDenied, psutil.ZombieProcess, psutil.ProcessLookupError):
# psutil may encounter AccessDenied or ZombieProcess exceptions
# when it's trying to visit some MacOS core services
if psutil.MACOS:
continue
else:
raise Exception("List process with list2cmdline failed!")
if len(log_monitor_processes) > 1:
for proc in log_monitor_processes[1:]:
proc.kill()
class RayServiceFuncGenerator(object):
"""
    This should be a picklable class.
"""
def _prepare_env(self):
modified_env = os.environ.copy()
if self.python_loc == "python_env/bin/python":
# In this case the executor is using the conda yarn archive under the current
# working directory. Need to get the full path.
executor_python_path = "{}/{}".format(
os.getcwd(), "/".join(self.python_loc.split("/")[:-1]))
else:
executor_python_path = "/".join(self.python_loc.split("/")[:-1])
if "PATH" in os.environ:
modified_env["PATH"] = "{}:{}".format(executor_python_path, os.environ["PATH"])
else:
modified_env["PATH"] = executor_python_path
modified_env.pop("MALLOC_ARENA_MAX", None)
modified_env.pop("RAY_BACKEND_LOG_LEVEL", None)
# Unset all MKL setting as Analytics Zoo would give default values when init env.
# Running different programs may need different configurations.
modified_env.pop("intra_op_parallelism_threads", None)
modified_env.pop("inter_op_parallelism_threads", None)
modified_env.pop("OMP_NUM_THREADS", None)
modified_env.pop("KMP_BLOCKTIME", None)
modified_env.pop("KMP_AFFINITY", None)
modified_env.pop("KMP_SETTINGS", None)
if self.env: # Add in env argument if any MKL setting is needed.
modified_env.update(self.env)
if self.verbose:
print("Executing with these environment settings:")
for pair in modified_env.items():
print(pair)
print("The $PATH is: {}".format(modified_env["PATH"]))
return modified_env
def __init__(self, python_loc, redis_port, ray_node_cpu_cores,
password, object_store_memory, verbose=False, env=None,
include_webui=False,
extra_params=None):
"""object_store_memory: integer in bytes"""
self.env = env
self.python_loc = python_loc
self.redis_port = redis_port
self.password = password
self.ray_node_cpu_cores = ray_node_cpu_cores
self.ray_exec = self._get_ray_exec()
self.object_store_memory = object_store_memory
self.extra_params = extra_params
self.include_webui = include_webui
self.verbose = verbose
# _mxnet_worker and _mxnet_server are resource tags for distributed MXNet training only
# in order to diff worker from server.
# This is useful to allocate workers and servers in the cluster.
# Leave some reserved custom resources free to avoid unknown crash due to resources.
self.labels = \
"""--resources '{"_mxnet_worker": %s, "_mxnet_server": %s, "_reserved": %s}'""" \
% (1, 1, 2)
# Add a unique id so that different Ray programs won't affect each other even if
# the flags and locks are not removed.
tag = uuid.uuid4().hex
self.ray_master_flag = "ray_master_{}".format(tag)
self.ray_master_lock = "ray_master_start_{}.lock".format(tag)
self.raylet_lock = "raylet_start_{}.lock".format(tag)
def gen_stop(self):
def _stop(iter):
command = "{} stop".format(self.ray_exec)
            print("Stopping the ray services: {}".format(command))
session_execute(command=command, fail_fast=True)
return iter
return _stop
@staticmethod
def _enrich_command(command, object_store_memory, extra_params):
if object_store_memory:
command = command + " --object-store-memory {}".format(str(object_store_memory))
if extra_params:
for pair in extra_params.items():
command = command + " --{} {}".format(pair[0], pair[1])
return command
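    # Illustrative example (values made up): with object_store_memory=1000000000 and
    # extra_params={"temp-dir": "/tmp/ray"}, a base command such as
    #     "ray start --head --port 6379"
    # is enriched to
    #     "ray start --head --port 6379 --object-store-memory 1000000000 --temp-dir /tmp/ray"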
def _gen_master_command(self):
webui = "true" if self.include_webui else "false"
command = "{} start --head " \
"--include-dashboard {} --dashboard-host 0.0.0.0 --port {} " \
"--redis-password {} --num-cpus {}". \
format(self.ray_exec, webui, self.redis_port, self.password,
self.ray_node_cpu_cores)
if self.labels:
command = command + " " + self.labels
return RayServiceFuncGenerator._enrich_command(command=command,
object_store_memory=self.object_store_memory,
extra_params=self.extra_params)
@staticmethod
def _get_raylet_command(redis_address,
ray_exec,
password,
ray_node_cpu_cores,
labels="",
object_store_memory=None,
extra_params=None):
command = "{} start --address {} --redis-password {} --num-cpus {}".format(
ray_exec, redis_address, password, ray_node_cpu_cores)
if labels:
command = command + " " + labels
return RayServiceFuncGenerator._enrich_command(command=command,
object_store_memory=object_store_memory,
extra_params=extra_params)
@staticmethod
def _get_spark_executor_pid():
# TODO: This might not work on OS other than Linux
this_pid = os.getpid()
pyspark_daemon_pid = get_parent_pid(this_pid)
spark_executor_pid = get_parent_pid(pyspark_daemon_pid)
return spark_executor_pid
@staticmethod
def start_ray_daemon(python_loc, pid_to_watch, pgid_to_kill):
daemon_path = os.path.join(os.path.dirname(__file__), "ray_daemon.py")
start_daemon_command = ['nohup', python_loc, daemon_path, str(pid_to_watch),
str(pgid_to_kill)]
        # Run the ray daemon in its own process group so that it survives when Spark
        # tears down the executor's process group.
subprocess.Popen(start_daemon_command, preexec_fn=os.setpgrp)
time.sleep(1)
def _start_ray_node(self, command, tag):
modified_env = self._prepare_env()
print("Starting {} by running: {}".format(tag, command))
process_info = session_execute(command=command, env=modified_env, tag=tag)
spark_executor_pid = RayServiceFuncGenerator._get_spark_executor_pid()
RayServiceFuncGenerator.start_ray_daemon(self.python_loc,
pid_to_watch=spark_executor_pid,
pgid_to_kill=process_info.pgid)
import ray._private.services as rservices
process_info.node_ip = rservices.get_node_ip_address()
return process_info
def _get_ray_exec(self):
if "envs" in self.python_loc: # conda environment
python_bin_dir = "/".join(self.python_loc.split("/")[:-1])
return "{}/python {}/ray".format(python_bin_dir, python_bin_dir)
elif self.python_loc == "python_env/bin/python": # conda yarn archive on the executor
return "python_env/bin/python python_env/bin/ray"
else: # system environment with ray installed; for example: /usr/local/bin/ray
return "ray"
def gen_ray_master_start(self):
def _start_ray_master(index, iter):
from zoo.util.utils import get_node_ip
process_info = None
if index == 0:
print("partition id is : {}".format(index))
current_ip = get_node_ip()
print("master address {}".format(current_ip))
redis_address = "{}:{}".format(current_ip, self.redis_port)
process_info = self._start_ray_node(command=self._gen_master_command(),
tag="ray-master")
process_info.master_addr = redis_address
yield process_info
return _start_ray_master
def gen_raylet_start(self, redis_address):
def _start_raylets(iter):
from zoo.util.utils import get_node_ip
current_ip = get_node_ip()
master_ip = redis_address.split(":")[0]
do_start = True
process_info = None
base_path = tempfile.gettempdir()
ray_master_flag_path = os.path.join(base_path, self.ray_master_flag)
# If there is already a ray master on this node, we need to start one less raylet.
if current_ip == master_ip:
ray_master_lock_path = os.path.join(base_path, self.ray_master_lock)
with filelock.FileLock(ray_master_lock_path):
if not os.path.exists(ray_master_flag_path):
os.mknod(ray_master_flag_path)
do_start = False
if do_start:
raylet_lock_path = os.path.join(base_path, self.raylet_lock)
with filelock.FileLock(raylet_lock_path):
process_info = self._start_ray_node(
command=RayServiceFuncGenerator._get_raylet_command(
redis_address=redis_address,
ray_exec=self.ray_exec,
password=self.password,
ray_node_cpu_cores=self.ray_node_cpu_cores,
labels=self.labels,
object_store_memory=self.object_store_memory,
extra_params=self.extra_params),
tag="raylet")
kill_redundant_log_monitors(redis_address=redis_address)
# Cannot remove ray_master_flag at the end of this task since no barrier is guaranteed.
yield process_info
return _start_raylets
def gen_ray_start(self, master_ip):
def _start_ray_services(iter):
from pyspark import BarrierTaskContext
from zoo.util.utils import get_node_ip
tc = BarrierTaskContext.get()
current_ip = get_node_ip()
print("current address {}".format(current_ip))
print("master address {}".format(master_ip))
redis_address = "{}:{}".format(master_ip, self.redis_port)
process_info = None
base_path = tempfile.gettempdir()
ray_master_flag_path = os.path.join(base_path, self.ray_master_flag)
if current_ip == master_ip: # Start the ray master.
# It is possible that multiple executors are on one node. In this case,
# the first executor that gets the lock would be the master and it would
# create a flag to indicate the master has initialized.
# The flag file is removed when ray start processes finish so that this
# won't affect other programs.
ray_master_lock_path = os.path.join(base_path, self.ray_master_lock)
with filelock.FileLock(ray_master_lock_path):
if not os.path.exists(ray_master_flag_path):
print("partition id is : {}".format(tc.partitionId()))
process_info = self._start_ray_node(command=self._gen_master_command(),
tag="ray-master")
process_info.master_addr = redis_address
os.mknod(ray_master_flag_path)
tc.barrier()
if not process_info: # Start raylets.
# Add a lock to avoid starting multiple raylets on one node at the same time.
# See this issue: https://github.com/ray-project/ray/issues/10154
raylet_lock_path = os.path.join(base_path, self.raylet_lock)
with filelock.FileLock(raylet_lock_path):
print("partition id is : {}".format(tc.partitionId()))
process_info = self._start_ray_node(
command=RayServiceFuncGenerator._get_raylet_command(
redis_address=redis_address,
ray_exec=self.ray_exec,
password=self.password,
ray_node_cpu_cores=self.ray_node_cpu_cores,
labels=self.labels,
object_store_memory=self.object_store_memory,
extra_params=self.extra_params),
tag="raylet")
kill_redundant_log_monitors(redis_address=redis_address)
if os.path.exists(ray_master_flag_path):
os.remove(ray_master_flag_path)
yield process_info
return _start_ray_services
class RayContext(object):
_active_ray_context = None
def __init__(self, sc, redis_port=None, password="123456", object_store_memory=None,
verbose=False, env=None, extra_params=None, include_webui=True,
num_ray_nodes=None, ray_node_cpu_cores=None):
"""
        RayContext initiates a Ray cluster on top of the configuration of the SparkContext.
        After creating a RayContext, call the init method to set up the cluster.
        - For Spark local mode: The total number of cores available to Ray equals the number
        of Spark local cores.
        - For Spark cluster mode: The number of raylets to be created equals the number of
        Spark executors, and the number of cores allocated to each raylet equals the number
        of cores of each Spark executor.
        You can also specify num_ray_nodes and ray_node_cpu_cores explicitly to configure
        how the raylets are started.
        :param sc: An instance of SparkContext.
        :param redis_port: The redis port for the ray head node. Default is None, in which
        case a port is picked at random.
        :param password: The password for redis. Defaults to "123456" if not specified.
        :param object_store_memory: The memory size for the ray object_store as a string.
        This can be specified in bytes(b), kilobytes(k), megabytes(m) or gigabytes(g).
        For example, "50b", "100k", "250m", "30g".
        :param verbose: True for more logs when starting ray. Default is False.
        :param env: The environment variable dict for running ray processes. Default is None.
        :param extra_params: The key value dict for extra options to launch ray.
        For example, extra_params={"temp-dir": "/tmp/ray/"}
        :param include_webui: True to include the web UI when starting ray. Default is True.
        :param num_ray_nodes: The number of raylets to start across the cluster.
        For Spark local mode, you don't need to specify this value.
        For Spark cluster mode, it defaults to the number of Spark executors. If
        spark.executor.instances can't be detected in your SparkContext, you need to
        explicitly specify this. It is recommended that num_ray_nodes is not larger than the
        number of Spark executors to make sure there are enough resources in your cluster.
        :param ray_node_cpu_cores: The number of available cores for each raylet.
        For Spark local mode, it defaults to the number of Spark local cores.
        For Spark cluster mode, it defaults to the number of cores for each Spark executor.
        If spark.executor.cores or spark.cores.max can't be detected in your SparkContext,
        you need to explicitly specify this. It is recommended that ray_node_cpu_cores is not
        larger than the number of cores for each Spark executor to make sure there are enough
        resources in your cluster.
"""
assert sc is not None, "sc cannot be None, please create a SparkContext first"
self.sc = sc
self.initialized = False
self.is_local = is_local(sc)
self.verbose = verbose
self.redis_password = password
self.object_store_memory = resource_to_bytes(object_store_memory)
self.ray_processesMonitor = None
self.env = env
self.extra_params = extra_params
self.include_webui = include_webui
self._address_info = None
if self.is_local:
self.num_ray_nodes = 1
spark_cores = self._get_spark_local_cores()
if ray_node_cpu_cores:
ray_node_cpu_cores = int(ray_node_cpu_cores)
if ray_node_cpu_cores > spark_cores:
warnings.warn("ray_node_cpu_cores is larger than available Spark cores, "
"make sure there are enough resources on your machine")
self.ray_node_cpu_cores = ray_node_cpu_cores
else:
self.ray_node_cpu_cores = spark_cores
# For Spark local mode, directly call ray.init() and ray.shutdown().
# ray.shutdown() would clear up all the ray related processes.
# Ray Manager is only needed for Spark cluster mode to monitor ray processes.
else:
if self.sc.getConf().contains("spark.executor.cores"):
executor_cores = int(self.sc.getConf().get("spark.executor.cores"))
else:
executor_cores = None
if ray_node_cpu_cores:
ray_node_cpu_cores = int(ray_node_cpu_cores)
if executor_cores and ray_node_cpu_cores > executor_cores:
warnings.warn("ray_node_cpu_cores is larger than Spark executor cores, "
"make sure there are enough resources on your cluster")
self.ray_node_cpu_cores = ray_node_cpu_cores
elif executor_cores:
self.ray_node_cpu_cores = executor_cores
else:
raise Exception("spark.executor.cores not detected in the SparkContext, "
"you need to manually specify num_ray_nodes and ray_node_cpu_cores "
"for RayContext to start ray services")
if self.sc.getConf().contains("spark.executor.instances"):
num_executors = int(self.sc.getConf().get("spark.executor.instances"))
elif self.sc.getConf().contains("spark.cores.max"):
import math
num_executors = math.floor(
int(self.sc.getConf().get("spark.cores.max")) / self.ray_node_cpu_cores)
else:
num_executors = None
if num_ray_nodes:
num_ray_nodes = int(num_ray_nodes)
if num_executors and num_ray_nodes > num_executors:
warnings.warn("num_ray_nodes is larger than the number of Spark executors, "
"make sure there are enough resources on your cluster")
self.num_ray_nodes = num_ray_nodes
elif num_executors:
self.num_ray_nodes = num_executors
else:
                raise Exception("spark.executor.instances (or spark.cores.max) not detected "
                                "in the SparkContext, you need to manually specify "
                                "num_ray_nodes and ray_node_cpu_cores for RayContext "
                                "to start ray services")
from zoo.util.utils import detect_python_location
self.python_loc = os.environ.get("PYSPARK_PYTHON", detect_python_location())
self.redis_port = random.randint(10000, 65535) if not redis_port else int(redis_port)
self.ray_service = RayServiceFuncGenerator(
python_loc=self.python_loc,
redis_port=self.redis_port,
ray_node_cpu_cores=self.ray_node_cpu_cores,
password=self.redis_password,
object_store_memory=self.object_store_memory,
verbose=self.verbose,
env=self.env,
include_webui=self.include_webui,
extra_params=self.extra_params)
RayContext._active_ray_context = self
self.total_cores = self.num_ray_nodes * self.ray_node_cpu_cores
@classmethod
def get(cls, initialize=True):
if RayContext._active_ray_context:
ray_ctx = RayContext._active_ray_context
if initialize and not ray_ctx.initialized:
ray_ctx.init()
return ray_ctx
else:
raise Exception("No active RayContext. Please create a RayContext and init it first")
def _gather_cluster_ips(self):
"""
        Get the IPs of all Spark executors in the cluster. The first IP returned
        will be used to launch the ray master.
"""
def info_fn(iter):
from zoo.util.utils import get_node_ip
yield get_node_ip()
ips = self.sc.range(0, self.total_cores,
numSlices=self.total_cores).mapPartitions(info_fn).collect()
ips = list(set(ips))
return ips
def stop(self):
if not self.initialized:
print("The Ray cluster has not been launched.")
return
import ray
ray.shutdown()
self.initialized = False
def purge(self):
"""
Invoke ray stop to clean ray processes.
"""
if not self.initialized:
print("The Ray cluster has not been launched.")
return
if self.is_local:
import ray
ray.shutdown()
else:
self.sc.range(0, self.total_cores,
numSlices=self.total_cores).mapPartitions(
self.ray_service.gen_stop()).collect()
self.initialized = False
def _get_spark_local_cores(self):
local_symbol = re.match(r"local\[(.*)\]", self.sc.master).group(1)
if local_symbol == "*":
return multiprocessing.cpu_count()
else:
return int(local_symbol)
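    # Illustrative example: a Spark master of "local[*]" yields multiprocessing.cpu_count()
    # cores, while "local[4]" yields 4.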
def init(self, driver_cores=0):
"""
        Initiate the ray cluster.
        :param driver_cores: The number of cores for the raylet on the driver, for Spark
        cluster mode only. Default is 0, in which case the local driver does not take any
        ray workload.
        :return: A dictionary of address information about the ray cluster, containing
        node_ip_address, redis_address, object_store_address, raylet_socket_name,
        webui_url and session_dir.
"""
if self.initialized:
print("The Ray cluster has been launched.")
else:
if self.is_local:
if self.env:
os.environ.update(self.env)
import ray
kwargs = {}
if self.extra_params is not None:
for k, v in self.extra_params.items():
kw = k.replace("-", "_")
kwargs[kw] = v
init_params = dict(
num_cpus=self.ray_node_cpu_cores,
_redis_password=self.redis_password,
object_store_memory=self.object_store_memory,
include_dashboard=self.include_webui,
dashboard_host="0.0.0.0",
)
init_params.update(kwargs)
if version.parse(ray.__version__) >= version.parse("1.4.0"):
init_params["namespace"] = "az"
self._address_info = ray.init(**init_params)
else:
self.cluster_ips = self._gather_cluster_ips()
redis_address = self._start_cluster()
self._address_info = self._start_driver(num_cores=driver_cores,
redis_address=redis_address)
print(self._address_info)
kill_redundant_log_monitors(self._address_info["redis_address"])
self.initialized = True
return self._address_info
@property
def address_info(self):
if self._address_info:
return self._address_info
else:
raise Exception("The Ray cluster has not been launched yet. Please call init first")
@property
def redis_address(self):
return self.address_info["redis_address"]
def _start_cluster(self):
ray_rdd = self.sc.range(0, self.num_ray_nodes,
numSlices=self.num_ray_nodes)
from zoo import ZooContext
if ZooContext.barrier_mode:
print("Launching Ray on cluster with Spark barrier mode")
# The first ip would be used to launch ray master.
process_infos = ray_rdd.barrier().mapPartitions(
self.ray_service.gen_ray_start(self.cluster_ips[0])).collect()
else:
print("Launching Ray on cluster without Spark barrier mode")
master_process_infos = ray_rdd.mapPartitionsWithIndex(
self.ray_service.gen_ray_master_start()).collect()
master_process_infos = [process for process in master_process_infos if process]
assert len(master_process_infos) == 1, \
"There should be only one ray master launched, but got {}"\
.format(len(master_process_infos))
master_process_info = master_process_infos[0]
redis_address = master_process_info.master_addr
raylet_process_infos = ray_rdd.mapPartitions(
self.ray_service.gen_raylet_start(redis_address)).collect()
raylet_process_infos = [process for process in raylet_process_infos if process]
assert len(raylet_process_infos) == self.num_ray_nodes - 1, \
"There should be {} raylets launched across the cluster, but got {}"\
.format(self.num_ray_nodes - 1, len(raylet_process_infos))
process_infos = master_process_infos + raylet_process_infos
self.ray_processesMonitor = ProcessMonitor(process_infos, self.sc, ray_rdd, self,
verbose=self.verbose)
return self.ray_processesMonitor.master.master_addr
def _start_restricted_worker(self, num_cores, node_ip_address, redis_address):
extra_param = {"node-ip-address": node_ip_address}
if self.extra_params is not None:
extra_param.update(self.extra_params)
command = RayServiceFuncGenerator._get_raylet_command(
redis_address=redis_address,
ray_exec="ray",
password=self.redis_password,
ray_node_cpu_cores=num_cores,
object_store_memory=self.object_store_memory,
extra_params=extra_param)
modified_env = self.ray_service._prepare_env()
print("Executing command: {}".format(command))
process_info = session_execute(command=command, env=modified_env,
tag="raylet", fail_fast=True)
RayServiceFuncGenerator.start_ray_daemon("python",
pid_to_watch=os.getpid(),
pgid_to_kill=process_info.pgid)
def _start_driver(self, num_cores, redis_address):
print("Start to launch ray driver on local")
import ray._private.services
node_ip = ray._private.services.get_node_ip_address(redis_address)
self._start_restricted_worker(num_cores=num_cores,
node_ip_address=node_ip,
redis_address=redis_address)
ray.shutdown()
init_params = dict(
address=redis_address,
_redis_password=self.ray_service.password,
_node_ip_address=node_ip
)
if version.parse(ray.__version__) >= version.parse("1.4.0"):
init_params["namespace"] = "az"
return ray.init(**init_params)
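# Hedged usage sketch (assumptions: `sc` is an existing SparkContext, e.g. created via the
# Analytics Zoo init_spark_* helpers, ray is installed on every node, and the resource
# numbers below are illustrative only).
def _example_ray_context_usage(sc):
    import ray

    ray_ctx = RayContext(sc, object_store_memory="2g",
                         num_ray_nodes=2, ray_node_cpu_cores=4)
    address_info = ray_ctx.init()     # launch the ray cluster on top of Spark
    print(address_info["redis_address"])

    @ray.remote
    def add(x, y):                    # trivial remote task to check the cluster
        return x + y

    print(ray.get(add.remote(1, 2)))  # -> 3
    ray_ctx.stop()                    # shut down ray; the SparkContext keeps running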
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cudnn RNN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import unittest
import numpy as np
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn as rnn_lib
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
def _create_cudnn_compatible_canonical_rnn(cudnn_model,
inputs,
use_block_cell,
scope="rnn"):
model = cudnn_model.rnn_mode
if model not in (cudnn_rnn_ops.CUDNN_LSTM, cudnn_rnn_ops.CUDNN_GRU):
raise ValueError("%s is not supported!" % model)
if model == cudnn_rnn_ops.CUDNN_GRU and use_block_cell:
raise ValueError("gru is not supported when using block cell!")
num_units = cudnn_model.num_units
num_layers = cudnn_model.num_layers
# To reuse cuDNN-trained models, must use cudnn compatible rnn cells.
if use_block_cell:
single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleLSTMBlockCell(num_units)
else:
if model == cudnn_rnn_ops.CUDNN_LSTM:
single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleLSTMCell(num_units)
else:
single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleGRUCell(num_units)
cell = rnn_cell_impl.MultiRNNCell([single_cell() for _ in range(num_layers)])
return rnn_lib.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, time_major=True, scope=scope)
class CudnnRNNTest(TensorFlowTestCase):
def _CreateModel(self,
rnn_mode,
num_layers,
num_units,
input_size,
input_mode="linear_input",
dtype=dtypes.float32,
dropout=0.):
if rnn_mode == cudnn_rnn_ops.CUDNN_LSTM:
model = cudnn_rnn_ops.CudnnLSTM(
num_layers, num_units, input_size, dtype=dtype, dropout=dropout)
elif rnn_mode == cudnn_rnn_ops.CUDNN_GRU:
model = cudnn_rnn_ops.CudnnGRU(
num_layers, num_units, input_size, dtype=dtype, dropout=dropout)
elif rnn_mode == cudnn_rnn_ops.CUDNN_RNN_TANH:
model = cudnn_rnn_ops.CudnnRNNTanh(
num_layers, num_units, input_size, dtype=dtype, dropout=dropout)
elif rnn_mode == cudnn_rnn_ops.CUDNN_RNN_RELU:
model = cudnn_rnn_ops.CudnnRNNRelu(
num_layers, num_units, input_size, dtype=dtype, dropout=dropout)
else:
raise ValueError("Invalid rnn_mode: %s" % rnn_mode)
return model
def _create_params_savable(self, params, model, base_variable_scope="rnn",
name="params_canonical"):
"""Create a RNNParamsSaveable for the weight and bias parameters.
Args:
params: a Variable for weight and bias parameters.
model: a CudnnRNN model.
base_variable_scope: a string, prefix of names of saved variables.
name: a string, name of the RNNParamsSaveable object.
"""
params_saveable = cudnn_rnn_ops.RNNParamsSaveable(
model, model.params_to_canonical, model.canonical_to_params, [params],
base_variable_scope=base_variable_scope, name=name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
def _testSaveRestoreVariable(self, rnn_mode, dtype):
model = self._CreateModel(
rnn_mode, num_layers=2, num_units=7, input_size=3, dtype=dtype)
random_seed.set_random_seed(1234)
params_size_t = model.params_size()
params = variables.Variable(
random_ops.random_uniform([params_size_t], dtype=dtype),
dtype=dtype,
validate_shape=False)
self._create_params_savable(params, model)
save_path = os.path.join(self.get_temp_dir(), "save-restore-variable-test")
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
params_v = sess.run(params)
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
with self.test_session(use_gpu=True) as sess:
reset_params = state_ops.assign(params,
array_ops.zeros(
[params_size_t], dtype=dtype))
sess.run(reset_params)
saver.restore(sess, save_path)
params_v_restored = sess.run(params)
self.assertAllEqual(params_v, params_v_restored)
def _testSaveRestoreTwoVariables(self, rnn_mode, dtype):
model = self._CreateModel(
rnn_mode, num_layers=2, num_units=7, input_size=3, dtype=dtype)
random_seed.set_random_seed(1234)
params_size_t = model.params_size()
names = ["rnn_1", "rnn_2"]
param_vars = [variables.Variable(
random_ops.random_uniform([params_size_t], dtype=dtype),
dtype=dtype,
validate_shape=False) for name in names]
for name, params in zip(names, param_vars):
self._create_params_savable(params, model, name, name)
save_path = os.path.join(self.get_temp_dir(), "save-restore-variable-test")
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
params_v = sess.run(param_vars)
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
with self.test_session(use_gpu=True) as sess:
reset_params = [
state_ops.assign(params,
array_ops.zeros(
[params_size_t], dtype=dtype))
for params in param_vars]
sess.run(reset_params)
saver.restore(sess, save_path)
params_v_restored = sess.run(param_vars)
for v, v_restored in zip(params_v, params_v_restored):
self.assertAllEqual(v, v_restored)
def _build_forward_cudnn_model(self,
rnn_mode,
num_layers,
num_units,
input_data,
is_training=False):
input_data_shape = input_data.get_shape().with_rank(3)
batch_size = input_data_shape[1].value
input_size = input_data_shape[2].value
model = self._CreateModel(rnn_mode, num_layers, num_units, input_size)
# Set zero init input states
input_h = constant_op.constant(
np.zeros([num_layers, batch_size, num_units]), dtype=dtypes.float32)
has_input_c = (rnn_mode == cudnn_rnn_ops.CUDNN_LSTM)
if has_input_c:
input_c = constant_op.constant(
np.zeros([num_layers, batch_size, num_units]), dtype=dtypes.float32)
# Set rnn params
params_size_t = model.params_size()
params = variables.Variable(
random_ops.random_uniform([params_size_t]), validate_shape=False)
args = {
"input_data": input_data,
"input_h": input_h,
"params": params,
"is_training": is_training
}
if has_input_c:
args["input_c"] = input_c
# Build cell
output_tuple = model(**args)
# Create savable objects for params
self._create_params_savable(params, model)
return output_tuple, model, params
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testCudnnCompatibleRnnCells(self):
configs = [
{
"num_layers": 1,
"seq_length": 3,
"num_units": 4,
"input_size": 5,
"batch_size": 6,
},
{
"num_layers": 2,
"seq_length": 8,
"num_units": 4,
"input_size": 8,
"batch_size": 16,
},
{
"num_layers": 2,
"seq_length": 3,
"num_units": 4,
"input_size": 5,
"batch_size": 6,
},
{
"num_layers": 1,
"seq_length": 2,
"num_units": 2,
"input_size": 4,
"batch_size": 1,
},
]
for rnn, cfg, use_block_cell in itertools.product(
(cudnn_rnn_ops.CUDNN_LSTM,), configs, (True, False,)):
self._testCudnnCompatibleRnnCells(cfg["num_layers"], cfg["seq_length"],
cfg["num_units"], cfg["input_size"],
cfg["batch_size"], rnn, use_block_cell)
# TODO(jamesqin): Add CudnnCompatibleGRUBlockCell.
for rnn, cfg, use_block_cell in itertools.product(
(cudnn_rnn_ops.CUDNN_GRU,), configs, (False,)):
self._testCudnnCompatibleRnnCells(cfg["num_layers"], cfg["seq_length"],
cfg["num_units"], cfg["input_size"],
cfg["batch_size"], rnn, use_block_cell)
def _testCudnnCompatibleRnnCells(self, num_layers, seq_length, num_units,
input_size, batch_size, rnn_mode,
use_block_cell):
has_state_c = rnn_mode == cudnn_rnn_ops.CUDNN_LSTM
np.random.seed(0)
# Train graph
with ops.Graph().as_default():
random_seed.set_random_seed(299)
input_data = array_ops.placeholder(
dtypes.float32, shape=[seq_length, batch_size, input_size])
output_tuple, cudnn_model, cudnn_params = self._build_forward_cudnn_model(
rnn_mode, num_layers, num_units, input_data, is_training=True)
target_output = array_ops.placeholder(dtype=dtypes.float32, shape=None)
total_sum = sum(map(math_ops.reduce_sum, output_tuple))
loss_op = losses.log_loss(labels=target_output, predictions=total_sum)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1e-2)
train_op = optimizer.minimize(loss_op)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
# Train Cudnn model
with self.test_session(
use_gpu=True, graph=ops.get_default_graph()) as sess:
sess.run(variables.global_variables_initializer())
# Train 128 steps
num_steps = 128
for _ in range(num_steps):
inputs = np.random.rand(seq_length, batch_size,
input_size).astype(np.float32)
targets = np.random.rand()
sess.run(
train_op, feed_dict={input_data: inputs,
target_output: targets})
save_path = os.path.join(self.get_temp_dir(),
("cudnn-rnn-%s-test" % rnn_mode))
save_v = saver.save(sess, save_path)
self.assertEqual(save_path, save_v)
cudnn_params_v = sess.run(cudnn_params)
# cuDNN inference graph
with ops.Graph().as_default():
random_seed.set_random_seed(299)
cudnn_inputs = array_ops.placeholder(
dtypes.float32, shape=[seq_length, batch_size, input_size])
(cudnn_output_tuple, cudnn_model,
cudnn_params) = self._build_forward_cudnn_model(
rnn_mode, num_layers, num_units, cudnn_inputs, is_training=False)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
inference_input = np.random.rand(seq_length, batch_size,
input_size).astype(np.float32)
with self.test_session(
use_gpu=True, graph=ops.get_default_graph()) as sess:
sess.run(variables.global_variables_initializer())
saver.restore(sess, save_path)
restored_cudnn_params_v = sess.run(cudnn_params)
self.assertAllEqual(cudnn_params_v, restored_cudnn_params_v)
# Cudnn inference
cudnn_output = sess.run(
cudnn_output_tuple, feed_dict={cudnn_inputs: inference_input})
# Canonical RNN inference graph
with ops.Graph().as_default():
random_seed.set_random_seed(299)
cell_inputs = array_ops.placeholder(
dtypes.float32, shape=[seq_length, batch_size, input_size])
(output, states) = _create_cudnn_compatible_canonical_rnn(
cudnn_model, cell_inputs, use_block_cell)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
with self.test_session(
use_gpu=True, graph=ops.get_default_graph()) as sess:
saver.restore(sess, save_path)
# BlockCell inference
output_v, states_v = sess.run(
[output, states], feed_dict={cell_inputs: inference_input})
        # outputs across time steps are packed into one tensor.
self.assertAllClose(cudnn_output[0], output_v, atol=1e-6, rtol=1e-6)
for i in range(num_layers):
if has_state_c:
# output_h
self.assertAllClose(
cudnn_output[1][i, :], states_v[i].h, atol=1e-6, rtol=1e-6)
# output_c
self.assertAllClose(
cudnn_output[2][i, :], states_v[i].c, atol=1e-6, rtol=1e-6)
else:
self.assertAllClose(
cudnn_output[1][i, :], states_v[i], atol=1e-6, rtol=1e-6)
def _testSaveRestoreOutput(self, rnn_mode, dtype):
num_layers = 2
num_units = 7
input_size = 7
seq_length = 10
batch_size = 5
dir_count = 1
model = self._CreateModel(
rnn_mode, num_layers, num_units, input_size, dtype=dtype)
params_size_t = model.params_size()
params = variables.Variable(
array_ops.ones([params_size_t], dtype=dtype),
validate_shape=False,
dtype=dtype)
self._create_params_savable(params, model)
save_path = os.path.join(self.get_temp_dir(), "save-restore-output-test")
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
has_input_c = (rnn_mode == cudnn_rnn_ops.CUDNN_LSTM)
input_data = array_ops.ones(
[seq_length, batch_size, input_size], dtype=dtype)
input_h = array_ops.ones(
[num_layers * dir_count, batch_size, num_units], dtype=dtype)
if has_input_c:
input_c = array_ops.ones(
[num_layers * dir_count, batch_size, num_units], dtype=dtype)
outputs = model(
input_data=input_data,
input_h=input_h,
input_c=input_c,
params=params,
is_training=False)
else:
outputs = model(
input_data=input_data,
input_h=input_h,
params=params,
is_training=False)
total_sum = sum(map(math_ops.reduce_sum, outputs))
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
total_sum_v = sess.run(total_sum)
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
with self.test_session(use_gpu=True) as sess:
reset_params = state_ops.assign(params,
array_ops.zeros(
[params_size_t], dtype=dtype))
sess.run(reset_params)
saver.restore(sess, save_path)
total_sum_v_restored = sess.run(total_sum)
self.assertAllEqual(total_sum_v, total_sum_v_restored)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestore(self):
rnn_modes = [
cudnn_rnn_ops.CUDNN_LSTM, cudnn_rnn_ops.CUDNN_GRU,
cudnn_rnn_ops.CUDNN_RNN_TANH, cudnn_rnn_ops.CUDNN_RNN_RELU
]
dtype_list = [dtypes.float32, dtypes.float64]
for rnn_mode, dtype in itertools.product(rnn_modes, dtype_list):
self._testSaveRestoreVariable(rnn_mode, dtype)
self._testSaveRestoreTwoVariables(rnn_mode, dtype)
self._testSaveRestoreOutput(rnn_mode, dtype)
def _MinLSTMParamSize(self,
num_layers,
num_units,
input_size,
input_mode="auto_select",
direction=cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION):
if direction != cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION:
# TODO(zhengxq): support bidirection in parameter size estimate.
raise ValueError("Only unidirection in parameter size estimate")
first_layer_weights = 4 * num_units * (num_units + input_size)
higher_layer_weights = 8 * (num_layers - 1) * num_units * num_units
all_biases = 8 * num_layers * num_units
return first_layer_weights + higher_layer_weights + all_biases
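  # Worked example of the bound above (unidirectional LSTM): for num_layers=2,
  # num_units=7, input_size=3,
  #   first_layer_weights  = 4 * 7 * (7 + 3)     = 280
  #   higher_layer_weights = 8 * (2 - 1) * 7 * 7 = 392
  #   all_biases           = 8 * 2 * 7           = 112
  # so the minimum parameter count is 280 + 392 + 112 = 784.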
def _testOneLSTMParamsSize(self, num_layers, num_units, input_size):
min_params_size = self._MinLSTMParamSize(num_layers, num_units, input_size)
model = self._CreateModel(cudnn_rnn_ops.CUDNN_LSTM, num_layers, num_units,
input_size)
params_size = model.params_size()
with self.test_session(use_gpu=True) as sess:
params_size_v = sess.run(params_size)
self.assertLessEqual(min_params_size, params_size_v)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testLSTMParamsSize(self):
test_configs = [
[4, 200, 200],
[4, 200, 300],
[4, 200, 100],
[1, 100, 200],
[2, 200, 100],
[3, 200, 400],
]
with ops.Graph().as_default():
for (num_layers, num_units, input_size) in test_configs:
self._testOneLSTMParamsSize(num_layers, num_units, input_size)
def _testOneSimpleInference(self, rnn_mode, num_layers, num_units, input_size,
batch_size, seq_length, dir_count, dropout,
expected, tolerance):
random_seed.set_random_seed(5678)
model = self._CreateModel(
rnn_mode,
num_layers,
num_units,
input_size,
input_mode="auto_select",
dropout=dropout)
has_input_c = (rnn_mode == cudnn_rnn_ops.CUDNN_LSTM)
params_size_t = model.params_size()
input_data = array_ops.ones([seq_length, batch_size, input_size])
input_h = array_ops.ones([num_layers * dir_count, batch_size, num_units])
params = variables.Variable(
array_ops.ones([params_size_t]), validate_shape=False)
if has_input_c:
input_c = array_ops.ones([num_layers * dir_count, batch_size, num_units])
output, output_h, output_c = model(
input_data=input_data,
input_h=input_h,
input_c=input_c,
params=params,
is_training=False)
else:
output, output_h = model(
input_data=input_data,
input_h=input_h,
params=params,
is_training=False)
output_sum = math_ops.reduce_sum(output)
output_h_sum = math_ops.reduce_sum(output_h)
total_sum = output_sum + output_h_sum
if has_input_c:
output_c_sum = math_ops.reduce_sum(output_c)
total_sum += output_c_sum
with self.test_session(use_gpu=True, graph=ops.get_default_graph()) as sess:
sess.run(variables.global_variables_initializer())
total_sum_v = sess.run([total_sum])
self.assertAllClose(
total_sum_v[0], expected, atol=tolerance, rtol=tolerance)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleInference(self):
    # Cudnn scales the result for dropout during training, therefore dropout has no
    # impact on inference results.
    # (lstm, gru and rnn_tanh are saturated in the test; the rnn_relu case is the most
    # demonstrative of the dropout-invariant nature of CudnnRnn.)
test_configs = [
{
"rnn_mode": cudnn_rnn_ops.CUDNN_LSTM,
"dropout": [0., 0.5, 1.],
"expected": 231833.22,
"tolerance": 1e-2,
"shape": {
"num_layers": 4,
"num_units": 200,
"input_size": 200,
"batch_size": 20,
"seq_length": 10,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_GRU,
"dropout": [0., 0.5, 1.],
"expected": 56000,
"tolerance": 1e-2,
"shape": {
"num_layers": 4,
"num_units": 200,
"input_size": 200,
"batch_size": 20,
"seq_length": 10,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_RNN_TANH,
"dropout": [0., 0.5, 1.],
"expected": 56000,
"tolerance": 1e-2,
"shape": {
"num_layers": 4,
"num_units": 200,
"input_size": 200,
"batch_size": 20,
"seq_length": 10,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_RNN_RELU,
"dropout": [0., 0.5, 1.],
"expected": 130688,
"tolerance": 1e-2,
"shape": {
"num_layers": 2,
"num_units": 8,
"input_size": 4,
"batch_size": 4,
"seq_length": 2,
"dir_count": 1,
},
},
]
with ops.Graph().as_default():
for config in test_configs:
rnn_mode = config["rnn_mode"]
dropout_list = config.get("dropout", [0.])
expected = config["expected"]
tolerance = config["tolerance"]
shape = config["shape"]
for dropout in dropout_list:
self._testOneSimpleInference(
rnn_mode, shape["num_layers"], shape["num_units"],
shape["input_size"], shape["batch_size"], shape["seq_length"],
shape["dir_count"], dropout, expected, tolerance)
def _testOneSimpleTraining(self, rnn_mode, num_layers, num_units, input_size,
batch_size, seq_length, dir_count, dropout, dtype,
delta, tolerance):
# Gradient checking runs two forward ops with almost the same input. Need to
# make sure the drop patterns across the two runs are the same.
old_env_state = os.environ.get("TF_CUDNN_RESET_RND_GEN_STATE", str(False))
os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = str(True)
has_input_c = (rnn_mode == cudnn_rnn_ops.CUDNN_LSTM)
random_seed.set_random_seed(1234)
model = self._CreateModel(
rnn_mode,
num_layers,
num_units,
input_size,
dtype=dtype,
dropout=dropout)
params_size_t = model.params_size()
input_data = variables.Variable(
random_ops.random_uniform(
[seq_length, batch_size, input_size], dtype=dtype),
dtype=dtype)
input_h = variables.Variable(
random_ops.random_uniform(
[num_layers * dir_count, batch_size, num_units], dtype=dtype),
dtype=dtype)
params = variables.Variable(
random_ops.random_uniform([params_size_t], dtype=dtype),
validate_shape=False,
dtype=dtype)
if has_input_c:
input_c = variables.Variable(
random_ops.random_uniform(
[num_layers * dir_count, batch_size, num_units], dtype=dtype),
dtype=dtype)
output, output_h, output_c = model(
input_data=input_data,
input_h=input_h,
input_c=input_c,
params=params)
else:
output, output_h = model(
input_data=input_data, input_h=input_h, params=params)
output_sum = math_ops.reduce_sum(output)
output_h_sum = math_ops.reduce_sum(output_h)
total_sum = output_sum + output_h_sum
if has_input_c:
output_c_sum = math_ops.reduce_sum(output_c)
total_sum += output_c_sum
with self.test_session(use_gpu=True) as sess:
params_size_v = sess.run(params_size_t)
inputs_and_shapes = [
(input_data, [seq_length, batch_size, input_size]),
(input_h, [num_layers * dir_count, batch_size, num_units]),
(params, [params_size_v]),
]
if has_input_c:
inputs_and_shapes.append(
(input_c, [num_layers * dir_count, batch_size, num_units]),)
sess.run(variables.global_variables_initializer())
all_inputs = [entry[0] for entry in inputs_and_shapes]
all_shapes = [entry[1] for entry in inputs_and_shapes]
err = gradient_checker.compute_gradient_error(
all_inputs, all_shapes, total_sum, [1], delta=delta)
self.assertLess(err, tolerance)
os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = old_env_state
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTraining(self):
test_configs = [
{
"rnn_mode": cudnn_rnn_ops.CUDNN_LSTM,
"dropout": [0., 0.5, 1.],
"dtype": dtypes.float64,
"delta": 1e-4,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_GRU,
"dropout": [0., 0.5, 1.],
"dtype": dtypes.float64,
"delta": 1e-4,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_RNN_TANH,
"dropout": [0., 0.5, 1.],
"dtype": dtypes.float64,
"delta": 1e-4,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_RNN_RELU,
"dropout": [0., 0.5, 1.],
"dtype": dtypes.float64,
"delta": 1e-4,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_LSTM,
"dropout": [0., 0.5, 1.],
"dtype": dtypes.float32,
"tolerance": 1e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_GRU,
"dropout": [0., 0.5, 1.],
"dtype": dtypes.float32,
"tolerance": 4e-3,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_RNN_TANH,
"dropout": [0., 0.5, 1.],
"dtype": dtypes.float32,
"tolerance": 5e-3,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
"dir_count": 1,
},
},
{
"rnn_mode": cudnn_rnn_ops.CUDNN_RNN_RELU,
"dropout": [0., 0.5, 1.],
"dtype": dtypes.float32,
"tolerance": 4e-1,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
"dir_count": 1,
},
},
]
ops.reset_default_graph()
with ops.Graph().as_default():
for config in test_configs:
rnn_mode = config["rnn_mode"]
dropout_list = config.get("dropout", [0.])
dtype = config.get("dtype", dtypes.float32)
delta = config.get("delta", 1e-3)
tolerance = config["tolerance"]
shape = config["shape"]
for dropout in dropout_list:
self._testOneSimpleTraining(
rnn_mode, shape["num_layers"], shape["num_units"],
shape["input_size"], shape["batch_size"], shape["seq_length"],
shape["dir_count"], dropout, dtype, delta, tolerance)
if __name__ == "__main__":
googletest.main()
|
|
from numpy import *
from util.sentiment_util import *
from util.math_util import *
from util.adagrad import Adagrad
import cPickle, time, argparse
from collections import Counter
# compute model accuracy on a given fold
def validate(data, fold, params, deep, f=relu):
correct = 0.
total = 0.
for sent, label in data:
if len(sent) == 0:
continue
av = average(params[-1][:, sent], axis=1)
# forward prop
acts = zeros((deep, dh))
for i in range(0, deep):
start = i * 2
prev = av if i == 0 else acts[i - 1]
acts[i] = f(params[start].dot(prev) + params[start + 1])
Ws = params[deep * 2]
bs = params[deep * 2 + 1]
if deep == 0:
pred = softmax(Ws.dot(av) + bs).ravel()
else:
pred = softmax(Ws.dot(acts[-1]) + bs).ravel()
if argmax(pred) == label:
correct += 1
total += 1
print 'accuracy on ', fold, correct, total, str(correct / total), '\n'
return correct / total
# does both forward prop and backprop; returns the cost and (optionally) the gradient
def objective_and_grad(data, params, d, dh, len_voc, deep, labels, f=relu, df=drelu, compute_grad=True, word_drop=0.3, rho=1e-4, fine_tune=True):
params = unroll_params(params, d, dh, len_voc, deep=deep, labels=labels)
grads = init_grads(d, dh, len_voc, deep=deep, labels=labels)
error_sum = 0.0
for sent,label in data:
if len(sent) == 0:
continue
        # store each hidden layer's activations
acts = zeros((deep, dh))
target = zeros(labels)
target[label] = 1.0
        # input is the average embedding of the words kept after word dropout
curr_sent = []
mask = random.rand(len(sent)) > word_drop
for index, keep in enumerate(mask):
if keep:
curr_sent.append(sent[index])
# all examples must have at least one word
if len(curr_sent) == 0:
curr_sent = sent
av = average(params[-1][:, curr_sent], axis=1)
# forward prop
for i in range(0, deep):
start = i * 2
prev = av if i == 0 else acts[i - 1]
acts[i] = f(params[start].dot(prev) + params[start + 1])
# compute softmax error
Ws = params[deep * 2]
bs = params[deep * 2 + 1]
if deep == 0:
pred = softmax(Ws.dot(av) + bs).ravel()
error_sum += crossent(target, pred)
soft_delta = dcrossent(target, pred)
grads[deep * 2] += outer(soft_delta, av)
grads[deep * 2 + 1] += soft_delta
delta = Ws.T.dot(soft_delta)
if fine_tune:
grads[-1][:, curr_sent] += delta.reshape((d, 1)) / len(curr_sent)
else:
pred = softmax(Ws.dot(acts[-1]) + bs).ravel()
error_sum += crossent(target, pred)
soft_delta = dcrossent(target, pred)
grads[deep * 2] += outer(soft_delta, acts[-1])
grads[deep * 2 + 1] += soft_delta
# backprop
prev_delta = Ws.T.dot(soft_delta)
for i in range(deep - 1, -1, -1):
start = i * 2
deriv = df(acts[i])
delta = deriv * prev_delta
if i > 0:
grads[start] += outer(delta, acts[i-1])
grads[start + 1] += delta
prev_delta = params[start].T.dot(delta)
else:
grads[0] += outer(delta, av)
grads[1] += delta
if fine_tune:
grads[-1][:, curr_sent] += params[0].T.dot(delta).reshape((d, 1)) / len(curr_sent)
for index in range(0, len(params)):
error_sum += 0.5 * rho * sum(params[index] ** 2)
grads[index] += rho * params[index]
cost = error_sum / len(data)
grad = roll_params(grads) / len(data)
if compute_grad:
return cost, grad
else:
return cost
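# Hedged sketch (assumptions: init_params and roll_params come from util.math_util as
# imported above; the toy dimensions and the two fake sentences are made up purely to
# illustrate the cost/grad API and the compute_grad flag).
def _example_gradient_check():
    d, dh, len_voc, deep, labels = 4, 3, 10, 1, 2
    params = init_params(d, dh, deep=deep, labels=labels)
    params += ((random.rand(d, len_voc) * 2 - 1) * 0.08, )  # random word embeddings
    r = roll_params(params)
    data = [([1, 2, 3], 0), ([4, 5], 1)]                    # (word indices, label) pairs
    # word_drop=0.0 keeps every word, so both evaluations below see the same input
    cost, grad = objective_and_grad(data, r, d, dh, len_voc, deep, labels,
                                    word_drop=0.0)
    eps = 1e-6
    r2 = r.copy()
    r2[0] += eps
    cost2 = objective_and_grad(data, r2, d, dh, len_voc, deep, labels,
                               word_drop=0.0, compute_grad=False)
    print 'analytic vs numeric grad[0]:', grad[0], (cost2 - cost) / eps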
if __name__ == '__main__':
# command line arguments
parser = argparse.ArgumentParser(description='sentiment DAN')
parser.add_argument('-data', help='location of dataset', default='data/sentiment/')
parser.add_argument('-vocab', help='location of vocab', default='data/sentiment/wordMapAll.bin')
parser.add_argument('-We', help='location of word embeddings', default='data/sentiment_all_We')
parser.add_argument('-rand_We', help='randomly init word embeddings', type=int, default=0)
parser.add_argument('-binarize', help='binarize labels', type=int, default=0)
parser.add_argument('-d', help='word embedding dimension', type=int, default=300)
parser.add_argument('-dh', help='hidden dimension', type=int, default=300)
parser.add_argument('-deep', help='number of layers', type=int, default=3)
parser.add_argument('-drop', help='dropout probability', type=float, default=0.3)
parser.add_argument('-rho', help='regularization weight', type=float, default=1e-4)
parser.add_argument('-labels', help='number of labels', type=int, default=5)
parser.add_argument('-ft', help='fine tune word vectors', type=int, default=1)
parser.add_argument('-b', '--batch_size', help='adagrad minibatch size (ideal: 25 minibatches \
per epoch). for provided datasets, x for history and y for lit', type=int,\
default=15)
parser.add_argument('-ep', '--num_epochs', help='number of training epochs, can also determine \
dynamically via validate method', type=int, default=5)
parser.add_argument('-agr', '--adagrad_reset', help='reset sum of squared gradients after this many\
epochs', type=int, default=50)
parser.add_argument('-lr', help='adagrad initial learning rate', type=float, default=0.005)
parser.add_argument('-o', '--output', help='desired location of output model', \
default='models/sentiment_params.pkl')
args = vars(parser.parse_args())
d = args['d']
dh = args['dh']
# load data
train = cPickle.load(open(args['data']+'train-rootfine', 'rb'))
dev = cPickle.load(open(args['data']+'dev-rootfine', 'rb'))
test = cPickle.load(open(args['data']+'test-rootfine', 'rb'))
vocab = cPickle.load(open(args['vocab'], 'rb'))
len_voc = len(vocab)
for split in [train, dev, test]:
c = Counter()
tot = 0
for sent, label in split:
c[label] += 1
tot += 1
print c, tot
if args['rand_We']:
print 'randomly initializing word embeddings...'
orig_We = (random.rand(d, len_voc) * 2 - 1) * 0.08
else:
print 'loading pretrained word embeddings...'
orig_We = cPickle.load(open(args['We'], 'rb'))
# output log and parameter file destinations
param_file = args['output']
log_file = param_file.split('_')[0] + '_log'
# generate params / We
params = init_params(d, dh, deep=args['deep'], labels=args['labels'])
# add We matrix to params
params += (orig_We, )
r = roll_params(params)
dim = r.shape[0]
print 'parameter vector dimensionality:', dim
log = open(log_file, 'w')
# minibatch adagrad training
ag = Adagrad(r.shape, args['lr'])
min_error = float('inf')
for epoch in range(0, args['num_epochs']):
lstring = ''
# create mini-batches
random.shuffle(train)
batches = [train[x : x + args['batch_size']] for x in xrange(0, len(train),
args['batch_size'])]
epoch_error = 0.0
ep_t = time.time()
for batch_ind, batch in enumerate(batches):
now = time.time()
err, grad = objective_and_grad(batch, r, d, dh, len_voc,
args['deep'], args['labels'], word_drop=args['drop'],
fine_tune=args['ft'], rho=args['rho'])
update = ag.rescale_update(grad)
r = r - update
lstring = 'epoch: ' + str(epoch) + ' batch_ind: ' + str(batch_ind) + \
' error, ' + str(err) + ' time = '+ str(time.time()-now) + ' sec'
log.write(lstring + '\n')
log.flush()
epoch_error += err
# done with epoch
print time.time() - ep_t
print 'done with epoch ', epoch, ' epoch error = ', epoch_error, ' min error = ', min_error
lstring = 'done with epoch ' + str(epoch) + ' epoch error = ' + str(epoch_error) \
+ ' min error = ' + str(min_error) + '\n'
log.write(lstring)
log.flush()
# save parameters if the current model is better than previous best model
if epoch_error < min_error:
min_error = epoch_error
params = unroll_params(r, d, dh, len_voc, deep = args['deep'], labels=args['labels'])
# d_score = validate(dev, 'dev', params, args['deep'])
cPickle.dump( params, open(param_file, 'wb'))
log.flush()
# reset adagrad weights
if epoch % args['adagrad_reset'] == 0 and epoch != 0:
ag.reset_weights()
log.close()
# compute test score
params = unroll_params(r, d, dh, len_voc, deep = args['deep'], labels=args['labels'])
t_score = validate(test, 'test', params, args['deep'])
|
|
import pytest
from pnc_cli.swagger_client.apis.buildrecords_api import BuildrecordsApi
from pnc_cli.swagger_client.apis.buildconfigurations_api import BuildconfigurationsApi
from test import testutils
import pnc_cli.user_config as uc
@pytest.fixture(scope='function', autouse=True)
def get_builds_api():
global builds_api
builds_api = BuildrecordsApi(uc.user.get_api_client())
@pytest.fixture(scope='function', autouse=True)
def get_configs_api():
global configs_api
configs_api = BuildconfigurationsApi(uc.user.get_api_client())
def test_get_all_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'get_all')
def test_get_all():
records = builds_api.get_all().content
assert records is not None
def test_get_specific_no_id():
testutils.assert_raises_valueerror(builds_api, 'get_specific', id=None)
def test_get_specific_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'get_specific', id=1)
def test_get_specific():
records = builds_api.get_all().content
record = builds_api.get_specific(id=records[1].id).content
assert record is not None
def test_get_all_for_build_configuration_no_configuration_id():
testutils.assert_raises_valueerror(builds_api, 'get_all_for_build_configuration', configuration_id=None)
def test_get_all_for_build_configuration_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'get_all_for_build_configuration', configuration_id=1)
def test_get_all_for_build_configuration():
build_config = configs_api.get_all().content[1]
records = builds_api.get_all_for_build_configuration(configuration_id=build_config.id).content
assert records is not None
def test_get_all_for_project_no_project_id():
testutils.assert_raises_valueerror(builds_api, 'get_all_for_project', project_id=None)
def test_get_all_for_project_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'get_all_for_project', project_id=1)
def test_get_all_for_project():
records = builds_api.get_all_for_project(project_id=1).content
assert records is not None
def test_get_built_artifacts_no_id():
testutils.assert_raises_valueerror(builds_api, 'get_built_artifacts', id=None)
def test_get_built_artifacts_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'get_built_artifacts', id=1)
def test_get_built_artifacts():
records = builds_api.get_all(q='(buildConfigurationAudited.name=like=%cli-test%)').content
    record = records[-1]  # should be the latest build record
artifacts = builds_api.get_built_artifacts(id=record.id).content
assert artifacts is not None
def test_get_dependency_artifacts_no_id():
testutils.assert_raises_valueerror(builds_api, 'get_dependency_artifacts', id=None)
def test_get_dependency_artifacts_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'get_dependency_artifacts', id=1)
def test_get_dependency_artifacts():
records = builds_api.get_all(q='(buildConfigurationAudited.name=like=%cli-test%)').content
    record = records[-1]  # latest build performed
artifacts = builds_api.get_dependency_artifacts(id=record.id).content
assert artifacts is not None
def test_get_logs_no_id():
testutils.assert_raises_valueerror(builds_api, 'get_logs', id=None)
def test_get_logs_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'get_logs', id=1)
def test_get_logs():
record = builds_api.get_all().content[1]
log = builds_api.get_logs(id=record.id)
assert log is not None
def test_get_build_configuration_audited_no_id():
testutils.assert_raises_valueerror(builds_api, 'get_build_configuration_audited', id=None)
def test_get_build_configuration_audited_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'get_build_configuration_audited', id=1)
def test_get_build_configuration_audited():
record = builds_api.get_all().content[1]
audited = builds_api.get_build_configuration_audited(id=record.id).content
assert audited is not None
def test_get_attributes_no_id():
testutils.assert_raises_valueerror(builds_api, 'get_attributes', id=None)
def test_get_attributes_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'get_attributes', id=1)
def test_get_attributes():
record = builds_api.get_all().content[1]
builds_api.put_attribute(id=record.id, key='test_get_attributes', value='hi')
result = builds_api.get_attributes(id=record.id).content
assert result is not None
builds_api.remove_attribute(id=record.id, key='test_get_attributes')
def test_put_attribute_no_id():
testutils.assert_raises_valueerror(builds_api, 'put_attribute', id=None, key='key', value='value')
def test_put_attribute_no_key():
testutils.assert_raises_valueerror(builds_api, 'put_attribute', id=1, key=None, value='value')
def test_put_attribute_no_value():
testutils.assert_raises_valueerror(builds_api, 'put_attribute', id=1, key='key', value=None)
def test_put_attribute_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'put_attribute', id=1, key='key', value='value')
def test_put_attribute():
record = builds_api.get_all().content[1]
builds_api.put_attribute(id=record.id, key='test_put_attribute', value='value')
result = builds_api.get_specific(id=record.id).content
assert 'test_put_attribute' in result.attributes
def test_query_by_attribute_no_key():
testutils.assert_raises_valueerror(builds_api, 'query_by_attribute', key=None, value='value')
def test_query_by_attribute_no_value():
testutils.assert_raises_valueerror(builds_api, 'query_by_attribute', key='key', value=None)
def test_query_by_attribute_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'query_by_attribute', key='key', value='value')
def test_query_by_attribute():
record = builds_api.get_all(q='(buildConfigurationAudited.name=like=%cli-test%)').content[1]
builds_api.put_attribute(id=record.id, key='test_query_by_attribute', value='value')
result = builds_api.query_by_attribute(key='test_query_by_attribute', value='value')
assert result is not None
builds_api.remove_attribute(id=record.id, key='test_query_by_attribute')
def test_remove_attribute_no_id():
testutils.assert_raises_valueerror(builds_api, 'remove_attribute', id=None, key='key')
def test_remove_attribute_no_key():
testutils.assert_raises_valueerror(builds_api, 'remove_attribute', id=1, key=None)
def test_remove_attribute_invalid_param():
testutils.assert_raises_typeerror(builds_api, 'remove_attribute', id=1, key='key')
def test_remove_attribute():
record = builds_api.get_all().content[1]
builds_api.put_attribute(id=record.id, key='test_remove_attribute', value='value')
assert 'test_remove_attribute' in builds_api.get_specific(id=record.id).content.attributes
builds_api.remove_attribute(id=record.id, key='test_remove_attribute')
assert 'test_remove_attribute' not in builds_api.get_specific(id=record.id).content.attributes
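# The tests above delegate their negative assertions to helpers in
# test.testutils.  The functions below are only a sketch of what such helpers
# might look like (the names ending in _sketch and their bodies are
# assumptions, not the pnc_cli implementation): a required argument set to
# None should raise ValueError, and an unexpected keyword argument should
# raise TypeError.
import pytest


def assert_raises_valueerror_sketch(api, method_name, **kwargs):
    # e.g. get_specific(id=None) must be rejected by the client
    with pytest.raises(ValueError):
        getattr(api, method_name)(**kwargs)


def assert_raises_typeerror_sketch(api, method_name, **kwargs):
    # an argument the method does not accept must be rejected
    with pytest.raises(TypeError):
        getattr(api, method_name)(unexpected_kwarg='x', **kwargs)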
|
|
# Copyright 2011 Jamie Norrish ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db.models import Q
from tmapi.exceptions import IllegalArgumentException
from tmapi.indices.index import Index
from tmapi.models import Name, Occurrence, Topic
from tmapi.models.variant import Variant
class ScopedIndex (Index):
def get_associations (self, themes=None, match_all=False):
"""Returns the `Association`s in the topic map whose scope
property contains at least one of the specified `themes`.
If `themes` is None, all `Association`s in the unconstrained
scope are returned.
If `match_all` is True, the scope property of an association
must match all `themes`.
The return value may be empty but must never be None.
:param themes: scope of the `Association`s to be returned
:type themes: `Topic` or list of `Topic`s
:param match_all: whether an `Association`'s scope property
must match all `themes`
:type match_all: boolean
:rtype: `QuerySet` of `Association`s
"""
associations = self.topic_map.get_associations()
associations = self._get_constructs(associations, themes, match_all)
return associations
def get_association_themes (self):
"""Returns the topics in the topic map used in the scope
property of `Association`s.
The return value may be empty but must never be None.
:rtype: `QuerySet` of `Topic`s
"""
return self.topic_map.get_topics().exclude(scoped_associations=None)
def get_names (self, themes=None, match_all=False):
"""Returns the `Name`s in the topic map whose scope
property contains at least one of the specified `themes`.
If `themes` is None, all `Name`s in the unconstrained
scope are returned.
If `match_all` is True, the scope property of a name
must match all `themes`.
The return value may be empty but must never be None.
:param themes: scope of the `Name`s to be returned
:type themes: `Topic` or list of `Topic`s
:param match_all: whether a `Name`'s scope property must match
all `themes`
:type match_all: boolean
:rtype: `QuerySet` of `Name`s
"""
names = Name.objects.filter(topic__topic_map=self.topic_map)
names = self._get_constructs(names, themes, match_all)
return names
def get_name_themes (self):
"""Returns the topics in the topic map used in the scope
property of `Name`s.
The return value may be empty but must never be None.
:rtype: `QuerySet` of `Topic`s
"""
return self.topic_map.get_topics().exclude(scoped_names=None)
def get_occurrences (self, themes=None, match_all=False):
"""Returns the `Occurrence`s in the topic map whose scope
property contains at least one of the specified `themes`.
If `themes` is None, all `Occurrence`s in the unconstrained
scope are returned.
If `match_all` is True, the scope property of an occurrence
must match all `themes`.
The return value may be empty but must never be None.
:param themes: scope of the `Occurrence`s to be returned
:type themes: `Topic` or list of `Topic`s
        :param match_all: whether an `Occurrence`'s scope property must
match all `themes`
:type match_all: boolean
:rtype: `QuerySet` of `Occurrence`s
"""
occurrences = Occurrence.objects.filter(topic__topic_map=self.topic_map)
occurrences = self._get_constructs(occurrences, themes, match_all)
return occurrences
def get_occurrence_themes (self):
"""Returns the topics in the topic map used in the scope
property of `Occurrence`s.
The return value may be empty but must never be None.
:rtype: `QuerySet` of `Topic`s
"""
return self.topic_map.get_topics().exclude(scoped_occurrences=None)
def get_variants (self, themes, match_all=False):
"""Returns the `Variant`s in the topic map whose scope
property contains the specified `theme`, or one of the
specified `themes` (if `match_all` is False), or all of the
specified `themes` (if `match_all` is True).
The return value may be empty but must never be None.
:param theme: the `Topic` that must be part of the scope
:type theme: `Topic`
:param themes: scope of the `Variant`s to be returned
:type themes: list of `Topic`s
:param match_all: whether a `Variant`'s scope property must
match all `themes`
:type match_all: boolean
:rtype: `QuerySet` of `Variant`s
"""
variants = Variant.objects.filter(name__topic__topic_map=self.topic_map)
if themes is not None:
if isinstance(themes, Topic):
variants = variants.filter(Q(scope=themes) |
Q(name__scope=themes))
elif match_all:
for theme in themes:
variants = variants.filter(Q(scope=theme) |
Q(name__scope=theme))
else:
query = None
for theme in themes:
if query is None:
query = Q(scope=theme) | Q(name__scope=theme)
else:
query = query | Q(scope=theme) | Q(name__scope=theme)
variants = variants.filter(query)
else:
raise IllegalArgumentException('themes must not be None')
return variants.distinct()
def get_variant_themes (self):
"""Returns the topics in the topic map used in the scope
property of `Variant`s.
The return value may be empty but must never be None.
:rtype: `QuerySet` of `Topic`s
"""
return self.topic_map.get_topics().exclude(scoped_variants=None,
scoped_names=None)
def _get_constructs (self, constructs, themes, match_all):
"""Returns those members of `constructs` whose scope property
contains at least one of the specified `themes`.
If `themes` is None, all members of `constructs` in the
unconstrained scope are returned.
If `match_all` is True, the scope property of a member of
`constructs` must match all `themes`.
The return value may be empty but must never be None.
:param constructs: `Construct`s to be filtered
:type constructs: `QuerySet` of `Construct`s
:param themes: scope of the members of `constructs` to be returned
:type themes: `Topic` or list of `Topic`s
:param match_all: whether a member of `constructs`'s scope
property must match all `themes`
:type match_all: boolean
:rtype: `QuerySet` of `Construct`s
"""
if themes is not None:
if isinstance(themes, Topic):
constructs = constructs.filter(scope=themes)
elif match_all:
for theme in themes:
constructs = constructs.filter(scope=theme)
else:
query = None
for theme in themes:
if query is None:
query = Q(scope=theme)
else:
query = query | Q(scope=theme)
constructs = constructs.filter(query)
else:
if match_all:
raise IllegalArgumentException(
'match_all must not be specified if themes is None')
constructs = constructs.filter(scope=None)
return constructs.distinct()
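# The `_get_constructs` helper above builds its "any theme" filter by OR-ing
# Q objects together in a loop.  The function below is just an equivalent
# sketch of that pattern using reduce(); it is illustrative and is not used
# by this module.
from functools import reduce
from operator import or_


def _filter_any_theme_sketch(constructs, themes):
    """Keep members of `constructs` whose scope contains at least one theme."""
    query = reduce(or_, (Q(scope=theme) for theme in themes))
    return constructs.filter(query).distinct()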
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.translation.v3beta1 TranslationService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import google.api_core.protobuf_helpers
import grpc
from google.cloud.translate_v3beta1.gapic import translation_service_client_config
from google.cloud.translate_v3beta1.gapic.transports import (
translation_service_grpc_transport,
)
from google.cloud.translate_v3beta1.proto import translation_service_pb2
from google.cloud.translate_v3beta1.proto import translation_service_pb2_grpc
from google.longrunning import operations_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-translate"
).version
class TranslationServiceClient(object):
"""Provides natural language translation operations."""
SERVICE_ADDRESS = "translate.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.translation.v3beta1.TranslationService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranslationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def glossary_path(cls, project, location, glossary):
"""DEPRECATED. Return a fully-qualified glossary string."""
warnings.warn(
"Resource name helper functions are deprecated.",
PendingDeprecationWarning,
stacklevel=1,
)
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/glossaries/{glossary}",
project=project,
location=location,
glossary=glossary,
)
@classmethod
def location_path(cls, project, location):
"""DEPRECATED. Return a fully-qualified location string."""
warnings.warn(
"Resource name helper functions are deprecated.",
PendingDeprecationWarning,
stacklevel=1,
)
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}",
project=project,
location=location,
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.TranslationServiceGrpcTransport,
Callable[[~.Credentials, type], ~.TranslationServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = translation_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=translation_service_grpc_transport.TranslationServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = translation_service_grpc_transport.TranslationServiceGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def translate_text(
self,
contents,
target_language_code,
parent,
mime_type=None,
source_language_code=None,
model=None,
glossary_config=None,
labels=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Translates input text and returns translated text.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> # TODO: Initialize `contents`:
>>> contents = []
>>>
>>> # TODO: Initialize `target_language_code`:
>>> target_language_code = ''
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> response = client.translate_text(contents, target_language_code, parent)
Args:
contents (list[str]): Required. The content of the input in string format.
We recommend the total content be less than 30k codepoints.
Use BatchTranslateText for larger text.
target_language_code (str): Required. The BCP-47 language code to use for translation of the input
text, set to one of the language codes listed in Language Support.
parent (str): Required. Project or location to make a call. Must refer to a caller's
project.
Format: ``projects/{project-id}`` or
``projects/{project-id}/locations/{location-id}``.
For global calls, use ``projects/{project-id}/locations/global`` or
``projects/{project-id}``.
Non-global location is required for requests using AutoML models or
custom glossaries.
Models and glossaries must be within the same region (have same
location-id), otherwise an INVALID\_ARGUMENT (400) error is returned.
mime_type (str): Optional. The format of the source text, for example, "text/html",
"text/plain". If left blank, the MIME type defaults to "text/html".
source_language_code (str): Optional. The BCP-47 language code of the input text if
known, for example, "en-US" or "sr-Latn". Supported language codes are
listed in Language Support. If the source language isn't specified, the API
attempts to identify the source language automatically and returns the
source language within the response.
model (str): Optional. The ``model`` type requested for this translation.
The format depends on model type:
- AutoML Translation models:
``projects/{project-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-id}/locations/{location-id}/models/general/nmt``,
``projects/{project-id}/locations/{location-id}/models/general/base``
For global (non-regionalized) requests, use ``location-id`` ``global``.
For example,
``projects/{project-id}/locations/global/models/general/nmt``.
If missing, the system decides which google base model to use.
glossary_config (Union[dict, ~google.cloud.translate_v3beta1.types.TranslateTextGlossaryConfig]): Optional. Glossary to be applied. The glossary must be within the same
region (have the same location-id) as the model, otherwise an
INVALID\_ARGUMENT (400) error is returned.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.TranslateTextGlossaryConfig`
labels (dict[str -> str]): Optional. The labels with user-defined metadata for the request.
Label keys and values can be no longer than 63 characters
(Unicode codepoints), can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter.
See https://cloud.google.com/translate/docs/labels for more information.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types.TranslateTextResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "translate_text" not in self._inner_api_calls:
self._inner_api_calls[
"translate_text"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.translate_text,
default_retry=self._method_configs["TranslateText"].retry,
default_timeout=self._method_configs["TranslateText"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.TranslateTextRequest(
contents=contents,
target_language_code=target_language_code,
parent=parent,
mime_type=mime_type,
source_language_code=source_language_code,
model=model,
glossary_config=glossary_config,
labels=labels,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["translate_text"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def detect_language(
self,
parent,
model=None,
content=None,
mime_type=None,
labels=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Detects the language of text within a request.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> response = client.detect_language(parent)
Args:
parent (str): Required. Project or location to make a call. Must refer to a caller's
project.
Format: ``projects/{project-id}/locations/{location-id}`` or
``projects/{project-id}``.
For global calls, use ``projects/{project-id}/locations/global`` or
``projects/{project-id}``.
                Only models within the same region (have the same location-id) can be
                used; otherwise an INVALID\_ARGUMENT (400) error is returned.
model (str): Optional. The language detection model to be used.
Format:
``projects/{project-id}/locations/{location-id}/models/language-detection/{model-id}``
Only one language detection model is currently supported:
``projects/{project-id}/locations/{location-id}/models/language-detection/default``.
If not specified, the default model is used.
content (str): The content of the input stored as a string.
mime_type (str): Optional. The format of the source text, for example, "text/html",
"text/plain". If left blank, the MIME type defaults to "text/html".
labels (dict[str -> str]): Optional. The labels with user-defined metadata for the request.
Label keys and values can be no longer than 63 characters
(Unicode codepoints), can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter.
See https://cloud.google.com/translate/docs/labels for more information.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types.DetectLanguageResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "detect_language" not in self._inner_api_calls:
self._inner_api_calls[
"detect_language"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.detect_language,
default_retry=self._method_configs["DetectLanguage"].retry,
default_timeout=self._method_configs["DetectLanguage"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(content=content)
request = translation_service_pb2.DetectLanguageRequest(
parent=parent,
model=model,
content=content,
mime_type=mime_type,
labels=labels,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["detect_language"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_supported_languages(
self,
parent,
display_language_code=None,
model=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns a list of supported languages for translation.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> response = client.get_supported_languages(parent)
Args:
parent (str): Required. Project or location to make a call. Must refer to a caller's
project.
Format: ``projects/{project-id}`` or
``projects/{project-id}/locations/{location-id}``.
For global calls, use ``projects/{project-id}/locations/global`` or
``projects/{project-id}``.
Non-global location is required for AutoML models.
Only models within the same region (have same location-id) can be used,
otherwise an INVALID\_ARGUMENT (400) error is returned.
display_language_code (str): Optional. The language to use to return localized, human readable names
of supported languages. If missing, then display names are not returned
in a response.
model (str): Optional. Get supported languages of this model.
The format depends on model type:
- AutoML Translation models:
``projects/{project-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-id}/locations/{location-id}/models/general/nmt``,
``projects/{project-id}/locations/{location-id}/models/general/base``
Returns languages supported by the specified model. If missing, we get
supported languages of Google general base (PBMT) model.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types.SupportedLanguages` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_supported_languages" not in self._inner_api_calls:
self._inner_api_calls[
"get_supported_languages"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_supported_languages,
default_retry=self._method_configs["GetSupportedLanguages"].retry,
default_timeout=self._method_configs["GetSupportedLanguages"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.GetSupportedLanguagesRequest(
parent=parent, display_language_code=display_language_code, model=model
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_supported_languages"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def batch_translate_text(
self,
parent,
source_language_code,
target_language_codes,
input_configs,
output_config,
models=None,
glossaries=None,
labels=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Translates a large volume of text in asynchronous batch mode.
This function provides real-time output as the inputs are being processed.
        If the caller cancels a request, the partial results (for an input file,
        it's all or nothing) may still be available in the specified output
        location.
This call returns immediately and you can
use google.longrunning.Operation.name to poll the status of the call.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `source_language_code`:
>>> source_language_code = ''
>>>
>>> # TODO: Initialize `target_language_codes`:
>>> target_language_codes = []
>>>
>>> # TODO: Initialize `input_configs`:
>>> input_configs = []
>>>
>>> # TODO: Initialize `output_config`:
>>> output_config = {}
>>>
>>> response = client.batch_translate_text(parent, source_language_code, target_language_codes, input_configs, output_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. Location to make a call. Must refer to a caller's project.
Format: ``projects/{project-id}/locations/{location-id}``.
The ``global`` location is not supported for batch translation.
Only AutoML Translation models or glossaries within the same region
(have the same location-id) can be used, otherwise an INVALID\_ARGUMENT
(400) error is returned.
source_language_code (str): Required. Source language code.
target_language_codes (list[str]): Required. Specify up to 10 language codes here.
input_configs (list[Union[dict, ~google.cloud.translate_v3beta1.types.InputConfig]]): Required. Input configurations.
The total number of files matched should be <= 1000.
The total content size should be <= 100M Unicode codepoints.
The files must use UTF-8 encoding.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.InputConfig`
output_config (Union[dict, ~google.cloud.translate_v3beta1.types.OutputConfig]): Required. Output configuration.
                If two input configs match the same file (that is, the same input path),
we don't generate output for duplicate inputs.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.OutputConfig`
models (dict[str -> str]): Optional. The models to use for translation. Map's key is target
language code. Map's value is model name. Value can be a built-in
general model, or an AutoML Translation model.
The value format depends on model type:
- AutoML Translation models:
``projects/{project-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-id}/locations/{location-id}/models/general/nmt``,
``projects/{project-id}/locations/{location-id}/models/general/base``
If the map is empty or a specific model is not requested for a language
pair, then default google model (nmt) is used.
glossaries (dict[str -> Union[dict, ~google.cloud.translate_v3beta1.types.TranslateTextGlossaryConfig]]): Optional. Glossaries to be applied for translation.
It's keyed by target language code.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.TranslateTextGlossaryConfig`
labels (dict[str -> str]): Optional. The labels with user-defined metadata for the request.
Label keys and values can be no longer than 63 characters
(Unicode codepoints), can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter.
See https://cloud.google.com/translate/docs/labels for more information.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_translate_text" not in self._inner_api_calls:
self._inner_api_calls[
"batch_translate_text"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_translate_text,
default_retry=self._method_configs["BatchTranslateText"].retry,
default_timeout=self._method_configs["BatchTranslateText"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.BatchTranslateTextRequest(
parent=parent,
source_language_code=source_language_code,
target_language_codes=target_language_codes,
input_configs=input_configs,
output_config=output_config,
models=models,
glossaries=glossaries,
labels=labels,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["batch_translate_text"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
translation_service_pb2.BatchTranslateResponse,
metadata_type=translation_service_pb2.BatchTranslateMetadata,
)
def create_glossary(
self,
parent,
glossary,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a glossary and returns the long-running operation. Returns
NOT\_FOUND, if the project doesn't exist.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `glossary`:
>>> glossary = {}
>>>
>>> response = client.create_glossary(parent, glossary)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The project name.
glossary (Union[dict, ~google.cloud.translate_v3beta1.types.Glossary]): Required. The glossary to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.translate_v3beta1.types.Glossary`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_glossary" not in self._inner_api_calls:
self._inner_api_calls[
"create_glossary"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_glossary,
default_retry=self._method_configs["CreateGlossary"].retry,
default_timeout=self._method_configs["CreateGlossary"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.CreateGlossaryRequest(
parent=parent, glossary=glossary
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["create_glossary"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
translation_service_pb2.Glossary,
metadata_type=translation_service_pb2.CreateGlossaryMetadata,
)
def list_glossaries(
self,
parent,
page_size=None,
filter_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists glossaries in a project. Returns NOT\_FOUND, if the project
doesn't exist.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_glossaries(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_glossaries(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The name of the project from which to list all of the glossaries.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): Optional. Filter specifying constraints of a list operation.
Filtering is not supported yet, and the parameter currently has no effect.
If missing, no filtering is performed.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.translate_v3beta1.types.Glossary` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_glossaries" not in self._inner_api_calls:
self._inner_api_calls[
"list_glossaries"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_glossaries,
default_retry=self._method_configs["ListGlossaries"].retry,
default_timeout=self._method_configs["ListGlossaries"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.ListGlossariesRequest(
parent=parent, page_size=page_size, filter=filter_
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_glossaries"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="glossaries",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def get_glossary(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a glossary. Returns NOT\_FOUND, if the glossary doesn't exist.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> name = client.glossary_path('[PROJECT]', '[LOCATION]', '[GLOSSARY]')
>>>
>>> response = client.get_glossary(name)
Args:
name (str): Required. The name of the glossary to retrieve.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types.Glossary` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_glossary" not in self._inner_api_calls:
self._inner_api_calls[
"get_glossary"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_glossary,
default_retry=self._method_configs["GetGlossary"].retry,
default_timeout=self._method_configs["GetGlossary"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.GetGlossaryRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_glossary"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_glossary(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a glossary, or cancels glossary construction if the glossary
isn't created yet. Returns NOT\_FOUND, if the glossary doesn't exist.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> name = client.glossary_path('[PROJECT]', '[LOCATION]', '[GLOSSARY]')
>>>
>>> response = client.delete_glossary(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. The name of the glossary to delete.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_glossary" not in self._inner_api_calls:
self._inner_api_calls[
"delete_glossary"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_glossary,
default_retry=self._method_configs["DeleteGlossary"].retry,
default_timeout=self._method_configs["DeleteGlossary"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.DeleteGlossaryRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["delete_glossary"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
translation_service_pb2.DeleteGlossaryResponse,
metadata_type=translation_service_pb2.DeleteGlossaryMetadata,
)
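# Minimal usage sketch (illustrative only): the project id, location, text,
# and target language below are placeholders, and credentials are resolved
# from the environment as described in the constructor docstring above.
if __name__ == "__main__":  # pragma: no cover
    _client = TranslationServiceClient()
    _parent = _client.location_path("my-project", "global")
    _response = _client.translate_text(
        contents=["Hello, world!"],
        target_language_code="es",
        parent=_parent,
        mime_type="text/plain",
    )
    for _translation in _response.translations:
        print(_translation.translated_text)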
|
|
from io import StringIO
from os import path as opath
from _plotly_utils.basevalidators import (
BaseDataValidator,
CompoundValidator,
CompoundArrayValidator,
)
from codegen.datatypes import (
reindent_validator_description,
add_constructor_params,
add_docstring,
)
from codegen.utils import PlotlyNode, write_source_py
import inflect
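# For each trace type under `trace_node`, the builder below emits an
# `add_<trace>` method on the generated Figure class that forwards the trace
# properties plus `row`/`col` (and `secondary_y` for xy traces) to
# `add_trace`.  For example, the generated class can then be used as:
#
#     fig = make_subplots(rows=1, cols=2)
#     fig.add_scatter(y=[1, 3, 2], row=1, col=2)
#
# (`make_subplots` here refers to plotly.subplots.make_subplots.)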
def build_figure_py(
trace_node,
base_package,
base_classname,
fig_classname,
data_validator,
layout_validator,
frame_validator,
subplot_nodes,
layout_array_nodes,
):
"""
Parameters
----------
trace_node : PlotlyNode
Root trace node (the node that is the parent of all of the
individual trace nodes like bar, scatter, etc.)
base_package : str
Package that the figure's superclass resides in
base_classname : str
Name of the figure's superclass
fig_classname : str
Name of the Figure class to be generated
data_validator : BaseDataValidator
DataValidator instance
layout_validator : CompoundValidator
LayoutValidator instance
frame_validator : CompoundArrayValidator
FrameValidator instance
    subplot_nodes: list of PlotlyNode
        List of nodes for all of the layout subplot properties
layout_array_nodes: list of PlotlyNode
List of array nodes under layout that can be positioned using xref/yref
Returns
-------
str
Source code for figure class definition
"""
# Initialize source code buffer
# -----------------------------
buffer = StringIO()
# Get list of trace type nodes
# ----------------------------
trace_nodes = trace_node.child_compound_datatypes
# Write imports
# -------------
# ### Import base class ###
buffer.write(f"from plotly.{base_package} import {base_classname}\n")
# Write class definition
# ----------------------
buffer.write(
f"""
class {fig_classname}({base_classname}):\n"""
)
# ### Constructor ###
# Build constructor description strings
data_description = reindent_validator_description(data_validator, 8)
layout_description = reindent_validator_description(layout_validator, 8)
frames_description = reindent_validator_description(frame_validator, 8)
buffer.write(
f"""
def __init__(self, data=None, layout=None,
frames=None, skip_invalid=False, **kwargs):
\"\"\"
Create a new :class:{fig_classname} instance
Parameters
----------
data
{data_description}
layout
{layout_description}
frames
{frames_description}
skip_invalid: bool
If True, invalid properties in the figure specification will be
skipped silently. If False (default) invalid properties in the
figure specification will result in a ValueError
Raises
------
ValueError
if a property in the specification of data, layout, or frames
is invalid AND skip_invalid is False
\"\"\"
        super({fig_classname}, self).__init__(data, layout,
frames, skip_invalid,
**kwargs)
"""
)
# ### add_trace methods for each trace type ###
for trace_node in trace_nodes:
include_secondary_y = bool(
[d for d in trace_node.child_datatypes if d.name_property == "yaxis"]
)
# #### Function signature ####
buffer.write(
f"""
def add_{trace_node.plotly_name}(self"""
)
# #### Function params####
param_extras = ["row", "col"]
if include_secondary_y:
param_extras.append("secondary_y")
add_constructor_params(
buffer, trace_node.child_datatypes, append_extras=param_extras
)
# #### Docstring ####
header = f"Add a new {trace_node.name_datatype_class} trace"
doc_extras = [
(
"row : int or None (default)",
"Subplot row index (starting from 1) for the trace to be "
"added. Only valid if figure was created using "
"`plotly.tools.make_subplots`",
),
(
"col : int or None (default)",
"Subplot col index (starting from 1) for the trace to be "
"added. Only valid if figure was created using "
"`plotly.tools.make_subplots`",
),
]
if include_secondary_y:
doc_extras.append(
(
"secondary_y: boolean or None (default None)",
"""\
If True, associate this trace with the secondary y-axis of the
subplot at the specified row and col. Only valid if all of the
following conditions are satisfied:
* The figure was created using `plotly.subplots.make_subplots`.
* The row and col arguments are not None
* The subplot at the specified row and col has type xy
(which is the default) and secondary_y True. These
properties are specified in the specs argument to
make_subplots. See the make_subplots docstring for more info.\
""",
)
)
add_docstring(
buffer,
trace_node,
header,
append_extras=doc_extras,
return_type=fig_classname,
)
# #### Function body ####
buffer.write(
f"""
from plotly.graph_objs import {trace_node.name_datatype_class}
new_trace = {trace_node.name_datatype_class}(
"""
)
for i, subtype_node in enumerate(trace_node.child_datatypes):
subtype_prop_name = subtype_node.name_property
buffer.write(
f"""
{subtype_prop_name}={subtype_prop_name},"""
)
buffer.write(
f"""
**kwargs)"""
)
if include_secondary_y:
secondary_y_kwarg = ", secondary_y=secondary_y"
else:
secondary_y_kwarg = ""
buffer.write(
f"""
return self.add_trace(
new_trace, row=row, col=col{secondary_y_kwarg})"""
)
# update layout subplots
# ----------------------
inflect_eng = inflect.engine()
for subplot_node in subplot_nodes:
singular_name = subplot_node.name_property
plural_name = inflect_eng.plural_noun(singular_name)
if singular_name == "yaxis":
secondary_y_1 = ", secondary_y=None"
secondary_y_2 = ", secondary_y=secondary_y"
secondary_y_docstring = f"""
secondary_y: boolean or None (default None)
* If True, only select yaxis objects associated with the secondary
y-axis of the subplot.
* If False, only select yaxis objects associated with the primary
y-axis of the subplot.
* If None (the default), do not filter yaxis objects based on
a secondary y-axis condition.
To select yaxis objects by secondary y-axis, the Figure must
have been created using plotly.subplots.make_subplots. See
the docstring for the specs argument to make_subplots for more
info on creating subplots with secondary y-axes."""
else:
secondary_y_1 = ""
secondary_y_2 = ""
secondary_y_docstring = ""
buffer.write(
f"""
def select_{plural_name}(
self, selector=None, row=None, col=None{secondary_y_1}):
\"\"\"
Select {singular_name} subplot objects from a particular subplot cell
and/or {singular_name} subplot objects that satisfy custom selection
criteria.
Parameters
----------
selector: dict or None (default None)
Dict to use as selection criteria.
{singular_name} objects will be selected if they contain
properties corresponding to all of the dictionary's keys, with
values that exactly match the supplied values. If None
(the default), all {singular_name} objects are selected.
row, col: int or None (default None)
Subplot row and column index of {singular_name} objects to select.
To select {singular_name} objects by row and column, the Figure
must have been created using plotly.subplots.make_subplots.
If None (the default), all {singular_name} objects are selected.\
{secondary_y_docstring}
Returns
-------
generator
Generator that iterates through all of the {singular_name}
objects that satisfy all of the specified selection criteria
\"\"\"
return self._select_layout_subplots_by_prefix(
'{singular_name}', selector, row, col{secondary_y_2})
def for_each_{singular_name}(
self, fn, selector=None, row=None, col=None{secondary_y_1}):
\"\"\"
Apply a function to all {singular_name} objects that satisfy the
specified selection criteria
Parameters
----------
fn:
Function that inputs a single {singular_name} object.
selector: dict or None (default None)
Dict to use as selection criteria.
{singular_name} objects will be selected if they contain
properties corresponding to all of the dictionary's keys, with
values that exactly match the supplied values. If None
(the default), all {singular_name} objects are selected.
row, col: int or None (default None)
Subplot row and column index of {singular_name} objects to select.
To select {singular_name} objects by row and column, the Figure
must have been created using plotly.subplots.make_subplots.
If None (the default), all {singular_name} objects are selected.\
{secondary_y_docstring}
Returns
-------
self
Returns the Figure object that the method was called on
\"\"\"
for obj in self.select_{plural_name}(
selector=selector, row=row, col=col{secondary_y_2}):
fn(obj)
return self
def update_{plural_name}(
self,
patch=None,
selector=None,
overwrite=False,
row=None, col=None{secondary_y_1},
**kwargs):
\"\"\"
Perform a property update operation on all {singular_name} objects
that satisfy the specified selection criteria
Parameters
----------
patch: dict
Dictionary of property updates to be applied to all
{singular_name} objects that satisfy the selection criteria.
selector: dict or None (default None)
Dict to use as selection criteria.
{singular_name} objects will be selected if they contain
properties corresponding to all of the dictionary's keys, with
values that exactly match the supplied values. If None
(the default), all {singular_name} objects are selected.
overwrite: bool
If True, overwrite existing properties. If False, apply updates
to existing properties recursively, preserving existing
properties that are not specified in the update operation.
row, col: int or None (default None)
Subplot row and column index of {singular_name} objects to select.
To select {singular_name} objects by row and column, the Figure
must have been created using plotly.subplots.make_subplots.
If None (the default), all {singular_name} objects are selected.\
{secondary_y_docstring}
**kwargs
Additional property updates to apply to each selected
{singular_name} object. If a property is specified in
both patch and in **kwargs then the one in **kwargs
takes precedence.
Returns
-------
self
Returns the Figure object that the method was called on
\"\"\"
for obj in self.select_{plural_name}(
selector=selector, row=row, col=col{secondary_y_2}):
obj.update(patch, overwrite=overwrite, **kwargs)
return self"""
)
# update annotations/shapes/images
# --------------------------------
for node in layout_array_nodes:
singular_name = node.plotly_name
plural_name = node.name_property
if singular_name == "image":
# Rename image to layout_image to avoid conflict with an image trace
method_prefix = "layout_"
else:
method_prefix = ""
buffer.write(
f"""
def select_{method_prefix}{plural_name}(
self, selector=None, row=None, col=None, secondary_y=None
):
\"\"\"
Select {plural_name} from a particular subplot cell and/or {plural_name}
that satisfy custom selection criteria.
Parameters
----------
selector: dict or None (default None)
Dict to use as selection criteria.
            {plural_name.capitalize()} will be selected if they contain properties corresponding
to all of the dictionary's keys, with values that exactly match
the supplied values. If None (the default), all {plural_name} are
selected.
row, col: int or None (default None)
Subplot row and column index of {plural_name} to select.
To select {plural_name} by row and column, the Figure must have been
created using plotly.subplots.make_subplots. To select only those
            {plural_name} that are in paper coordinates, set row and col to the
string 'paper'. If None (the default), all {plural_name} are selected.
secondary_y: boolean or None (default None)
* If True, only select {plural_name} associated with the secondary
y-axis of the subplot.
* If False, only select {plural_name} associated with the primary
y-axis of the subplot.
* If None (the default), do not filter {plural_name} based on secondary
y-axis.
To select {plural_name} by secondary y-axis, the Figure must have been
created using plotly.subplots.make_subplots. See the docstring
for the specs argument to make_subplots for more info on
creating subplots with secondary y-axes.
Returns
-------
generator
Generator that iterates through all of the {plural_name} that satisfy
all of the specified selection criteria
\"\"\"
return self._select_annotations_like(
"{plural_name}", selector=selector, row=row, col=col, secondary_y=secondary_y
)
def for_each_{method_prefix}{singular_name}(
self, fn, selector=None, row=None, col=None, secondary_y=None
):
\"\"\"
Apply a function to all {plural_name} that satisfy the specified selection
criteria
Parameters
----------
fn:
Function that inputs a single {singular_name} object.
selector: dict or None (default None)
Dict to use as selection criteria.
            {plural_name.capitalize()} will be selected if they contain properties corresponding
to all of the dictionary's keys, with values that exactly match
the supplied values. If None (the default), all {plural_name} are
selected.
row, col: int or None (default None)
Subplot row and column index of {plural_name} to select.
To select {plural_name} by row and column, the Figure must have been
created using plotly.subplots.make_subplots. To select only those
{plural_name} that are in paper coordinates, set row and col to the
string 'paper'. If None (the default), all {plural_name} are selected.
secondary_y: boolean or None (default None)
* If True, only select {plural_name} associated with the secondary
y-axis of the subplot.
* If False, only select {plural_name} associated with the primary
y-axis of the subplot.
* If None (the default), do not filter {plural_name} based on secondary
y-axis.
To select {plural_name} by secondary y-axis, the Figure must have been
created using plotly.subplots.make_subplots. See the docstring
for the specs argument to make_subplots for more info on
creating subplots with secondary y-axes.
Returns
-------
self
Returns the Figure object that the method was called on
\"\"\"
for obj in self._select_annotations_like(
prop='{plural_name}',
selector=selector,
row=row,
col=col,
secondary_y=secondary_y,
):
fn(obj)
return self
def update_{method_prefix}{plural_name}(
self,
patch=None,
selector=None,
row=None,
col=None,
secondary_y=None,
**kwargs
):
\"\"\"
Perform a property update operation on all {plural_name} that satisfy the
specified selection criteria
Parameters
----------
patch: dict or None (default None)
Dictionary of property updates to be applied to all {plural_name} that
satisfy the selection criteria.
selector: dict or None (default None)
Dict to use as selection criteria.
            {plural_name.capitalize()} will be selected if they contain properties corresponding
to all of the dictionary's keys, with values that exactly match
the supplied values. If None (the default), all {plural_name} are
selected.
row, col: int or None (default None)
Subplot row and column index of {plural_name} to select.
To select {plural_name} by row and column, the Figure must have been
created using plotly.subplots.make_subplots. To select only those
            {plural_name} that are in paper coordinates, set row and col to the
string 'paper'. If None (the default), all {plural_name} are selected.
secondary_y: boolean or None (default None)
* If True, only select {plural_name} associated with the secondary
y-axis of the subplot.
* If False, only select {plural_name} associated with the primary
y-axis of the subplot.
* If None (the default), do not filter {plural_name} based on secondary
y-axis.
To select {plural_name} by secondary y-axis, the Figure must have been
created using plotly.subplots.make_subplots. See the docstring
for the specs argument to make_subplots for more info on
creating subplots with secondary y-axes.
**kwargs
Additional property updates to apply to each selected {singular_name}. If
a property is specified in both patch and in **kwargs then the
one in **kwargs takes precedence.
Returns
-------
self
Returns the Figure object that the method was called on
\"\"\"
for obj in self._select_annotations_like(
prop='{plural_name}',
selector=selector,
row=row,
col=col,
secondary_y=secondary_y,
):
obj.update(patch, **kwargs)
return self
"""
)
# Add layout array items
buffer.write(
f"""
def add_{method_prefix}{singular_name}(self"""
)
add_constructor_params(
buffer,
node.child_datatypes,
prepend_extras=["arg"],
append_extras=["row", "col", "secondary_y"],
)
prepend_extras = [
(
"arg",
f"instance of {node.name_datatype_class} or dict with "
"compatible properties",
)
]
append_extras = [
("row", f"Subplot row for {singular_name}"),
("col", f"Subplot column for {singular_name}"),
("secondary_y", f"Whether to add {singular_name} to secondary y-axis"),
]
add_docstring(
buffer,
node,
header=f"Create and add a new {singular_name} to the figure's layout",
prepend_extras=prepend_extras,
append_extras=append_extras,
return_type=fig_classname,
)
# #### Function body ####
buffer.write(
f"""
from plotly.graph_objs import layout as _layout
new_obj = _layout.{node.name_datatype_class}(arg,
"""
)
for i, subtype_node in enumerate(node.child_datatypes):
subtype_prop_name = subtype_node.name_property
buffer.write(
f"""
{subtype_prop_name}={subtype_prop_name},"""
)
buffer.write("""**kwargs)""")
buffer.write(
f"""
return self._add_annotation_like(
'{singular_name}',
'{plural_name}',
new_obj,
row=row,
col=col,
secondary_y=secondary_y,
)"""
)
# Return source string
# --------------------
buffer.write("\n")
return buffer.getvalue()
def write_figure_classes(
outdir,
trace_node,
data_validator,
layout_validator,
frame_validator,
subplot_nodes,
layout_array_nodes,
):
"""
Construct source code for the Figure and FigureWidget classes and
write to graph_objs/_figure.py and graph_objs/_figurewidget.py
respectively
Parameters
----------
outdir : str
Root outdir in which the graph_objs package should reside
trace_node : PlotlyNode
Root trace node (the node that is the parent of all of the
individual trace nodes like bar, scatter, etc.)
data_validator : BaseDataValidator
DataValidator instance
layout_validator : CompoundValidator
LayoutValidator instance
frame_validator : CompoundArrayValidator
FrameValidator instance
subplot_nodes: list of PlotlyNode
        List of all of the layout subplot property nodes
layout_array_nodes: list of PlotlyNode
List of array nodes under layout that can be positioned using xref/yref
Returns
-------
None
"""
# Validate inputs
# ---------------
if trace_node.node_path:
raise ValueError(
f"Expected root trace node.\n"
f'Received node with path "{trace_node.path_str}"'
)
# Loop over figure types
# ----------------------
base_figures = [
("basewidget", "BaseFigureWidget", "FigureWidget"),
("basedatatypes", "BaseFigure", "Figure"),
]
for base_package, base_classname, fig_classname in base_figures:
# ### Build figure source code string ###
figure_source = build_figure_py(
trace_node,
base_package,
base_classname,
fig_classname,
data_validator,
layout_validator,
frame_validator,
subplot_nodes,
layout_array_nodes,
)
# ### Format and write to file###
filepath = opath.join(outdir, "graph_objs", f"_{fig_classname.lower()}.py")
write_source_py(figure_source, filepath)
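# For orientation, a sketch of how the generated methods look from the
# user-facing side. Illustrative only and not part of the codegen module;
# it assumes plotly is installed and builds a 2-row subplot figure.
def _example_generated_api():
    from plotly.subplots import make_subplots
    fig = make_subplots(rows=2, cols=1, specs=[[{"secondary_y": True}], [{}]])
    # add_scatter is one of the generated add_<trace> methods.
    fig.add_scatter(y=[1, 3, 2], row=1, col=1, secondary_y=True)
    # update_yaxes comes from the generated layout-subplot helpers.
    fig.update_yaxes(showgrid=False, row=2, col=1)
    # add_annotation / update_annotations come from the generated
    # layout-array helpers.
    fig.add_annotation(text="peak", x=1, y=3, row=1, col=1)
    fig.update_annotations(font_size=14)
    return fig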
|
|
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import command
import match
import random
import string_utils
import tokenizer
import world
from verb import verb
from ansi import Style, Fore, Back
DEFAULT_DEBUG = False
MIN_WRAP = 40
DEFAULT_WRAP = 80
class Root(world.Object):
def __init__(self):
super().__init__()
self.description = 'Nothing out of the ordinary.'
def title(self):
return self.name
def look_self(self, *args, **kwargs):
return self.description
def tell(self, v):
if self.is_player:
if type(v) is list:
for s in v:
print(s)
else:
print(v)
def moveto(self, where):
world.move(self, where)
class Exit(Root):
def __init__(self):
super().__init__()
self.other_side = None
self.dur = 10
def invoke(self, what, *args, **kwargs):
if self.other_side:
what.moveto(self.other_side)
return self.dur
def look_self(self, *args, **kwargs):
if self.other_side:
d = self.other_side.render(*args, **kwargs)
mobs = self.other_side.render_mobs(*args, **kwargs)
if mobs:
d.append('')
d += mobs
return d
class Thing(Root):
def __init__(self):
super().__init__()
def title(self):
s = self.name
if s.startswith(('a', 'e', 'i', 'u', 'o', 'A', 'E', 'I', 'U', 'O')):
return 'an ' + self.name
else:
return 'a ' + self.name
class Player(Root):
def __init__(self):
super().__init__()
self.debug = DEFAULT_DEBUG
self.wrap = DEFAULT_WRAP
self.wielded = None
@verb('@wrap', ('any', 'none', 'none'))
def set_wrap(self, *args, **kwargs):
dobjstr = kwargs['dobjstr']
if dobjstr and dobjstr.isnumeric():
v = int(dobjstr)
if v < MIN_WRAP:
self.tell('Minimum wrap is %i characters.' % MIN_WRAP)
else:
self.wrap = v
self.tell('Wrap set to %i characters.' % self.wrap)
@verb('@debug', ('any', 'none', 'none'))
def set_debug(self, *args, **kwargs):
dobjstr = kwargs['dobjstr']
if dobjstr == 'on':
self.debug = True
self.tell('Debug output is on')
elif dobjstr == 'off':
self.debug = False
self.tell('Debug output is off.')
@verb('l*ook', ('none', 'none', 'none'))
def look_around(self, *args, **kwargs):
if self.location:
d = self.location.render(*args, **kwargs)
mobs = self.location.render_mobs(*args, **kwargs)
if mobs:
d.append('')
d += mobs
things = self.location.render_things(*args, **kwargs)
if things:
d.append('')
d += things
exits = self.location.render_exits(*args, **kwargs)
if exits:
d.append('')
d += exits
self.tell(d)
else:
self.tell("You are nowhere.")
@verb('l*ook', ('any', 'none', 'none'))
def look_thing(self, *args, **kwargs):
player, thing = kwargs['player'], kwargs['dobj']
if thing:
player.tell(thing.look_self(*args, **kwargs))
else:
player.tell("There is no `%s' here." % kwargs['dobjstr'])
@verb('u*p d*own e*ast w*est n*orth s*outh ne northe*ast nw northw*est se southe*ast sw southw*est', ('none', 'none', 'none'))
def go_direction(self, *args, **kwargs):
kwargs['dobjstr'] = kwargs['verb']
        return self.go(*args, **kwargs)
@verb('g*o', ('any', 'none', 'none'))
def go(self, *args, **kwargs):
        player, dobjstr = kwargs['player'], kwargs['dobjstr']
exit = match.object(dobjstr, self.location.exits)
if exit is match.Ambiguous:
player.tell("I'm not sure which way `%s' you mean." % dobjstr)
elif exit:
dur = exit.invoke(self, *args, **kwargs)
player.look_around(*args, **kwargs)
return dur
else:
player.tell("You can't go that way.")
@verb('k*ill', ('any', 'none', 'none'))
def kill(self, *args, **kwargs):
player, dobj, dobjstr = kwargs['player'], kwargs['dobj'], kwargs['dobjstr']
if dobj:
if dobj == player:
self.kill_self(*args, **kwargs)
else:
player.tell("You attack %s!" % dobj.name)
elif kwargs['dobjstr']:
player.tell("There is no `%s' here." % dobjstr)
else:
player.tell("Kill what?")
def kill_self(self, *args, **kwargs):
player = kwargs['player']
if self.wielded:
player.tell("You are not THAT desperate!")
else:
player.tell("You try to strangle yourself but that doesn't really work.")
@verb('h*elp', ('any', 'any', 'any'))
def help(self, *args, **kwargs):
player = kwargs['player']
player.tell("Unfortunately there is nobody here to help you right now.")
class Room(Root):
def __init__(self):
super().__init__()
self.coords = None
self.area_icon = Fore.WHITE + '. ' + Style.RESET_ALL
self.map_icon = Fore.WHITE + Style.BRIGHT + '[]' + Style.RESET_ALL
self.exits = []
def render_map(self):
# Location should be an Area instance
return self.location.render_map(self.coords, render_player=True)
def render_description(self, *args, **kwargs):
return self.description
def render_name(self, *args, **kwargs):
return Style.BRIGHT + self.name + Style.RESET_ALL
def render_exits(self, *args, **kwargs):
player = kwargs['player']
if not self.exits:
return []
s = Fore.CYAN + '[ exits: '
for e in self.exits:
s += Style.BRIGHT + e.name + ' '
s += Style.NORMAL + ']'
return string_utils.wrap_to_lines(s, player.wrap)
def render(self, *args, **kwargs):
player = kwargs['player']
lines = []
i0 = 0
m = self.render_map()
if len(self.name) > 0:
lines.append(m[0] + ' ' + self.render_name(*args, **kwargs))
i0 = 1
# TODO: Fix this so we can pass in map size
rest = self.render_description(*args, **kwargs)
max_len = player.wrap - (6 * 2)
for i in range(i0, len(m)):
l = m[i]
if len(rest) > 0:
s, rest = string_utils.wrap(rest, max_len)
lines.append(l + ' ' + s)
else:
lines.append(l)
if len(rest) > 0:
s, rest = string_utils.wrap(rest, max_len)
lines.append(6 * ' ' + s)
max_len = player.wrap
while len(rest) > 0:
s, rest = string_utils.wrap(rest, max_len)
lines.append(s)
return lines
def render_things(self, *args, **kwargs):
player = kwargs['player']
things = [x.title() for x in self.contents if isinstance(x, Thing)]
if not things:
return []
d = "You see %s on the floor." % string_utils.english_list(things)
return string_utils.wrap_to_lines(d, player.wrap)
def render_mobs(self, *args, **kwargs):
player = kwargs['player']
mobs = [x for x in self.contents if isinstance(x, Mob)]
if not mobs:
return []
d = ""
groups = {}
for m in mobs:
doing = m.doing()
            if doing not in groups:
groups[doing] = []
groups[doing].append(m.title())
for doing in groups:
actors = groups[doing]
verb = "is"
if len(actors) > 1:
verb = "are"
d += "%s %s %s here. " % (string_utils.english_list(actors), verb, doing)
return string_utils.wrap_to_lines(d, player.wrap)
class Area(Root):
def __init__(self):
super().__init__()
self.levels = {}
def render_map(self, origin, render_player=False):
xo, yo, zo = origin
m = [[] for y in range(5)]
level_rooms, level_map = self.levels[zo]
roomo = level_map[yo][xo]
for row in range(5):
for col in range(5):
y = yo + row - (5 // 2)
x = xo + col - (5 // 2)
m[row].append(roomo.area_icon)
if x >= 0 and y >= 0 and level_map[y][x]:
icon = level_map[y][x].map_icon
if x == xo and y == yo and render_player:
icon = Style.BRIGHT + Back.BLUE + Fore.WHITE + '()' + Style.RESET_ALL
m[row][col] = icon
return [''.join(xs) for xs in m]
def update(self):
self.levels = {}
mapped_room = lambda x: hasattr(x, 'coords') and x.coords
        rooms = [x for x in self.contents if mapped_room(x)]
for r in rooms:
x, y, z = r.coords
if not z in self.levels:
level_map = [[None for y in range(64)] for x in range(64)]
self.levels[z] = ([], level_map)
level_rooms, level_map = self.levels[z]
level_map[y][x] = r
level_rooms.append(r)
class Actor(Root):
def __init__(self, q):
super().__init__()
self.q = q
self.dt = 0
def act(self, dur, *args, **kwargs):
self.dt += dur
c = self.dt // self.q
self.dt = self.dt % self.q
return self.perform(c, *args, **kwargs)
def perform(self, count, *args, **kwargs):
return []
class Mob(Actor):
def __init__(self, name, q):
super().__init__(q)
self.name = name
self.doing_msg = 'standing'
self.q = q
self.dt = 0
def doing(self):
return self.doing_msg
def title(self):
return self.name
class Alana(Mob):
def __init__(self):
super().__init__('Alana', 3)
self.description = 'A big (for a cat at least) orange furball. He looks up at you curiously.'
self.doing_msg = 'walking around'
def perform(self, count, *args, **kwargs):
        if count > 0:
            return [lambda: self.move_around(*args, **kwargs)]
        return []
def move_around(self, *args, **kwargs):
player = kwargs['player']
        if not self.location or not self.location.exits:
return
if random.randint(0, 10) < 3:
i = random.randint(0, len(self.location.exits) - 1)
exit = self.location.exits[i]
return exit.invoke(self) # TODO: incorporate duration
class Rat(Mob):
def __init__(self):
super().__init__('furry rat', 10)
self.description = 'A small furry rat.'
self.doing_msg = 'scurrying around'
def title(self):
return 'a furry rat'
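# A sketch of how another mob could slot into the Actor/Mob pattern above
# (illustrative only; the Sparrow below is not part of the original game).
# The second argument to Mob.__init__ is the action quantum `q`: act() turns
# accumulated duration into `count` calls of perform().
class Sparrow(Mob):
    def __init__(self):
        super().__init__('small sparrow', 5)
        self.description = 'A small brown sparrow hops around nervously.'
        self.doing_msg = 'hopping around'
    def perform(self, count, *args, **kwargs):
        # Does nothing visible; a wandering mob would return a list of
        # callables the way Alana.perform() does.
        return []
# To use it, place it in a room and pass it to the actor loop, for example:
#   sparrow = Sparrow()
#   world.move(sparrow, r3)
#   loop([alana, sparrow])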
class World:
def __init__(self):
self.mobs = []
def parse(player, s):
tokens = tokenizer.tokenize(s)
cmd = command.parse(tokens)
return world.resolve(player, cmd)
def execute(cmd, player, actors=[]):
if callable(cmd['f']):
args = cmd['args']
cmd.update({'player': player})
dur = cmd['f'](*args, **cmd)
if dur and dur > 0:
actions = []
for actor in actors:
actions += actor.act(dur, *args, **cmd)
# Spice things up so not all mob actions execute in order
random.shuffle(actions)
for action in actions:
action()
else:
player.tell("That's not something you can do right now.")
def prompt():
return Style.BRIGHT + Fore.CYAN + "> " + Style.RESET_ALL
def loop(actors=[]):
while True:
s = input(prompt())
if not s:
continue
cmd = parse(player, s)
if player.debug:
print(cmd)
if cmd['verb'] == '@quit':
print("Zoning out...")
break
r = execute(cmd, player, actors)
foo = Thing()
foo.name = 'rusty nail'
foo.aliases = {'nail'}
foo.description = "A rusty casing nail. It's a little crooked."
bar = Thing()
bar.name = 'orange'
bar.description = "It's covered in mold. It's probably a bad idea to eat this."
alana = Alana()
rat = Rat()
rat.aliases = {'rat'}
player = Player()
player.is_player = True
DESTROYED_BUILDING = 'The foundation and a few walls remain but otherwise this building is completely destroyed.'
DAMP_CELLAR = 'The cellar is dark, damp and downright unpleasant.'
area = Area()
room = Room()
room.coords = (10, 10, 0)
room.name = 'Destroyed Building'
room.description = DESTROYED_BUILDING
world.move(room, area)
r1 = room
room = Room()
room.coords = (11, 10, 0)
room.name = "Vanity's Shack"
room.description = 'A small wooden shack seems a bit out of place.'
room.map_icon = Back.BLUE + Fore.YELLOW + Style.BRIGHT + 'Va' + Style.RESET_ALL
world.move(room, area)
r2 = room
exit = Exit()
exit.other_side = r2
exit.name = 'east'
r1.exits.append(exit)
exit.moveto(r1)
exit = Exit()
exit.other_side = r1
exit.name = 'west'
r2.exits.append(exit)
exit.moveto(r2)
room = Room()
room.coords = (11, 11, 0)
room.name = 'Destroyed Building'
room.description = DESTROYED_BUILDING
world.move(room, area)
world.move(alana, room)
world.move(rat, room)
r3 = room
exit = Exit()
exit.other_side = r3
exit.name = 'south'
r2.exits.append(exit)
exit.moveto(r2)
exit = Exit()
exit.other_side = r2
exit.name = 'north'
r3.exits.append(exit)
exit.moveto(r3)
room = Room()
room.coords = (9, 9, 0)
room.map_icon = Fore.YELLOW + '##' + Style.RESET_ALL
world.move(room, area)
room = Room()
room.coords = (10, 9, 0)
room.map_icon = Fore.YELLOW + '==' + Style.RESET_ALL
world.move(room, area)
room = Room()
room.coords = (11, 9, 0)
room.map_icon = Fore.YELLOW + '==' + Style.RESET_ALL
world.move(room, area)
room = Room()
room.coords = (12, 9, 0)
room.map_icon = Fore.YELLOW + '==' + Style.RESET_ALL
world.move(room, area)
room = Room()
room.coords = (9, 10, 0)
room.map_icon = Fore.YELLOW + '||' + Style.RESET_ALL
world.move(room, area)
room = Room()
room.coords = (9, 11, 0)
room.map_icon = Fore.YELLOW + '||' + Style.RESET_ALL
world.move(room, area)
room = Room()
room.coords = (9, 12, 0)
room.map_icon = Fore.YELLOW + '||' + Style.RESET_ALL
world.move(room, area)
room = Room()
room.coords = (0, 12, -1)
room.name = 'Damp Cellar'
room.description = DAMP_CELLAR
room.map_icon = '[]'
world.move(room, area)
world.move(player, room)
world.move(foo, room)
world.move(bar, room)
r4 = room
exit = Exit()
exit.name = 'up'
exit.other_side = r3
r4.exits.append(exit)
exit.moveto(r4)
exit = Exit()
exit.name = 'down'
exit.other_side = r4
r3.exits.append(exit)
exit.moveto(r3)
room = Room()
room.coords = (1, 12, -1)
room.name = 'Damp Cellar'
room.description = DAMP_CELLAR
room.map_icon = Style.BRIGHT + Fore.WHITE + Back.MAGENTA + "BR" + Style.RESET_ALL
world.move(room, area)
r5 = room
exit = Exit()
exit.name = 'east'
exit.other_side = r5
r4.exits.append(exit)
exit.moveto(r4)
exit = Exit()
exit.name = 'west'
exit.other_side = r4
r5.exits.append(exit)
exit.moveto(r5)
room = Room()
room.coords = (1, 12, -2)
room.name = 'The Pit'
room.map_icon = 'XX'
room.area_icon = '//'
world.move(room, area)
r6 = room
exit = Exit()
exit.name = 'down'
exit.other_side = r6
r5.exits.append(exit)
exit.moveto(r5)
exit = Exit()
exit.name = 'up'
exit.other_side = r5
r6.exits.append(exit)
exit.moveto(r6)
area.update()
if __name__ == '__main__':
loop([alana])
|
|
""":mod:`linkage.consumer` --- High level Amazon Kinesis Streams consumer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from gevent import monkey
monkey.patch_all() # noqa: E402
import os.path
import sys
import pkgutil
import gevent
import signal
from datetime import timedelta, datetime
from typing import Any, Callable, List, Optional
import boto3
from gevent import Greenlet
from gevent.pool import Group
from werkzeug.datastructures import ImmutableDict
from typeguard import typechecked
from .config import ConfigAttribute, Config
from .checkpointer import Checkpointer, InMemoryCheckpointer
from .ctx import ConsumerContext, has_shard_context
from .streams import KinesisStream, KinesisShard
from .globals import shard as current_shard
from .helpers import locked_cached_property
from .logging import create_logger, _Logger
# a singleton sentinel value for parameter defaults
_sentinel = object()
def _get_root_path(import_name: str) -> str:
"""
Returns the path to a package or cwd if that cannot be found
"""
mod = sys.modules.get(import_name)
if mod is not None and hasattr(mod, '__file__'):
return os.path.dirname(os.path.abspath(mod.__file__))
loader = pkgutil.get_loader(import_name)
if loader is None or import_name == '__main__':
return os.getcwd()
filepath: str = loader.get_filename(import_name)
return os.path.dirname(os.path.abspath(filepath))
def _make_timedelta(value: Any) -> timedelta:
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
class Consumer(object):
#: The debug flag
#:
#: This attribute can also be configured from the config with the ``DEBUG``
#: configuration key. Defaults to ``False``.
debug: ConfigAttribute = ConfigAttribute('DEBUG')
#: A :class:`~datetime.timedelta` which is used as
#: shard iterator interval.
#:
#: This attribute can also be configured from the config with
#: ``SHARD_ITERATOR_INTERVAL`` configuration key. Defaults to
#: ``timedelta(seconds=1)``
shard_iterator_interval: ConfigAttribute = ConfigAttribute(
'SHARD_ITERATOR_INTERVAL',
get_converter=_make_timedelta
)
#: A :class:`~datetime.timedelta` which is used as
#: shard monitoring interval.
#:
#: This attribute can also be configured from the config with
#: ``SHARD_MONITORING_INTERVAL`` configuration key. Defaults to
#: ``timedelta(hours=1)``
shard_monitoring_interval: ConfigAttribute = ConfigAttribute(
'SHARD_MONITORING_INTERVAL',
get_converter=_make_timedelta
)
#: A :class:`~datetime.timedelta` which is used as overhang interval.
#:
#: This attribute can also be configured from the config with
#: ``PROTRACTOR_OVERHANG_INTERVAL`` configuration key. Defaults to
#: ``timedelta(seconds=30)``
protractor_overhang_interval: ConfigAttribute = ConfigAttribute(
'PROTRACTOR_OVERHANG_INTERVAL',
get_converter=_make_timedelta
)
#: Default configuration parameters.
__default_config: ImmutableDict = ImmutableDict({
'DEBUG': False,
'STREAM_REGION': 'ap-south-1',
'STREAM_NAME': None,
'SHARD_ITERATOR_TYPE': 'TRIM_HORIZON',
'SHARD_READ_LIMIT': 50,
'SHARD_ITERATOR_INTERVAL': timedelta(seconds=1),
'SHARD_MONITORING_INTERVAL': timedelta(hours=1),
'PROTRACTOR_ENABLE': False,
'PROTRACTOR_OVERHANG_INTERVAL': timedelta(seconds=30),
'LOGGER_HANDLER_POLICY': 'always',
'LOG_ROLLOVER': 'd',
'LOG_INTERVAL': 1,
'LOG_BACKUP_COUNT': 2,
'BUCKET_SIZE_LIMIT': 10000,
'BUCKET_COUNT_LIMIT': 120,
})
#: The name of the package or module that this consumer belongs to.
#: Do not change this once it is set by the constructor.
import_name: str = None
#: Absolute path to the package on the filesystem.
root_path: str = None
def __init__(self,
import_name: str,
root_path: str = None,
stream_region: str = None,
stream_name: str = None,
log_folder: str = 'log',
checkpointer: Checkpointer = None) -> None:
self.import_name = import_name
if root_path is None:
root_path = _get_root_path(import_name)
self.root_path = root_path
self.log_folder = log_folder
#: The configuration directory as :class:`Config`.
self.config = Config(self.root_path, self.__default_config)
if stream_region is not None:
self.config['STREAM_REGION'] = stream_region
if stream_name is not None:
self.config['STREAM_NAME'] = stream_name
#:
self.checkpointer = checkpointer
if self.checkpointer is None:
self.checkpointer = InMemoryCheckpointer()
        #: A list of functions that will be called when the bucket is full.
self.__transform_funcs = []
#: A list of functions that should be called after transform.
self.__after_consume_func = []
#: A list of functions that are called when the consumer context
        #: is destroyed. Since the consumer context is also torn down
        #: when :meth:`process` finishes, this is the place for cleanup code.
self.__teardown_consumer_func = []
#:
self.__threads = Group()
self.shards = set()
@locked_cached_property
def name(self) -> str:
if self.import_name == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@locked_cached_property
def logger(self) -> _Logger:
return create_logger(self)
@locked_cached_property
def kinesis_client(self):
return boto3.client('kinesis',
region_name=self.config['STREAM_REGION'])
@typechecked
def transform(self, func: Callable[[List[Any],
str,
str,
datetime],
List[Any]]) -> Callable:
self.__transform_funcs.append(func)
return func
@typechecked
def after_consume(self, func: Callable[[Optional[List[Any]],
str,
Optional[str],
Optional[datetime]],
None]) -> Callable:
self.__after_consume_func.append(func)
return func
@typechecked
def teardown_consumer(self, func: Callable[[Any], None]) -> Callable:
self.__teardown_consumer_func.append(func)
return func
@typechecked
def do_transform(self,
data: List[Any],
shard_id: str,
last_sequence_number: str,
last_arrival_timestamp: datetime) -> List[Any]:
for func in reversed(self.__transform_funcs):
data = func(
data,
shard_id,
last_sequence_number,
last_arrival_timestamp
)
return data
@typechecked
def do_after_consume(self,
data: Optional[List[Any]],
shard_id: str,
last_sequence_number: Optional[str],
last_arrival_timestamp: Optional[datetime]) -> None:
for func in reversed(self.__after_consume_func):
func(
data,
shard_id,
last_sequence_number,
last_arrival_timestamp
)
@typechecked
def do_teardown_consumer(self, exc=_sentinel) -> None:
if exc is _sentinel:
exc = sys.exc_info()[1]
for func in reversed(self.__teardown_consumer_func):
func(exc)
def handle_shard_exception(self, e) -> None:
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
self.log_exception((exc_type, exc_value, tb))
def handle_exception(self, e) -> None:
exc_type, exc_value, tb = sys.exc_info()
self.log_exception((exc_type, exc_value, tb))
def log_exception(self, exc_info) -> None:
if has_shard_context():
self.logger.error(
'Exception on {0}'.format(current_shard.id),
exc_info=exc_info
)
else:
self.logger.error(
'Exception', exc_info=exc_info
)
def get_context(self) -> ConsumerContext:
return ConsumerContext(self)
def get_stream(self) -> KinesisStream:
return KinesisStream(self.kinesis_client.describe_stream(
StreamName=self.config['STREAM_NAME']
))
def dispatch(self) -> None:
stream = self.get_stream()
if stream.status == 'ACTIVE':
gevent.signal(signal.SIGQUIT, gevent.killall)
shards = stream.get_shards(self)
for shard in shards:
self.spawn_shard(shard)
self.__threads.start(ShardMonitor(self))
self.__threads.join()
else:
sys.exit()
def spawn_shard(self, shard: KinesisShard) -> None:
self.__threads.start(shard)
self.shards.add(shard)
def close_shard(self, shard: KinesisShard) -> None:
self.logger.warn('Stream \'{0}\' Shard \'{1}\' closed'.format(
self.config['STREAM_NAME'], shard.id
))
self.shards.remove(shard)
def process(self, debug=None) -> None:
if debug is not None:
self.debug = bool(debug)
ctx = self.get_context()
error = None
try:
try:
ctx.push()
self.dispatch()
except Exception as e:
error = e
self.handle_exception(e)
finally:
ctx.pop(error)
def __repr__(self) -> str:
return '<{0!s} {1!r} - \'{2!s}\'>'.format(
self.__class__.__name__,
self.name,
self.config['STREAM_NAME']
)
class ShardMonitor(Greenlet):
def __init__(self, consumer: Consumer):
super().__init__()
self.consumer = consumer
self.interval = self.consumer.shard_monitoring_interval
self.running = False
def _run(self):
self.running = True
while self.running:
stream = self.consumer.get_stream()
            shards = set(stream.get_shards(self.consumer))
self.consumer.logger.warn(
'Monitoring shards: Consumer({0}), AWS({1})'.format(
self.consumer.shards,
shards
))
diff_shards = shards - self.consumer.shards
if len(diff_shards) > 0:
self.consumer.logger.warn(
'Stream {0} Spawning New Shards {1}'.format(
self.consumer.config['STREAM_NAME'],
len(diff_shards)
)
)
for shard in diff_shards:
self.consumer.spawn_shard(shard)
gevent.sleep(seconds=self.interval.seconds)
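# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the library). It assumes a Kinesis
# stream named 'example-stream' exists and that AWS credentials are
# configured; the handler bodies are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    consumer = Consumer(__name__,
                        stream_region='ap-south-1',
                        stream_name='example-stream')
    @consumer.transform
    def drop_empty_records(data, shard_id, last_sequence_number,
                           last_arrival_timestamp):
        # Transform hooks run when a shard bucket is flushed and may reshape
        # the collected records before the after_consume hooks see them.
        return [record for record in data if record]
    @consumer.after_consume
    def log_records(data, shard_id, last_sequence_number,
                    last_arrival_timestamp):
        # After-consume hooks receive the (possibly transformed) records.
        if data:
            consumer.logger.info(
                '{0} records from shard {1}'.format(len(data), shard_id))
    # Blocks, spawning one greenlet per shard plus a ShardMonitor.
    consumer.process(debug=True)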
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Flurbo Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:FlurboTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_FLURBOD' not in vars():
ENABLE_FLURBOD=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "FLURBOD" not in os.environ:
os.environ["FLURBOD"] = buildDir + '/src/flurbod' + EXEEXT
if "FLURBOCLI" not in os.environ:
os.environ["FLURBOCLI"] = buildDir + '/src/flurbo-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/flurbo/flurbo/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/flurbo/flurbo/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_FLURBOD == 1):
print("No rpc tests to run. Wallet, utils, and flurbod must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'decodescript.py',
'p2p-fullblocktest.py',
'blockchain.py',
'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'pruning.py',
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
]
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `flurbo-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `qa/rpc-tests/test_framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
|
|
"""Tests for the Google Assistant integration."""
from homeassistant.components.google_assistant import helpers
from tests.async_mock import MagicMock
def mock_google_config_store(agent_user_ids=None):
"""Fake a storage for google assistant."""
store = MagicMock(spec=helpers.GoogleConfigStore)
if agent_user_ids is not None:
store.agent_user_ids = agent_user_ids
else:
store.agent_user_ids = {}
return store
class MockConfig(helpers.AbstractConfig):
"""Fake config that always exposes everything."""
def __init__(
self,
*,
secure_devices_pin=None,
should_expose=None,
should_2fa=None,
entity_config=None,
hass=None,
local_sdk_webhook_id=None,
local_sdk_user_id=None,
enabled=True,
agent_user_ids=None,
):
"""Initialize config."""
super().__init__(hass)
self._should_expose = should_expose
self._should_2fa = should_2fa
self._secure_devices_pin = secure_devices_pin
self._entity_config = entity_config or {}
self._local_sdk_webhook_id = local_sdk_webhook_id
self._local_sdk_user_id = local_sdk_user_id
self._enabled = enabled
self._store = mock_google_config_store(agent_user_ids)
@property
def enabled(self):
"""Return if Google is enabled."""
return self._enabled
@property
def secure_devices_pin(self):
"""Return secure devices pin."""
return self._secure_devices_pin
@property
def entity_config(self):
"""Return secure devices pin."""
return self._entity_config
@property
def local_sdk_webhook_id(self):
"""Return local SDK webhook id."""
return self._local_sdk_webhook_id
@property
def local_sdk_user_id(self):
"""Return local SDK webhook id."""
return self._local_sdk_user_id
def get_agent_user_id(self, context):
"""Get agent user ID making request."""
return context.user_id
def should_expose(self, state):
"""Expose it all."""
return self._should_expose is None or self._should_expose(state)
def should_2fa(self, state):
"""Expose it all."""
return self._should_2fa is None or self._should_2fa(state)
BASIC_CONFIG = MockConfig()
DEMO_DEVICES = [
{
"id": "light.kitchen_lights",
"name": {"name": "Kitchen Lights"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Brightness",
"action.devices.traits.ColorSetting",
],
"type": "action.devices.types.LIGHT",
"willReportState": False,
},
{
"id": "switch.ac",
"name": {"name": "AC"},
"traits": ["action.devices.traits.OnOff"],
"type": "action.devices.types.OUTLET",
"willReportState": False,
},
{
"id": "switch.decorative_lights",
"name": {"name": "Decorative Lights"},
"traits": ["action.devices.traits.OnOff"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
},
{
"id": "light.ceiling_lights",
"name": {
"name": "Roof Lights",
"nicknames": ["Roof Lights", "top lights", "ceiling lights"],
},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Brightness",
"action.devices.traits.ColorSetting",
],
"type": "action.devices.types.LIGHT",
"willReportState": False,
},
{
"id": "light.bed_light",
"name": {"name": "Bed Light"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Brightness",
"action.devices.traits.ColorSetting",
],
"type": "action.devices.types.LIGHT",
"willReportState": False,
},
{
"id": "cover.living_room_window",
"name": {"name": "Living Room Window"},
"traits": ["action.devices.traits.OpenClose"],
"type": "action.devices.types.BLINDS",
"willReportState": False,
},
{
"id": "cover.hall_window",
"name": {"name": "Hall Window"},
"traits": ["action.devices.traits.OpenClose"],
"type": "action.devices.types.BLINDS",
"willReportState": False,
},
{
"id": "cover.garage_door",
"name": {"name": "Garage Door"},
"traits": ["action.devices.traits.OpenClose"],
"type": "action.devices.types.GARAGE",
"willReportState": False,
},
{
"id": "cover.kitchen_window",
"name": {"name": "Kitchen Window"},
"traits": ["action.devices.traits.OpenClose"],
"type": "action.devices.types.BLINDS",
"willReportState": False,
},
{
"id": "media_player.bedroom",
"name": {"name": "Bedroom"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Volume",
"action.devices.traits.Modes",
"action.devices.traits.TransportControl",
"action.devices.traits.MediaState",
],
"type": "action.devices.types.SETTOP",
"willReportState": False,
},
{
"id": "media_player.living_room",
"name": {"name": "Living Room"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Volume",
"action.devices.traits.Modes",
"action.devices.traits.TransportControl",
"action.devices.traits.MediaState",
],
"type": "action.devices.types.SETTOP",
"willReportState": False,
},
{
"id": "media_player.lounge_room",
"name": {"name": "Lounge room"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Modes",
"action.devices.traits.TransportControl",
"action.devices.traits.MediaState",
],
"type": "action.devices.types.SETTOP",
"willReportState": False,
},
{
"id": "media_player.walkman",
"name": {"name": "Walkman"},
"traits": [
"action.devices.traits.OnOff",
"action.devices.traits.Volume",
"action.devices.traits.Modes",
"action.devices.traits.TransportControl",
"action.devices.traits.MediaState",
],
"type": "action.devices.types.SETTOP",
"willReportState": False,
},
{
"id": "fan.living_room_fan",
"name": {"name": "Living Room Fan"},
"traits": ["action.devices.traits.FanSpeed", "action.devices.traits.OnOff"],
"type": "action.devices.types.FAN",
"willReportState": False,
},
{
"id": "fan.ceiling_fan",
"name": {"name": "Ceiling Fan"},
"traits": ["action.devices.traits.FanSpeed", "action.devices.traits.OnOff"],
"type": "action.devices.types.FAN",
"willReportState": False,
},
{
"id": "climate.hvac",
"name": {"name": "Hvac"},
"traits": ["action.devices.traits.TemperatureSetting"],
"type": "action.devices.types.THERMOSTAT",
"willReportState": False,
"attributes": {
"availableThermostatModes": "off,heat,cool,heatcool,auto,dry,fan-only",
"thermostatTemperatureUnit": "C",
},
},
{
"id": "climate.heatpump",
"name": {"name": "HeatPump"},
"traits": ["action.devices.traits.TemperatureSetting"],
"type": "action.devices.types.THERMOSTAT",
"willReportState": False,
},
{
"id": "climate.ecobee",
"name": {"name": "Ecobee"},
"traits": ["action.devices.traits.TemperatureSetting"],
"type": "action.devices.types.THERMOSTAT",
"willReportState": False,
},
{
"id": "humidifier.humidifier",
"name": {"name": "Humidifier"},
"traits": [
"action.devices.traits.HumiditySetting",
"action.devices.traits.OnOff",
],
"type": "action.devices.types.HUMIDIFIER",
"willReportState": False,
"attributes": {"humiditySetpointRange": {"minPercent": 0, "maxPercent": 100}},
},
{
"id": "humidifier.dehumidifier",
"name": {"name": "Dehumidifier"},
"traits": [
"action.devices.traits.HumiditySetting",
"action.devices.traits.OnOff",
],
"type": "action.devices.types.DEHUMIDIFIER",
"willReportState": False,
"attributes": {"humiditySetpointRange": {"minPercent": 0, "maxPercent": 100}},
},
{
"id": "humidifier.hygrostat",
"name": {"name": "Hygrostat"},
"traits": [
"action.devices.traits.HumiditySetting",
"action.devices.traits.Modes",
"action.devices.traits.OnOff",
],
"type": "action.devices.types.HUMIDIFIER",
"willReportState": False,
"attributes": {"humiditySetpointRange": {"minPercent": 0, "maxPercent": 100}},
},
{
"id": "lock.front_door",
"name": {"name": "Front Door"},
"traits": ["action.devices.traits.LockUnlock"],
"type": "action.devices.types.LOCK",
"willReportState": False,
},
{
"id": "lock.kitchen_door",
"name": {"name": "Kitchen Door"},
"traits": ["action.devices.traits.LockUnlock"],
"type": "action.devices.types.LOCK",
"willReportState": False,
},
{
"id": "lock.openable_lock",
"name": {"name": "Openable Lock"},
"traits": ["action.devices.traits.LockUnlock"],
"type": "action.devices.types.LOCK",
"willReportState": False,
},
{
"id": "alarm_control_panel.alarm",
"name": {"name": "Alarm"},
"traits": ["action.devices.traits.ArmDisarm"],
"type": "action.devices.types.SECURITYSYSTEM",
"willReportState": False,
},
]
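# A small illustrative helper (not part of the original helpers): it shows how
# MockConfig can be narrowed instead of exposing everything, e.g. exposing
# only light entities and requiring 2FA for locks.
def mock_config_lights_only():
    """Return a MockConfig that only exposes `light.*` entities."""
    return MockConfig(
        should_expose=lambda state: state.entity_id.startswith("light."),
        should_2fa=lambda state: state.domain == "lock",
        agent_user_ids={"test-agent-user-id": {}},
    )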
|
|
"""
radish
~~~~~~
The root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by Timo Furrer <[email protected]>
:license: MIT, see LICENSE for more details.
"""
import pytest
from tagexpressions import parse
from radish.models import ScenarioOutline, Tag
def test_scenariooutline_set_feature_on_all_examples(mocker):
"""A ScenarioOutline should forward a set Feature to all its Examples"""
# given
feature_mock = mocker.MagicMock(name="Feature")
scenario = ScenarioOutline(
1, "Scenario Outline", "My ScenarioOutline", [], None, None, [], []
)
first_example = mocker.MagicMock(name="First Example")
second_example = mocker.MagicMock(name="Second Example")
scenario.examples = [first_example, second_example]
# when
scenario.set_feature(feature_mock)
# then
assert scenario.feature is feature_mock
first_example.set_feature.assert_called_once_with(feature_mock)
second_example.set_feature.assert_called_once_with(feature_mock)
def test_scenariooutline_set_background_on_all_examples(mocker):
"""A ScenarioOutline should forward a set Background to all its Examples"""
# given
background_mock = mocker.MagicMock(name="Background")
scenario = ScenarioOutline(
1, "Scenario Outline", "My ScenarioOutline", [], None, None, [], []
)
first_example = mocker.MagicMock(name="First Example")
second_example = mocker.MagicMock(name="Second Example")
scenario.examples = [first_example, second_example]
# when
scenario.set_background(background_mock)
# then
assert scenario.background is not None
first_example.set_background.assert_called_once_with(background_mock)
second_example.set_background.assert_called_once_with(background_mock)
def test_scenariooutline_set_rule_on_all_examples(mocker):
"""A ScenarioOutline should forward a set Rule to all its Examples"""
# given
rule_mock = mocker.MagicMock(name="Rule")
scenario = ScenarioOutline(
1, "Scenario Outline", "My ScenarioOutline", [], None, None, [], []
)
first_example = mocker.MagicMock(name="First Example")
second_example = mocker.MagicMock(name="Second Example")
scenario.examples = [first_example, second_example]
# when
scenario.set_rule(rule_mock)
# then
assert scenario.rule is rule_mock
first_example.set_rule.assert_called_once_with(rule_mock)
second_example.set_rule.assert_called_once_with(rule_mock)
def test_scenariooutline_build_examples_from_example_table():
"""A ScenarioOutline should build its Examples from the given Example Table"""
# given & when
scenario = ScenarioOutline(
1,
"Scenario Outline",
"My ScenarioOutline",
[],
None,
None,
[],
[{}, {}], # two empty examples
)
# then
assert len(scenario.examples) == 2
def test_scenariooutline_should_build_examples_with_info_in_short_description():
"""A ScenarioOutline should build its Examples with the Example Info in the short description"""
# given & when
scenario = ScenarioOutline(
1,
"Scenario Outline",
"My ScenarioOutline",
[],
None,
None,
[],
[{"foo": "bar", "bla": "meh"}, {"bar": "foo", "meh": "bla"}],
)
# then
assert (
scenario.examples[0].short_description
== "My ScenarioOutline [foo: bar, bla: meh]" # noqa
# Python 3.5 has no dict ordering
or scenario.examples[0].short_description # noqa
== "My ScenarioOutline [bla: meh, foo: bar]" # noqa
)
assert (
scenario.examples[1].short_description
== "My ScenarioOutline [bar: foo, meh: bla]" # noqa
# Python 3.5 has no dict ordering
or scenario.examples[1].short_description # noqa
== "My ScenarioOutline [meh: bla, bar: foo]" # noqa
)
def test_scenariooutline_should_build_examples_with_copied_steps(mocker):
"""A ScenarioOutline should build its Example with a copy of its own Steps"""
# given & when
scenario = ScenarioOutline(
1,
"Scenario Outline",
"My ScenarioOutline",
[],
None,
None,
[mocker.MagicMock(name="First Step"), mocker.MagicMock(name="Second Step")],
[{}, {}],
)
# then
assert len(scenario.examples[0].steps) == 2
assert len(scenario.examples[1].steps) == 2
assert scenario.steps is not scenario.examples[0].steps
assert scenario.steps is not scenario.examples[1].steps
def test_scenariooutline_should_build_examples_with_replaced_step_texts(mocker):
"""
A ScenarioOutline should build its Example with Step Texts that have the Example Info replaced
"""
# given & when
scenario = ScenarioOutline(
1,
"Scenario Outline",
"My ScenarioOutline",
[],
None,
None,
[
mocker.MagicMock(name="First Step", text="One <foo> Three"),
mocker.MagicMock(name="Second Step", text="Four <bar> Six"),
],
[{"foo": "Two", "bar": "Five"}, {"foo": "Zwei", "bar": "Fuenf"}],
)
# then
assert scenario.examples[0].steps[0].text == "One Two Three"
assert scenario.examples[0].steps[1].text == "Four Five Six"
assert scenario.examples[1].steps[0].text == "One Zwei Three"
assert scenario.examples[1].steps[1].text == "Four Fuenf Six"
@pytest.mark.parametrize(
"tagexpression, scenario_ids, expected_has_to_run",
[
(None, [], True),
(parse("tag-a"), [], True),
(parse("tag-c"), [], True),
(parse("tag-X"), [], False),
(None, [1], True),
(None, [3], True),
(None, [-1], False),
(parse("tag-a"), [-1], False),
(parse("tag-X"), [1], False),
(parse("tag-X"), [3], False),
(parse("tag-a"), [3], True),
],
ids=[
"no tagexpression, no scenario_ids => RUN",
"tagexpression match in Scenario Tags, no scenario_ids => RUN",
"tagexpression match in Feature Tags, no scenario_ids => RUN",
"tagexpression no match, no scenario_ids => NO RUN",
"no tagexpression, scenario_ids match => RUN",
"no tagexpression, scenario_ids match Example => RUN",
"no tagexpression, scenario_ids no match => NO RUN",
"tagexpression match, scenario_ids no match => NO RUN",
"tagexpression no match, scenario_ids match => NO RUN",
"tagexpression no match, scenario_ids match Example => NO RUN",
"tag expression match, scenario_ids match Example => RUN",
],
)
def test_scenario_should_correctly_evaluate_if_it_has_to_be_run(
mocker, tagexpression, scenario_ids, expected_has_to_run
):
"""Test that a Scenario should correctly evaluate if it has to be run or not"""
# given
feature_mock = mocker.MagicMock(tags=[Tag("tag-c", None, None)])
scenario = ScenarioOutline(
1,
"Scenario Outline",
"My ScenarioOutline",
[Tag("tag-a", None, None), Tag("tag-b", None, None)],
None,
None,
[],
[{"foo": "bar"}, {"foo": "meh"}],
)
scenario.set_feature(feature_mock)
# when
has_to_run = scenario.has_to_run(tagexpression, scenario_ids)
# then
assert has_to_run == expected_has_to_run
|
|
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod, abstractproperty
from errno import ENOENT
from functools import partial
from os import remove
from os.path import exists
import sqlite3
import warnings
from bcolz import (
carray,
ctable,
)
from collections import namedtuple
import logbook
import numpy as np
from numpy import (
array,
int64,
float64,
full,
iinfo,
integer,
issubdtype,
nan,
uint32,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
read_csv,
Timestamp,
NaT,
isnull,
)
from pandas.tslib import iNaT
from six import (
iteritems,
with_metaclass,
viewkeys,
)
from zipline.utils.functional import apply
from zipline.utils.preprocess import call
from zipline.utils.input_validation import (
coerce_string,
preprocess,
expect_element,
verify_indices_all_unique,
)
from zipline.utils.sqlite_utils import group_into_chunks
from zipline.utils.memoize import lazyval
from zipline.utils.cli import maybe_show_progress
from ._equities import _compute_row_slices, _read_bcolz_data
from ._adjustments import load_adjustments_from_sqlite
logger = logbook.Logger('UsEquityPricing')
OHLC = frozenset(['open', 'high', 'low', 'close'])
US_EQUITY_PRICING_BCOLZ_COLUMNS = (
'open', 'high', 'low', 'close', 'volume', 'day', 'id'
)
SQLITE_ADJUSTMENT_COLUMN_DTYPES = {
'effective_date': integer,
'ratio': float,
'sid': integer,
}
SQLITE_ADJUSTMENT_TABLENAMES = frozenset(['splits', 'dividends', 'mergers'])
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'amount': float,
}
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'payment_sid': integer,
'ratio': float,
}
UINT32_MAX = iinfo(uint32).max
class NoDataOnDate(Exception):
"""
    Raised when a spot price cannot be found for the sid and date.
"""
pass
def check_uint32_safe(value, colname):
if value >= UINT32_MAX:
raise ValueError(
"Value %s from column '%s' is too large" % (value, colname)
)
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def winsorise_uint32(df, invalid_data_behavior, column, *columns):
"""Drops any record where a value would not fit into a uint32.
Parameters
----------
df : pd.DataFrame
The dataframe to winsorise.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is outside the bounds of a uint32.
    column, *columns : iterable[str]
        The names of the columns to check.
Returns
-------
truncated : pd.DataFrame
``df`` with values that do not fit into a uint32 zeroed out.
"""
columns = list((column,) + columns)
mask = df[columns] > UINT32_MAX
if invalid_data_behavior != 'ignore':
mask |= df[columns].isnull()
else:
# we are not going to generate a warning or error for this so just use
# nan_to_num
df[columns] = np.nan_to_num(df[columns])
mv = mask.values
if mv.any():
if invalid_data_behavior == 'raise':
raise ValueError(
'%d values out of bounds for uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
)
if invalid_data_behavior == 'warn':
warnings.warn(
'Ignoring %d values because they are out of bounds for'
' uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
stacklevel=3, # one extra frame for `expect_element`
)
df[mask] = 0
return df
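# Illustrative sketch (not part of the original module): how winsorise_uint32
# treats a value that overflows uint32. The frame below is hypothetical; under
# 'warn' the offending cell is zeroed and a warning is emitted, all other
# cells are left untouched.
#
#     df = DataFrame({'open': [1.0, 2.0], 'high': [1.5, 2.5],
#                     'low': [0.5, 1.5], 'close': [1.2, 2.2],
#                     'volume': [100.0, 2.0 ** 33]})
#     winsorise_uint32(df, 'warn', 'volume', *OHLC)
#     # df.loc[1, 'volume'] is now 0.0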
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def to_ctable(raw_data, invalid_data_behavior):
if isinstance(raw_data, ctable):
# we already have a ctable so do nothing
return raw_data
winsorise_uint32(raw_data, invalid_data_behavior, 'volume', *OHLC)
processed = (raw_data[list(OHLC)] * 1000).astype('uint32')
dates = raw_data.index.values.astype('datetime64[s]')
check_uint32_safe(dates.max().view(np.int64), 'day')
processed['day'] = dates.astype('uint32')
processed['volume'] = raw_data.volume.astype('uint32')
return ctable.fromdataframe(processed)
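# Storage convention sketch (hypothetical numbers): to_ctable stores prices as
# uint32 values scaled by 1000 and dates as seconds since the Unix epoch, and
# BcolzDailyBarReader below undoes the price scaling with its
# PRICE_ADJUSTMENT_FACTOR of 0.001.
#
#     10.25 (as-traded close)                          -> stored as 10250
#     np.datetime64('2014-01-02', 's').astype('int64') -> 1388620800 in 'day'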
class BcolzDailyBarWriter(object):
"""
Class capable of writing daily OHLCV data to disk in a format that can be
read efficiently by BcolzDailyOHLCVReader.
Parameters
----------
filename : str
The location at which we should write our output.
calendar : pandas.DatetimeIndex
Calendar to use to compute asset calendar offsets.
See Also
--------
zipline.data.us_equity_pricing.BcolzDailyBarReader
"""
_csv_dtypes = {
'open': float64,
'high': float64,
'low': float64,
'close': float64,
'volume': float64,
}
def __init__(self, filename, calendar):
self._filename = filename
self._calendar = calendar
@property
def progress_bar_message(self):
return "Merging daily equity files:"
def progress_bar_item_show_func(self, value):
return value if value is None else str(value[0])
def write(self,
data,
assets=None,
show_progress=False,
invalid_data_behavior='warn'):
"""
Parameters
----------
data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]]
The data chunks to write. Each chunk should be a tuple of sid
and the data for that asset.
assets : set[int], optional
The assets that should be in ``data``. If this is provided
we will check ``data`` against the assets and provide better
progress information.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional
What to do when data is encountered that is outside the range of
a uint32.
Returns
-------
table : bcolz.ctable
The newly-written table.
"""
ctx = maybe_show_progress(
((sid, to_ctable(df, invalid_data_behavior)) for sid, df in data),
show_progress=show_progress,
item_show_func=self.progress_bar_item_show_func,
label=self.progress_bar_message,
length=len(assets) if assets is not None else None,
)
with ctx as it:
return self._write_internal(it, assets)
def write_csvs(self,
asset_map,
show_progress=False,
invalid_data_behavior='warn'):
"""Read CSVs as DataFrames from our asset map.
Parameters
----------
asset_map : dict[int -> str]
A mapping from asset id to file path with the CSV data for that
asset
show_progress : bool
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is encountered that is outside the range of
a uint32.
"""
read = partial(
read_csv,
parse_dates=['day'],
index_col='day',
dtype=self._csv_dtypes,
)
return self.write(
((asset, read(path)) for asset, path in iteritems(asset_map)),
assets=viewkeys(asset_map),
show_progress=show_progress,
invalid_data_behavior=invalid_data_behavior,
)
def _write_internal(self, iterator, assets):
"""
Internal implementation of write.
`iterator` should be an iterator yielding pairs of (asset, ctable).
"""
total_rows = 0
first_row = {}
last_row = {}
calendar_offset = {}
# Maps column name -> output carray.
columns = {
k: carray(array([], dtype=uint32))
for k in US_EQUITY_PRICING_BCOLZ_COLUMNS
}
earliest_date = None
calendar = self._calendar
if assets is not None:
@apply
def iterator(iterator=iterator, assets=set(assets)):
for asset_id, table in iterator:
if asset_id not in assets:
raise ValueError('unknown asset id %r' % asset_id)
yield asset_id, table
for asset_id, table in iterator:
nrows = len(table)
for column_name in columns:
if column_name == 'id':
# We know what the content of this column is, so don't
# bother reading it.
columns['id'].append(
full((nrows,), asset_id, dtype='uint32'),
)
continue
columns[column_name].append(table[column_name])
if earliest_date is None:
earliest_date = table["day"][0]
else:
earliest_date = min(earliest_date, table["day"][0])
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
# Calculate the number of trading days between the first date
# in the stored data and the first date of **this** asset. This
# offset used for output alignment by the reader.
asset_first_day = table['day'][0]
calendar_offset[asset_key] = calendar.get_loc(
Timestamp(asset_first_day, unit='s', tz='UTC'),
)
# This writes the table to disk.
full_table = ctable(
columns=[
columns[colname]
for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS
],
names=US_EQUITY_PRICING_BCOLZ_COLUMNS,
rootdir=self._filename,
mode='w',
)
full_table.attrs['first_trading_day'] = (
earliest_date // 1e6
if earliest_date is not None else
iNaT
)
full_table.attrs['first_row'] = first_row
full_table.attrs['last_row'] = last_row
full_table.attrs['calendar_offset'] = calendar_offset
full_table.attrs['calendar'] = calendar.asi8.tolist()
full_table.flush()
return full_table
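# Sketch of the attrs written above for two hypothetical assets: asset 1 with
# 3 rows written first and asset 2 with 2 rows written after it, where asset 2
# starts 5 sessions later on the calendar. Keys are strings because bcolz
# attrs do not accept integer keys.
#
#     first_row       == {'1': 0, '2': 3}
#     last_row        == {'1': 2, '2': 4}
#     calendar_offset == {'1': 0, '2': 5}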
class DailyBarReader(with_metaclass(ABCMeta)):
"""
Reader for OHCLV pricing data at a daily frequency.
"""
@abstractmethod
def load_raw_arrays(self, columns, start_date, end_date, assets):
pass
@abstractmethod
def spot_price(self, sid, day, colname):
pass
@abstractproperty
def last_available_dt(self):
pass
class BcolzDailyBarReader(DailyBarReader):
"""
Reader for raw pricing data written by BcolzDailyOHLCVWriter.
A Bcolz CTable is comprised of Columns and Attributes.
Columns
-------
The table with which this loader interacts contains the following columns:
['open', 'high', 'low', 'close', 'volume', 'day', 'id'].
The data in these columns is interpreted as follows:
- Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *
as-traded dollar value.
- Volume is interpreted as as-traded volume.
- Day is interpreted as seconds since midnight UTC, Jan 1, 1970.
- Id is the asset id of the row.
The data in each column is grouped by asset and then sorted by day within
each asset block.
    The table is built to represent a long time range of data, e.g. ten years
    of equity data, so the lengths of the asset blocks are not equal to one
    another. The blocks are clipped to the known start and end date of each
    asset to cut down on the number of empty values that would need to be
    included to make a regular/cubic dataset.
    When reading across the open, high, low, close, and volume columns, rows
    at the same index represent the same asset and day.
Parameters
----------
table : bcolz.ctable
        The ctable containing the pricing data, with attrs corresponding to the
Attributes list below.
    read_all_threshold : int
        The number of equities at which the read strategy changes: at or
        below this threshold, data is read by slicing the carray for each
        asset individually; above it, all data for all assets is pulled
        into memory once and then indexed for each day and asset pair.
        Used to tune read performance for small versus large numbers of
        equities.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
first_row : dict
Map from asset_id -> index of first row in the dataset with that id.
last_row : dict
Map from asset_id -> index of last row in the dataset with that id.
calendar_offset : dict
Map from asset_id -> calendar index of first row.
calendar : list[int64]
Calendar used to compute offsets, in asi8 format (ns since EPOCH).
We use first_row and last_row together to quickly find ranges of rows to
load when reading an asset's data into memory.
We use calendar_offset and calendar to orient loaded blocks within a
range of queried dates.
See Also
--------
zipline.data.us_equity_pricing.BcolzDailyBarWriter
"""
def __init__(self, table, read_all_threshold=3000):
self._maybe_table_rootdir = table
# Cache of fully read np.array for the carrays in the daily bar table.
# raw_array does not use the same cache, but it could.
# Need to test keeping the entire array in memory for the course of a
# process first.
self._spot_cols = {}
self.PRICE_ADJUSTMENT_FACTOR = 0.001
self._read_all_threshold = read_all_threshold
@lazyval
def _table(self):
maybe_table_rootdir = self._maybe_table_rootdir
if isinstance(maybe_table_rootdir, ctable):
return maybe_table_rootdir
return ctable(rootdir=maybe_table_rootdir, mode='r')
@lazyval
def _calendar(self):
return DatetimeIndex(self._table.attrs['calendar'], tz='UTC')
@lazyval
def _first_rows(self):
return {
int(asset_id): start_index
for asset_id, start_index in iteritems(
self._table.attrs['first_row'],
)
}
@lazyval
def _last_rows(self):
return {
int(asset_id): end_index
for asset_id, end_index in iteritems(
self._table.attrs['last_row'],
)
}
@lazyval
def _calendar_offsets(self):
return {
int(id_): offset
for id_, offset in iteritems(
self._table.attrs['calendar_offset'],
)
}
@lazyval
def first_trading_day(self):
try:
return Timestamp(
self._table.attrs['first_trading_day'],
unit='ms',
tz='UTC'
)
except KeyError:
return None
@property
def last_available_dt(self):
return self._calendar[-1]
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
            Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
)
def load_raw_arrays(self, columns, start_date, end_date, assets):
# Assumes that the given dates are actually in calendar.
start_idx = self._calendar.get_loc(start_date)
end_idx = self._calendar.get_loc(end_date)
first_rows, last_rows, offsets = self._compute_slices(
start_idx,
end_idx,
assets,
)
read_all = len(assets) > self._read_all_threshold
return _read_bcolz_data(
self._table,
(end_idx - start_idx + 1, len(assets)),
list(columns),
first_rows,
last_rows,
offsets,
read_all,
)
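    # Usage sketch (hypothetical rootdir, dates and sids, not taken from the
    # original tests): each returned array has one row per session and one
    # column per requested asset.
    #
    #     reader = BcolzDailyBarReader('/path/to/daily_equities.bcolz')
    #     closes, volumes = reader.load_raw_arrays(
    #         ['close', 'volume'],
    #         Timestamp('2014-01-06', tz='UTC'),
    #         Timestamp('2014-01-10', tz='UTC'),
    #         sids,  # e.g. a pandas Int64Index of asset ids
    #     )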
def _spot_col(self, colname):
"""
Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
A name of a OHLCV carray in the daily_bar_table
Returns
-------
array (uint32)
Full read array of the carray in the daily_bar_table with the
given colname.
"""
try:
col = self._spot_cols[colname]
except KeyError:
col = self._spot_cols[colname] = self._table[colname]
return col
def get_last_traded_dt(self, asset, day):
volumes = self._spot_col('volume')
if day >= asset.end_date:
# go back to one day before the asset ended
search_day = self._calendar[
self._calendar.searchsorted(asset.end_date) - 1
]
else:
search_day = day
while True:
try:
ix = self.sid_day_index(asset, search_day)
except NoDataOnDate:
return None
if volumes[ix] != 0:
return search_day
prev_day_ix = self._calendar.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self._calendar[prev_day_ix]
else:
return None
def sid_day_index(self, sid, day):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
int
Index into the data tape for the given sid and day.
            Raises a NoDataOnDate exception if the given day for this sid is
            outside the equity's date range.
"""
try:
day_loc = self._calendar.get_loc(day)
        except KeyError:
raise NoDataOnDate("day={0} is outside of calendar={1}".format(
day, self._calendar))
offset = day_loc - self._calendar_offsets[sid]
if offset < 0:
raise NoDataOnDate(
"No data on or before day={0} for sid={1}".format(
day, sid))
ix = self._first_rows[sid] + offset
if ix > self._last_rows[sid]:
raise NoDataOnDate(
"No data on or after day={0} for sid={1}".format(
day, sid))
return ix
def spot_price(self, sid, day, colname):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
colname : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
The spot price for colname of the given sid on the given day.
            Raises a NoDataOnDate exception if the given day for this sid is
            outside the equity's date range.
Returns -1 if the day is within the date range, but the price is
0.
"""
ix = self.sid_day_index(sid, day)
price = self._spot_col(colname)[ix]
if price == 0:
return -1
if colname != 'volume':
return price * 0.001
else:
return price
class PanelDailyBarReader(DailyBarReader):
"""
Reader for data passed as Panel.
    DataPanel Structure
    -------------------
items : Int64Index
Asset identifiers. Must be unique.
major_axis : DatetimeIndex
        Dates for data provided by the Panel. Must be unique.
minor_axis : ['open', 'high', 'low', 'close', 'volume']
Price attributes. Must be unique.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
panel : pd.Panel
The panel from which to read OHLCV data.
first_trading_day : pd.Timestamp
The first trading day in the dataset.
"""
@preprocess(panel=call(verify_indices_all_unique))
def __init__(self, calendar, panel):
panel = panel.copy()
if 'volume' not in panel.minor_axis:
# Fake volume if it does not exist.
panel.loc[:, :, 'volume'] = int(1e9)
self.first_trading_day = panel.major_axis[0]
self._calendar = calendar
self.panel = panel
@property
def last_available_dt(self):
return self._calendar[-1]
def load_raw_arrays(self, columns, start_date, end_date, assets):
columns = list(columns)
cal = self._calendar
index = cal[cal.slice_indexer(start_date, end_date)]
shape = (len(index), len(assets))
results = []
for col in columns:
outbuf = zeros(shape=shape)
for i, asset in enumerate(assets):
data = self.panel.loc[asset, start_date:end_date, col]
data = data.reindex_axis(index).values
outbuf[:, i] = data
results.append(outbuf)
return results
def spot_price(self, sid, day, colname):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
colname : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
            The spot price for colname of the given sid on the given day,
            read directly from the panel.
"""
return self.panel.loc[sid, day, colname]
def get_last_traded_dt(self, sid, dt):
"""
Parameters
----------
sid : int
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
        pd.Timestamp : The last known dt for the asset at or before dt;
NaT if no trade is found before the given dt.
"""
while dt in self.panel.major_axis:
freq = self.panel.major_axis.freq
if not isnull(self.panel.loc[sid, dt, 'close']):
return dt
dt -= freq
else:
return NaT
class SQLiteAdjustmentWriter(object):
"""
Writer for data to be read by SQLiteAdjustmentReader
Parameters
----------
conn_or_path : str or sqlite3.Connection
A handle to the target sqlite database.
daily_bar_reader : BcolzDailyBarReader
Daily bar reader to use for dividend writes.
overwrite : bool, optional, default=False
If True and conn_or_path is a string, remove any existing files at the
given path before connecting.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentReader
"""
def __init__(self,
conn_or_path,
daily_bar_reader,
calendar,
overwrite=False):
if isinstance(conn_or_path, sqlite3.Connection):
self.conn = conn_or_path
elif isinstance(conn_or_path, str):
if overwrite and exists(conn_or_path):
try:
remove(conn_or_path)
except OSError as e:
if e.errno != ENOENT:
raise
self.conn = sqlite3.connect(conn_or_path)
self.uri = conn_or_path
else:
raise TypeError("Unknown connection type %s" % type(conn_or_path))
self._daily_bar_reader = daily_bar_reader
self._calendar = calendar
def _write(self, tablename, expected_dtypes, frame):
if frame is None or frame.empty:
# keeping the dtypes correct for empty frames is not easy
frame = DataFrame(
np.array([], dtype=list(expected_dtypes.items())),
)
else:
if frozenset(frame.columns) != viewkeys(expected_dtypes):
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s" % (
set(expected_dtypes),
frame.columns.tolist(),
)
)
actual_dtypes = frame.dtypes
for colname, expected in iteritems(expected_dtypes):
actual = actual_dtypes[colname]
if not issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column"
" '{colname}', but got '{actual}'.".format(
expected=expected,
colname=colname,
actual=actual,
),
)
frame.to_sql(
tablename,
self.conn,
if_exists='append',
chunksize=50000,
)
def write_frame(self, tablename, frame):
if tablename not in SQLITE_ADJUSTMENT_TABLENAMES:
raise ValueError(
"Adjustment table %s not in %s" % (
tablename,
SQLITE_ADJUSTMENT_TABLENAMES,
)
)
if not (frame is None or frame.empty):
frame = frame.copy()
frame['effective_date'] = frame['effective_date'].values.astype(
'datetime64[s]',
).astype('int64')
return self._write(
tablename,
SQLITE_ADJUSTMENT_COLUMN_DTYPES,
frame,
)
def write_dividend_payouts(self, frame):
"""
Write dividend payout data to SQLite table `dividend_payouts`.
"""
return self._write(
'dividend_payouts',
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def write_stock_dividend_payouts(self, frame):
return self._write(
'stock_dividend_payouts',
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def calc_dividend_ratios(self, dividends):
"""
Calculate the ratios to apply to equities when looking back at pricing
history so that the price is smoothed over the ex_date, when the market
adjusts to the change in equity value due to upcoming dividend.
Returns
-------
DataFrame
A frame in the same format as splits and mergers, with keys
- sid, the id of the equity
- effective_date, the date in seconds on which to apply the ratio.
- ratio, the ratio to apply to backwards looking pricing data.
"""
if dividends is None:
return DataFrame(np.array(
[],
dtype=[
('sid', uint32),
('effective_date', uint32),
('ratio', float64),
],
))
ex_dates = dividends.ex_date.values
sids = dividends.sid.values
amounts = dividends.amount.values
ratios = full(len(amounts), nan)
daily_bar_reader = self._daily_bar_reader
effective_dates = full(len(amounts), -1, dtype=int64)
calendar = self._calendar
for i, amount in enumerate(amounts):
sid = sids[i]
ex_date = ex_dates[i]
day_loc = calendar.get_loc(ex_date, method='bfill')
prev_close_date = calendar[day_loc - 1]
try:
prev_close = daily_bar_reader.spot_price(
sid, prev_close_date, 'close')
if prev_close != 0.0:
ratio = 1.0 - amount / prev_close
ratios[i] = ratio
# only assign effective_date when data is found
effective_dates[i] = ex_date
except NoDataOnDate:
logger.warn("Couldn't compute ratio for dividend %s" % {
'sid': sid,
'ex_date': ex_date,
'amount': amount,
})
continue
# Create a mask to filter out indices in the effective_date, sid, and
# ratio vectors for which a ratio was not calculable.
effective_mask = effective_dates != -1
effective_dates = effective_dates[effective_mask]
effective_dates = effective_dates.astype('datetime64[ns]').\
astype('datetime64[s]').astype(uint32)
sids = sids[effective_mask]
ratios = ratios[effective_mask]
return DataFrame({
'sid': sids,
'effective_date': effective_dates,
'ratio': ratios,
})
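    # Worked example of the ratio above (hypothetical numbers): a $1.00
    # dividend on an equity that closed at $50.00 the session before its
    # ex_date gives ratio = 1.0 - 1.00 / 50.00 = 0.98, so backwards-looking
    # prices before the ex_date are scaled down by 2%.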
def _write_dividends(self, dividends):
if dividends is None:
dividend_payouts = None
else:
dividend_payouts = dividends.copy()
dividend_payouts['ex_date'] = dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
dividend_payouts['record_date'] = \
dividend_payouts['record_date'].values.astype('datetime64[s]').\
astype(integer)
dividend_payouts['declared_date'] = \
dividend_payouts['declared_date'].values.astype('datetime64[s]').\
astype(integer)
dividend_payouts['pay_date'] = \
dividend_payouts['pay_date'].values.astype('datetime64[s]').\
astype(integer)
self.write_dividend_payouts(dividend_payouts)
def _write_stock_dividends(self, stock_dividends):
if stock_dividends is None:
stock_dividend_payouts = None
else:
stock_dividend_payouts = stock_dividends.copy()
stock_dividend_payouts['ex_date'] = \
stock_dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['record_date'] = \
stock_dividend_payouts['record_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['declared_date'] = \
stock_dividend_payouts['declared_date'].\
values.astype('datetime64[s]').astype(integer)
stock_dividend_payouts['pay_date'] = \
stock_dividend_payouts['pay_date'].\
values.astype('datetime64[s]').astype(integer)
self.write_stock_dividend_payouts(stock_dividend_payouts)
def write_dividend_data(self, dividends, stock_dividends=None):
"""
Write both dividend payouts and the derived price adjustment ratios.
"""
# First write the dividend payouts.
self._write_dividends(dividends)
self._write_stock_dividends(stock_dividends)
# Second from the dividend payouts, calculate ratios.
dividend_ratios = self.calc_dividend_ratios(dividends)
self.write_frame('dividends', dividend_ratios)
def write(self,
splits=None,
mergers=None,
dividends=None,
stock_dividends=None):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame, optional
Dataframe containing split data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is divided by this value.
sid : int
The asset id associated with this adjustment.
mergers : pandas.DataFrame, optional
DataFrame containing merger data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is unaffected.
sid : int
The asset id associated with this adjustment.
dividends : pandas.DataFrame, optional
DataFrame containing dividend data. The format of the dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
amount : float
The cash amount paid for each share.
Dividend ratios are calculated as:
``1.0 - (dividend_value / "close on day prior to ex_date")``
stock_dividends : pandas.DataFrame, optional
DataFrame containing stock dividend data. The format of the
dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
payment_sid : int
The asset id of the shares that should be paid instead of
cash.
ratio : float
The ratio of currently held shares in the held sid that
should be paid with new shares of the payment_sid.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentReader
"""
self.write_frame('splits', splits)
self.write_frame('mergers', mergers)
self.write_dividend_data(dividends, stock_dividends)
self.conn.execute(
"CREATE INDEX splits_sids "
"ON splits(sid)"
)
self.conn.execute(
"CREATE INDEX splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute(
"CREATE INDEX mergers_sids "
"ON mergers(sid)"
)
self.conn.execute(
"CREATE INDEX mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividends_sid "
"ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_effective_date "
"ON dividends(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividend_payouts_sid "
"ON dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_payouts_ex_date "
"ON dividend_payouts(ex_date)"
)
self.conn.execute(
"CREATE INDEX stock_dividend_payouts_sid "
"ON stock_dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX stock_dividends_payouts_ex_date "
"ON stock_dividend_payouts(ex_date)"
)
def close(self):
self.conn.close()
UNPAID_QUERY_TEMPLATE = """
SELECT sid, amount, pay_date from dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
Dividend = namedtuple('Dividend', ['asset', 'amount', 'pay_date'])
UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE = """
SELECT sid, payment_sid, ratio, pay_date from stock_dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
StockDividend = namedtuple(
'StockDividend',
['asset', 'payment_asset', 'ratio', 'pay_date'])
class SQLiteAdjustmentReader(object):
"""
Loads adjustments based on corporate actions from a SQLite database.
Expects data written in the format output by `SQLiteAdjustmentWriter`.
Parameters
----------
conn : str or sqlite3.Connection
Connection from which to load data.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentWriter
"""
@preprocess(conn=coerce_string(sqlite3.connect))
def __init__(self, conn):
self.conn = conn
def load_adjustments(self, columns, dates, assets):
return load_adjustments_from_sqlite(
self.conn,
list(columns),
dates,
assets,
)
def get_adjustments_for_sid(self, table_name, sid):
t = (sid,)
c = self.conn.cursor()
adjustments_for_sid = c.execute(
"SELECT effective_date, ratio FROM %s WHERE sid = ?" %
table_name, t).fetchall()
c.close()
return [[Timestamp(adjustment[0], unit='s', tz='UTC'), adjustment[1]]
for adjustment in
adjustments_for_sid]
def get_dividends_with_ex_date(self, assets, date, asset_finder):
seconds = date.value / int(1e9)
c = self.conn.cursor()
divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_QUERY_TEMPLATE.format(
",".join(['?' for _ in chunk]))
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
div = Dividend(
asset_finder.retrieve_asset(row[0]),
row[1], Timestamp(row[2], unit='s', tz='UTC'))
divs.append(div)
c.close()
return divs
def get_stock_dividends_with_ex_date(self, assets, date, asset_finder):
seconds = date.value / int(1e9)
c = self.conn.cursor()
stock_divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE.format(
",".join(['?' for _ in chunk]))
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
stock_div = StockDividend(
asset_finder.retrieve_asset(row[0]), # asset
asset_finder.retrieve_asset(row[1]), # payment_asset
row[2],
Timestamp(row[3], unit='s', tz='UTC'))
stock_divs.append(stock_div)
c.close()
return stock_divs
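# Minimal round-trip sketch (hedged; `daily_bar_reader`, `calendar` and
# `splits_frame` are hypothetical objects, not defined in this module):
#
#     writer = SQLiteAdjustmentWriter(':memory:', daily_bar_reader, calendar)
#     writer.write(splits=splits_frame)
#     reader = SQLiteAdjustmentReader(writer.conn)
#     reader.get_adjustments_for_sid('splits', 1)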
|
|
# -*- coding: utf-8 -*-
"""Installer script for Pywikibot framework."""
#
# (C) Pywikibot team, 2009-2020
#
# Distributed under the terms of the MIT license.
#
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import sys
from setuptools import setup
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
versions_required_message = """
Pywikibot is not available on:
{version}
This version of Pywikibot only supports Python 2.7.4+ or 3.4+.
"""
def python_is_supported():
"""Check that Python is supported."""
# Any change to this must be copied to pwb.py
return PYTHON_VERSION >= (3, 4, 0) or PY2 and PYTHON_VERSION >= (2, 7, 4)
if not python_is_supported():
raise RuntimeError(versions_required_message.format(version=sys.version))
# ------- setup extra_requires ------- #
extra_deps = {
# Core library dependencies
'eventstreams': ['sseclient!=0.0.23,!=0.0.24,>=0.0.18'],
'isbn': ['python-stdnum>=1.13'],
'Graphviz': ['pydot>=1.2'],
'Google': ['google>=1.7'],
'mwparserfromhell': ['mwparserfromhell>=0.3.3'],
'Tkinter': [ # vulnerability found in Pillow<6.2.2
'Pillow>=6.2.2,<7.0.0;python_version<"3"',
'Pillow<6.0.0;python_version=="3.4"',
'Pillow>=6.2.2;python_version>="3.5"',
],
'security': [
'requests[security]'
';python_full_version=="2.7.7" or python_full_version=="2.7.8"',
],
'mwoauth': ['mwoauth!=0.3.1,>=0.2.4'],
'html': ['BeautifulSoup4'],
'http': ['fake_useragent'],
'flake8': [ # Due to incompatibilities between packages the order matters.
'flake8>=3.7.5',
'pydocstyle<=3.0.0;python_version<"3"',
'pydocstyle>=4.0.0;python_version>="3.4"',
'hacking',
'flake8-coding',
'flake8-comprehensions>=3.1.4;python_version>="3.8"',
'flake8-comprehensions>=2.2.0;python_version>="3.5"',
'flake8-comprehensions>=2.0.0,<2.2.0;python_version=="3.4"',
'flake8-comprehensions<2.0.0;python_version<"3"',
'flake8-docstrings>=1.3.1',
'flake8-future-import',
'flake8-mock>=0.3',
'flake8-print>=2.0.1',
'flake8-quotes>=2.0.1',
'flake8-string-format',
'flake8-tuple>=0.2.8',
'flake8-no-u-prefixed-strings>=0.2',
'pep8-naming>=0.7',
'pyflakes>=2.1.0',
],
# Additional core library dependencies which are only available on Python 2
'csv': ['unicodecsv;python_version<"3"'],
}
# ------- setup extra_requires for scripts ------- #
script_deps = {
'data_ingestion.py': extra_deps['csv'],
'flickrripper.py': [
'flickrapi<3.0.0;python_version<"3.5"',
'flickrapi>=2.2;python_version>="3.5"',
] + extra_deps['Tkinter'],
'imageharvest.py': extra_deps['html'],
'isbn.py': extra_deps['isbn'],
'match_images.py': extra_deps['Tkinter'],
'patrol.py': extra_deps['mwparserfromhell'],
'states_redirect.py': ['pycountry'],
'weblinkchecker.py': ['memento_client!=0.6.0,>=0.5.1'],
}
extra_deps.update(script_deps)
extra_deps.update({'scripts': [i for k, v in script_deps.items() for i in v]})
# ------- setup install_requires ------- #
# packages which are mandatory
dependencies = ['requests>=2.20.1,<2.22.0; python_version == "3.4"',
'requests>=2.20.1; python_version != "3.4"',
'enum34>=1.1.6,!=1.1.8; python_version < "3"',
'ipaddress; python_version < "3"',
'pathlib2;python_version<"3"']
# Python versions before 2.7.9 will cause urllib3 to trigger
# InsecurePlatformWarning warnings for all HTTPS requests. By
# installing with security extras, requests will automatically set
# them up and the warnings will stop. See
# <https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning>
# for more details.
# There is no secure version of cryptography for Python 2.7.6 or older.
dependencies += extra_deps['security']
try:
import bz2
except ImportError:
# Use bz2file if the python is not compiled with bz2 support.
dependencies.append('bz2file')
else:
assert bz2
# ------- setup tests_require ------- #
test_deps = ['bz2file', 'mock']
# Some of the ui_tests depend on accessing the console window's menu
# to set the console font and copy and paste, achieved using pywinauto
# which depends on pywin32.
# These tests may be disabled because pywin32 depends on VC++, is time
# consuming to build, and the console window can't be accessed during appveyor
# builds.
# Microsoft makes available a compiler for Python 2.7
# http://www.microsoft.com/en-au/download/details.aspx?id=44266
if os.name == 'nt' and os.environ.get('PYSETUP_TEST_NO_UI', '0') != '1':
test_deps += [
'pywinauto>0.6.4;python_version>="3.5" or python_version<"3"',
'pywinauto<=0.6.4;python_version=="3.4"',
'pywin32>220;python_version>="3.5" or python_version<"3"',
'pywin32<=220;python_version=="3.4"',
]
# Add all dependencies as test dependencies,
# so all scripts can be compiled for script_tests, etc.
if 'PYSETUP_TEST_EXTRAS' in os.environ:
test_deps += [i for k, v in extra_deps.items() if k != 'flake8' for i in v]
if 'requests[security]' in test_deps:
# Bug T105767 on Python 2.7 release 9+
if PY2 and PYTHON_VERSION[2] >= 9:
test_deps.remove('requests[security]')
# These extra dependencies are needed; otherwise unittest fails to load tests.
test_deps += extra_deps['csv']
test_deps += extra_deps['eventstreams']
test_deps += ['six;python_version>="3"']
def get_version(name):
"""Get a valid pywikibot module version string.
Either create a timebased version number for the package
or read the version number from the package.
@return: pywikibot module version string
@rtype: str
"""
version = '3.0'
try:
import subprocess
date = subprocess.check_output(
['git', 'log', '-1', '--format=%ci']).strip()
date = date.decode().split(' ', 1)[0].replace('-', '')
version += '.' + date
if 'sdist' not in sys.argv:
version += '.dev0'
except Exception as e:
print(e)
from pkg_resources import get_distribution, DistributionNotFound
try:
version = get_distribution(name).version
except DistributionNotFound as e:
print(e)
version += '.dev0'
return version
def read_desc(filename):
"""Read long description.
Combine included restructured text files which must be done before
uploading because the source isn't available after creating the package.
"""
desc = []
with open(filename) as f:
for line in f:
if line.strip().startswith('.. include::'):
include = os.path.relpath(line.rsplit('::')[1].strip())
if os.path.exists(include):
with open(include) as g:
desc.append(g.read())
else:
print('Cannot include {0}; file not found'.format(include))
else:
desc.append(line)
return ''.join(desc)
def get_packages(name):
"""Find framework packages."""
if PY2:
from setuptools import find_packages
packages = [package for package in find_packages()
if package.startswith(name + '.')]
else:
from setuptools import find_namespace_packages
packages = find_namespace_packages(include=[name + '.*'])
return [str(name)] + packages
def main():
"""Setup entry point."""
name = 'pywikibot'
setup(
name=name,
version=get_version(name),
description='Python MediaWiki Bot Framework',
long_description=read_desc('README.rst'),
keywords=['API', 'bot', 'framework', 'mediawiki', 'pwb', 'python',
'pywikibot', 'pywikipedia', 'pywikipediabot', 'wiki',
'wikimedia', 'wikipedia'],
maintainer='The Pywikibot team',
maintainer_email='[email protected]',
license='MIT License',
packages=get_packages(name),
python_requires='>=2.7.4, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
install_requires=dependencies,
extras_require=extra_deps,
url='https://www.mediawiki.org/wiki/Manual:Pywikibot',
download_url='https://tools.wmflabs.org/pywikibot/',
test_suite='tests.collector',
tests_require=test_deps,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Wiki',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
use_2to3=False
)
if __name__ == '__main__':
main()
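# Installation sketch (hedged; commands assume a local checkout of this
# repository): the extras_require mapping above exposes the optional
# dependency groups to pip, for example
#
#     pip install .                  # core dependencies only
#     pip install .[isbn,Graphviz]   # plus python-stdnum and pydot
#     pip install .[scripts]         # everything the bundled scripts need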
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
Program Purpose
Are you new to population genetics? Not familiar with command-line programs, or
just want a simple way to run basic pop gen analyses right on your laptop? Then
POPEase is perfect for you. The purpose of the POPEase program is to help novices
obtain reliable pop gen analyses for SNP data in just a few simple steps.
What The Program Does
POPEase (and the associated software) completes analyses that are normally done
using a series of GUI or command-line programs that can be complicated for
beginners. These programs are integrated into the script and run with a few
simple command-line flags detailed in the program documents. The programs
integrated into this script are standard analyses when examining population
genetics of your data set.
This single Python script removes the need to understand how to parse your file
into the correct format for the downstream analyses, it bypasses FSTAT and/or
Genepop for population genetics statistics, and it bypasses all programs
associated with population structure, including Structure, CLUMPP or CLUMPAK,
distruct, and Structure Harvester.
Edited by Alicia Reigel. 4 May 2016.
Copyright Alicia Reigel. Louisiana State University. 4 May 2016. All
rights reserved.
"""
import csv
import re
import glob
import os
import sys
import argparse
import subprocess
import numpy
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def parser_get_args():
"""Collect the path to the SNP data (forward and reverse), vcf data, and
base name for output files"""
parser = argparse.ArgumentParser(
description="""Input the desired information to run POPEase."""
)
parser.add_argument(
"--runstructure",
dest='runstructure',
action="store_true",
default=False,
help="""Add this flag if you want to run structure.""",
)
parser.add_argument(
"--computebestk",
dest='computebestK',
action="store_true",
default=False,
help="""Add this flag if you want an output of potential best K values after running structure.""",
)
parser.add_argument(
'--dirpath',
required=True,
type=str,
help='Enter the full path to the folder/directory where the required programs are located.'
)
parser.add_argument(
'--vcfpath',
required=True,
type=str,
help='Enter the full path to the input vcf file.'
)
parser.add_argument(
'--spidpath',
required=True,
type=str,
help='Enter the full path to the spidfile.'
)
parser.add_argument(
'--outputfile',
required=True,
type=str,
help='Enter the desired base name for the output files.'
)
parser.add_argument(
'--kpops',
required=True,
type=str,
help='Enter the maximum populations (K) to test when running structure.'
)
parser.add_argument(
'--loci',
required=True,
type=str,
help='Enter the number of loci in your data set.'
)
parser.add_argument(
'--individuals',
required=True,
type=str,
help='Enter the number of individuals that you will include in your structure run.'
)
parser.add_argument(
'--numruns',
type=int,
default=10,
help='Enter the number of structure runs for each K value.'
)
return parser.parse_args()
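# Hypothetical invocation sketch (paths and values are placeholders, not taken
# from the POPEase documentation):
#
#     python POPEase.py --runstructure --computebestk \
#         --dirpath /path/to/programs --vcfpath /path/to/data.vcf \
#         --spidpath /path/to/conversion.spid --outputfile myrun \
#         --kpops 5 --loci 1000 --individuals 48 --numruns 10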
def pgd_structure(outputfolder, inputfile, directorypath, spid):
"""Converts the user's VCF file into a STRUCTURE-formatted file"""
mypath = os.path.join(directorypath + "/STRUCTURE_directory")
if not os.path.isdir(mypath):
os.makedirs(mypath)
path_to_new_directory = os.path.abspath(mypath)
# gets the absolute path to the new directory created
PGDStructstdout = os.path.join(path_to_new_directory, "PGDStructstdout.txt")
PGDStructstderr = os.path.join(path_to_new_directory, "PGDStructstderr.txt" )
outputfile_pgdstruct = os.path.join(path_to_new_directory, "STRUCTUREformatfile")
outputfileabspath = os.path.abspath(outputfile_pgdstruct)
STRUC_PGD_params = ['PGDSpider2-cli.exe', '-inputfile', inputfile,
'-inputformat', 'VCF', '-outputfile', outputfile_pgdstruct, '-outputformat',
'STRUCTURE', '-spid', spid]
with open(PGDStructstdout, 'w') as stdout_file:
with open(PGDStructstderr, 'w') as stderr_file:
my_proc1 = subprocess.run(STRUC_PGD_params,
stdout=stdout_file,
stderr=stderr_file,
universal_newlines=True)
print("\nCreated output file in Structure format: {}\n".format(outputfile_pgdstruct))
return path_to_new_directory, outputfileabspath
def structure_run(structK, loci, individuals, numruns, pgd_structformatted_file, outputfile_struct_run, directorypath):
"""Runs STRUCTURE based on user inputs and mainparams file defaults"""
K = (int(structK) + 1)
numruns = (int(numruns) + 1)
for x in range(1, K):
for i in range(1, numruns):
x = str(x)
i = str(i)
outputfile = os.path.join(outputfile_struct_run + x + '_' + i)
list_struct_run = ['structure', '-K', x, '-o', outputfile, '-i', pgd_structformatted_file, '-L', loci, '-N', individuals]
stdoutfile = os.path.join(directorypath, 'STRUCTUREstdout_' + x + '_' + i)
stderrfile = os.path.join(directorypath, 'STRUCTUREsterr_' + x + '_' + i)
with open(stdoutfile, 'w') as struct_stdout_file:
with open(stderrfile, 'w') as struct_stderr_file:
my_proc_2 = subprocess.run(list_struct_run,
input=pgd_structformatted_file,
stdout=struct_stdout_file,
stderr=struct_stderr_file,
universal_newlines=True)
print('''\nOutput files created for this process can be found in the STRUCTURE_Directory.
Please see documentation for specific output file details.\n\n If you opted to compute best K then those values will be computed now.''')
def compute_ln(structK, directorypath):
"""Collects the Mean_LnP(D) from the structure run files and obtains a mean
for each K value"""
os.chdir(directorypath)
with open('Best_K_Analysis.csv', 'w') as LN_output:
headers = ['K', 'Mean_LnP(D)', 'StDevLN']
writer = csv.writer(LN_output)
writer.writerow(headers)
K = (int(structK) + 1)
for x in range(1, K, 1):
x = str(x)
filename = os.path.join("*STRUCTURERUN" + x + '*')
path_name = os.path.join(directorypath, filename)
# finds the pathnames for any files matching the filename
file_path_list = glob.glob(path_name)
# creates a list of the path names associated with the files found
LN_prob_list = []
for file in file_path_list:
with open(file, 'r') as info:
for line in info:
if re.search("Estimated Ln Prob of Data", line):
new_list = line.replace('\n', '').split(' = ')
LN_prob_list.append(float(new_list[1]))
meanLN = (sum(LN_prob_list) / float(len(LN_prob_list)))
stdevln = numpy.std(LN_prob_list)
row = [x, meanLN, stdevln]
writer.writerow(row)
path = os.path.abspath('Best_K_Analysis.csv')
return path
def calculate_ln1P(directorypath, path):
"""Expands the data to obtain Delta K and prints graphs"""
os.chdir(directorypath)
dataframe = pd.read_csv(path)
list1 = (-(dataframe.loc[:, 'Mean_LnP(D)'] - dataframe.loc[:, 'Mean_LnP(D)'].shift(-1)))
list2 = list(list1)
list2.insert(0, 'NaN')
list2.pop()
dataframe["Ln'P(D)"] = pd.DataFrame(list2)
dataframe['Ln"P(D)'] = (dataframe.loc[1:, "Ln'P(D)"] - dataframe.loc[:, "Ln'P(D)"].shift(-1))
dataframe['Ln"P(D)'] = dataframe['Ln"P(D)'].abs()
dataframe['Delta_K'] = (dataframe.loc[1:, 'Ln"P(D)'] / dataframe.loc[:, 'StDevLN'])
best_ln = dataframe['Mean_LnP(D)'].min()
best_deltak = dataframe['Delta_K'].max()
ln_row = dataframe.loc[(dataframe['Mean_LnP(D)'] == best_ln), 'K': 'Mean_LnP(D)']
deltak_row = dataframe.loc[(dataframe['Delta_K'] == best_deltak), 'K': 'Delta_K']
with PdfPages('Best_K_Figures.pdf') as pdf:
dataframe.plot(x='K', y='Mean_LnP(D)')
"""plt.ylim(plt.ylim()[::-1])"""
# flips y-axis to put lowest value on top. If desired ten simply remove three tick marks on each side of phrase.
plt.savefig("Mean_LnP(D)_Figure.png", bbox_inches='tight')
plt.title('Mean_LnP(D)')
pdf.savefig() # saves the current figure into a pdf page
plt.close()
dataframe.plot(x='K', y='Delta_K')
plt.savefig("DeltaK_figure.png", bbox_inches='tight')
plt.title('Delta K')
pdf.savefig()
plt.close()
with open(path, 'w') as csvfinal:
dataframe.to_csv(csvfinal)
with open('Deter_Best_K_info.txt', 'w') as output2:
output2.write("\nThe data for best K value based on Mean_LnP(D) is below:")
output2.write("{}\n".format(ln_row))
output2.write("The data from best K value based on Delta K is below:")
output2.write("{}\n".format(deltak_row))
output2.write('''These "best" K values are only SUGGESTIONS based on the lowest
value for Mean_LnP(D) and the highest value for Delta K. You should always
check the Structure documentation and consider your data and study system
carefully before choosing a final K value.\n''')
output2.write('''The output files include a PDF (Best_K_Figures.pdf) containing
graphs for Mean_LnP(D) and Delta K as well as a .png figure file for each
graph. Additionally, a CSV file (Best_K_Analysis.csv) of all related data
was created.\nThis program is finished running. To obtain a graphical
display of the Structure results you should see the documentation.''')
print('''Output files from --computebestk can be found in the STRUCTURE_Directory.\n
The program is now complete. Thanks!\n''')
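# Summary of the arithmetic in calculate_ln1P (a sketch of what the code above
# computes): Ln'P(D) at K is Mean_LnP(D)[K] - Mean_LnP(D)[K-1], Ln"P(D) is the
# absolute difference of successive Ln'P(D) values, and
# Delta_K = Ln"P(D) / StDevLN, which resembles the Evanno et al. Delta K
# statistic. The "best" K rows reported in Deter_Best_K_info.txt are simply
# the row with the lowest Mean_LnP(D) and the row with the highest Delta_K.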
def main():
args = parser_get_args()
inputfile = os.path.abspath(args.vcfpath)
directorypath = os.path.abspath(args.dirpath)
numruns = args.numruns
structK = args.kpops
loci = args.loci
spid = os.path.abspath(args.spidpath)
individuals = args.individuals
path_to_new_directory, outputfileabspath = pgd_structure(args.outputfile, inputfile, directorypath, spid)
if args.runstructure is True:
print('''\nPlease check your STRUCTURE-formatted file for missing data
Missing or bad quality data is indicated by a -9. You may want to remove
individuals with high amounts of missing data. Please follow instructions
in the documentation to do so and re-run this program. If your data is
correct and ready for running through STRUCTURE please enter "Y". If not,
please enter "N", correct your data and rerun this program.''')
y = ((str(input())).lower())
if y != "y" or "n":
print("Please enter either Y or N.")
y = (str(input())).lower()
if y == "y":
print('''Great, STRUCTURE will run now. This may take a significant
amount of time depending on the size of your data set. Please do not
            close this window or type. If on Windows, the cursor will blink while
the program is running.''')
outputfile_struct_run = os.path.join(path_to_new_directory, args.outputfile + '_STRUCTURERUN')
structure_run(structK, loci, individuals, numruns, outputfileabspath, outputfile_struct_run, path_to_new_directory)
if args.computebestK is True:
path = compute_ln(structK, path_to_new_directory)
calculate_ln1P(path_to_new_directory, path)
else:
print("Structure is complete. Exiting program. Thanks!")
sys.exit()
if y == "n":
print('Exiting program.')
sys.exit()
else:
if args.computebestK is True:
path = compute_ln(structK, path_to_new_directory)
calculate_ln1P(path_to_new_directory, path)
else:
print("Structure is complete. Exiting program. Thanks!")
sys.exit()
if __name__ == '__main__':
main()
|
|
#---------------------------------------------------------------------------
#
# ReactionDiffusion.py: implementation of reaction-diffusion chemical systems
#
# originally based on the breve Hypercycle.[tz/py] demo by jon klein
# <[email protected]>, www.spiderland.org
#
# by Lidia Yamamoto, Univ. Basel, Switzerland, January 2010
# 20150910: removed breve dependencies to run within PyCellChemistry
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright (C) 2015 Lidia A. R. Yamamoto
# Contact: http://www.artificial-chemistries.org/
#
# This file is part of PyCellChemistry.
#
# PyCellChemistry is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 3, as published by the Free Software Foundation.
#
# PyCellChemistry is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyCellChemistry, see file COPYING. If not, see
# http://www.gnu.org/licenses/
#
import sys
sys.path.insert(0, '..')
from artchem.ReactionVessel import *
import WritePNG as png
class ReactionDiffusionSystem( ODESystem ):
""" a PDE integrator for a reaction-diffusion system """
def __init__( self, x, y, dx=1.0, periodic=True, nneighs=4 ):
ODESystem.__init__( self )
self.sizex = x # grid size (x, y)
self.sizey = y
self.dx = dx # square grid cells of size (dx, dx)
self.periodic = periodic # boundary conditions
self.nneighs = self.set_neighbors(nneighs) # neighborhood config
self.conc = None # concentrations of all species on grid
self.dcdt = None # spatial dcdt for PDE integration
self.prod = None
self.rate = None
self.diff = None
self.diffcoef = None # diffusion coefficients
self.color = {} # color of a molecule
def close( self ):
""" closes the PDE system such that it can be integrated """
        if self.me is not None: return  # already closed
ODESystem.close(self)
ns = self.nspecies()
x = self.sizex
y = self.sizey
self.conc = np.zeros((ns, x, y))
self.dcdt = np.zeros((x, y))
self.prod = np.zeros((x, y))
self.rate = np.zeros((x, y))
self.diff = np.zeros((x, y))
self.diffcoef = np.zeros(ns)
def set_diffcoef( self, mol, value ):
""" set the diffusion coefficient of molecule mol """
if (mol == '' or self.species.count(mol) < 1):
return
i = self.species.index(mol)
self.diffcoef[i] = value
def get_diffcoef( self, mol ):
""" returns the diffusion coefficient of molecule mol """
if (mol == '' or self.species.count(mol) < 1):
return 0.0
i = self.species.index(mol)
return self.diffcoef[i]
def set_color( self, mol, color):
""" set color of molecule to color=(red, green, blue) """
self.color[mol] = color
def get_color( self, mol ):
""" returns the color that has been assigned to molecule mol """
if mol == '' or mol not in self.species or mol not in self.color:
return (0, 0, 0)
return self.color[mol]
def set_periodic( self, flag ):
""" periodic (toroidal) vs. non-periodic boundary conditions """
self.periodic = flag
def get_periodic( self ):
""" True if the current setup is periodic boundary """
return self.periodic
def set_neighbors( self, nn ):
""" set neighborhood configuration: nn =
2: simple (north and right neighbors)
4: Von Neumann
6: hexagonal lattice
8: Moore
"""
        if nn not in [ 2, 4, 6, 8 ]:
            sys.stderr.write("invalid neighborhood configuration nn=%s\n" % nn)
            sys.exit(-1)
        self.nneighs = nn
        return nn
def get_neighbors( self ):
""" returns the current neighborhood configuration """
return self.nneighs
def get_pos( self, x, y ):
""" make sure coordinates always fall within boundaries """
if (self.periodic):
x = (x + self.sizex) % self.sizex
y = (y + self.sizey) % self.sizey
else:
            x = min(max(x, 0), self.sizex - 1)
            y = min(max(y, 0), self.sizey - 1)
return (x, y)
def get_conc_by_index( self, idx, x, y ):
""" get concentration of molecule by index, respecting boundaries """
if (self.periodic):
(x, y) = self.get_pos(x, y)
return self.conc[idx, x, y]
elif (x >= 0 and x < self.sizex and y >= 0 and y < self.sizey):
return self.conc[idx, x, y]
else:
return 0.0
def get_conc( self, mol, x, y ):
""" get concentration of molecule by name, respecting boundaries """
if (mol == '' or self.species.count(mol) < 1):
return 0.0
i = self.species.index(mol)
return self.get_conc_by_index(i, x, y)
def set_conc( self, mol, conc, x, y ):
""" set the concentration of a molecule at position x,y to a
given value, respecting boundary conditions
"""
if (mol == '' or self.species.count(mol) < 1):
return
i = self.species.index(mol)
if (conc < 0):
conc = 0.0
if (self.periodic):
(x, y) = self.get_pos(x, y)
self.conc[i, x, y] = conc
elif (x >= 0 and x < self.sizex and y >= 0 and y < self.sizey):
self.conc[i, x, y] = conc
def deposit( self, mol, conc, x, y ):
""" deposit a given amount of a molecule at a precise location """
c = self.get_conc(mol, x, y)
self.set_conc(mol, c + conc, x, y)
def rnd_deposit( self, npoints, mol, conc, ampl=0.0):
""" deposit a random amount of a molecule at random locations """
if (mol == '' or self.species.count(mol) < 1):
return
c = conc
for i in range(npoints):
x = np.random.randint(self.sizex)
y = np.random.randint(self.sizey)
if (ampl > 0.0):
c = conc + ampl * np.random.random() - ampl / 2
self.deposit(mol, c, x, y)
def reset( self, mol, x, y ):
""" reset the concentration of a given molecule to zero """
self.set_conc(mol, 0.0, x, y)
def resetAll( self, mol='', conc=0.0 ):
""" reset the concentration of a given molecule to a given
value, overall on the grid; if no molecule is specified,
reset the concentrations of all molecules
"""
if (conc < 0):
conc = 0.0
if (mol == ''):
self.conc.fill(conc)
return
if (self.species.count(mol) < 1):
return
i = self.species.index(mol)
self.conc[i].fill(conc)
def set_patch_at( self, mol, conc, x, y ):
""" create a patch of chemical at a given location """
self.set_conc( mol, conc, x, y )
self.set_conc( mol, conc, x, ( y + 1 ) )
self.set_conc( mol, conc, x, ( y - 1 ) )
self.set_conc( mol, conc, ( x + 1 ), y )
self.set_conc( mol, conc, ( x - 1 ), y )
self.set_conc( mol, conc, (x - 1), ( y - 1 ) )
self.set_conc( mol, conc, (x - 1), ( y + 1 ) )
self.set_conc( mol, conc, ( x + 1 ), (y - 1) )
self.set_conc( mol, conc, ( x + 1 ), (y + 1) )
def set_patch( self, mol, conc ):
""" create a patch of chemical at a random location """
if (self.sizex < 3 or self.sizey < 3):
return
x = 1 + np.random.randint( self.sizex - 2 )
y = 1 + np.random.randint( self.sizey - 2 )
self.set_patch_at( mol, conc, x, y )
def set_patches( self, npatches, mol, initconc ):
""" create some initial random patches of chemicals """
m = mol
for i in range(npatches):
if (mol == ''):
c = np.random.randint( self.ns )
m = self.species[c]
self.set_patch(m, initconc)
def add_patch_at( self, mol, conc, x, y ):
""" add some concentration to a patch of chemical at a given
location
"""
self.deposit( mol, conc, x, y )
self.deposit( mol, conc, x, ( y + 1 ) )
self.deposit( mol, conc, x, ( y - 1 ) )
self.deposit( mol, conc, ( x + 1 ), y )
self.deposit( mol, conc, ( x - 1 ), y )
self.deposit( mol, conc, (x - 1), ( y - 1 ) )
self.deposit( mol, conc, (x - 1), ( y + 1 ) )
self.deposit( mol, conc, ( x + 1 ), (y - 1) )
self.deposit( mol, conc, ( x + 1 ), (y + 1) )
def add_patch( self, mol, conc ):
""" add a patch of chemical at a random location """
if (self.sizex < 3 or self.sizey < 3):
return
x = 1 + np.random.randint( self.sizex - 2 )
y = 1 + np.random.randint( self.sizey - 2 )
self.add_patch_at( mol, conc, x, y )
def add_patches( self, npatches, mol, initconc ):
""" add some random patches of chemicals """
m = mol
for i in range(npatches):
if (mol == ''):
c = np.random.randint( self.ns )
m = self.species[c]
self.add_patch(m, initconc)
def reset_region( self, mol, val, x0, y0, x1, y1 ):
""" set the concentration of substances in this region to the
given value
"""
y = y0
while (y < y1):
x = x0
while (x < x1):
self.set_conc(mol, val, x, y)
x += 1
y += 1
def disrupt( self, mol, val, x0, y0, x1, y1 ):
""" disrupt the concentration of a chemical within a given
            rectangle: clamp the concentration to at most val
"""
y = y0
while (y < y1):
x = x0
while (x < x1):
c = self.get_conc(mol, x, y)
c = min(c, val)
self.set_conc(mol, c, x, y)
x += 1
y += 1
def perturb( self, mol, prob, ampl, x, y ):
""" perturb concentration at the given point """
c0 = self.get_conc(mol, x, y)
if (c0 > 0):
dice = np.random.random()
if dice < prob:
c = c0 * ( ampl * np.random.random() - ampl / 2)
self.deposit(mol, c, x, y)
def perturbAll( self, mol, prob, ampl ):
""" perturb the given chemical with a given probability and
amplitude: a random concentration value in the range
[-amp/2 ; ampl/2] will be deposited at each node with
probability prob (if the chemical is not in the node,
nothing happens)
"""
for y in range(self.sizey):
for x in range(self.sizex):
self.perturb(mol, prob, ampl, x, y)
def perturb_region( self, mol, prob, ampl, x0, y0, x1, y1 ):
""" perturb concentrations in the given region """
y = y0
while (y < y1):
x = x0
while (x < x1):
self.perturb(mol, prob, ampl, x, y)
x += 1
y += 1
def diffusion_term_NOTUSED( self, n ):
""" calculate diffusion term for molecule with index n, leave
result on self.diff (calculate one by one, far too slow,
but I leave the code here to illustrate conceptually how
it works, it's more readable than the matrix form below)
"""
self.diff.fill(0.0)
for y in range(self.sizey):
for x in range(self.sizex):
                c0 = self.get_conc_by_index(n, x, y)
                c1 = self.get_conc_by_index(n, x - 1, y)
                c2 = self.get_conc_by_index(n, x + 1, y)
                c3 = self.get_conc_by_index(n, x, y - 1)
                c4 = self.get_conc_by_index(n, x, y + 1)
                dc = c1 + c2 + c3 + c4 - 4 * c0
                if self.nneighs == 6:
                    sign = 2 * (x % 2) - 1
                    c1 = self.get_conc_by_index(n, x + sign, y - 1)
                    c2 = self.get_conc_by_index(n, x + sign, y + 1)
                    dc += c1 + c2 - 2 * c0
                if self.nneighs == 8:
                    c1 = self.get_conc_by_index(n, x - 1, y - 1)
                    c2 = self.get_conc_by_index(n, x - 1, y + 1)
                    c3 = self.get_conc_by_index(n, x + 1, y - 1)
                    c4 = self.get_conc_by_index(n, x + 1, y + 1)
                    dc += c1 + c2 + c3 + c4 - 4 * c0
self.diff[x, y] = dc / (self.dx ** 2)
def diffusion_term( self, n ):
""" calculate diffusion term for molecule with index n, for
whole grid at once using matrix operations; leave result
on self.diff
"""
c0 = self.conc[n]
if self.periodic:
# rotate whole grid left, right, up and down
c1 = np.roll(c0, 1, axis=0)
c2 = np.roll(c0, -1, axis=0)
c3 = np.roll(c0, 1, axis=1)
c4 = np.roll(c0, -1, axis=1)
self.diff = ( c1 + c2 + c3 + c4 - 4 * c0 ) / (self.dx ** 2)
else:
# shift whole grid, duplicating edges as if chemicals bounce back
y = self.sizey
c1 = np.vstack([c0[1:,:], c0[-1]])
c2 = np.vstack([c0[0], c0[:-1,:]])
c3 = np.hstack((c0[:,0:1], c0[:,:-1]))
c4 = np.hstack((c0[:,1:], c0[:,y-1:y]))
self.diff = ( c1 + c2 + c3 + c4 - 4 * c0 ) / (self.dx ** 2)
# PENDING
#if self.nneighs == 6:
#if self.nneighs == 8:
#self.diff = dc
def reaction_term( self, n):
""" calculate reaction term for molecule with index n, leaving
the result on self.dcdt
"""
self.dcdt.fill(0.0)
for j in range(self.nreactions()):
k = self.ms[n,j] * self.kv[j]
if (k == 0.0): continue
self.rate.fill(k)
for i in range(self.nspecies()):
# calculate [conc]^me
self.prod = self.conc[i] ** self.me[i,j]
                # calculate r = k * [c1]^e1 * [c2]^e2 ...
self.rate = np.multiply(self.rate, self.prod)
self.dcdt += self.rate
def integrate( self, dt=1.0 ):
""" PDE integration of reaction-diffusion system with timestep dt """
for n in range(self.nspecies()):
self.reaction_term(n)
if (self.diffcoef[n] != 0.0):
self.diffusion_term(n)
# dc/dt = sum(r[j]) + diffcoef*diffterm
self.dcdt += self.diffcoef[n] * self.diff
# conc += dc/dt * dt
self.conc[n] += dt * self.dcdt
#self.apply_dilution()
#minc = self.conc.min()
#if minc < 0.0:
# self.conc += minc0
self.time += dt
def trace( self ):
""" print internal variables, for debug purposes """
ReactionVessel.trace( self )
print "diffcoef=", self.diffcoef, "\n"
def trace_title( self ):
""" output tab-separated title line for plotting """
print "time",
for i in range(self.ns):
print"%s" % self.species[i],
print''
def trace_conc( self ):
""" print the full grid of concentrations for each chemical,
producing a tab-separated matrix for each chemical
"""
for mol in self.species:
i = self.species.index(mol)
tot = self.conc[i].sum()
avg = tot / (self.sizex * self.sizey)
minv = self.conc[i].min()
print "t=", self.time, " mol=", mol,
print "sum=", tot, "avg=", avg, "min=", minv
for y in range(self.sizey):
for x in range(self.sizex):
print "\t", self.get_conc(mol, x, y),
print ''
def trace_conc_xy( self, x, y ):
""" print the concentations of all chemicals at position (x,y) """
print "%g" % self.time,
for i in range(self.ns):
print"%g" % self.conc[i].getValue(x, y),
print ''
def computeTextureMatrix( self, transparent ):
""" produce a texture matrix to be used by animate() """
if not hasattr(self, 'texmat'):
# 4D texture matrix with one (r,g,b,a) color per grid position
self.texmat = np.empty((self.sizex, self.sizey, 4), np.ubyte)
maxc = self.conc.max()
if maxc > 1.0:
norm = self.conc / maxc
else:
norm = self.conc
self.texmat.fill(0)
if not transparent:
self.texmat[:,:,3] = 0xFF
for mol in self.species:
n = self.species.index(mol)
(r0, g0, b0) = self.get_color(mol)
self.texmat[:,:,0] += r0 * 0xFF * norm[n]
self.texmat[:,:,1] += g0 * 0xFF * norm[n]
self.texmat[:,:,2] += b0 * 0xFF * norm[n]
if transparent: self.texmat[:,:,3] += 0xFF * norm[n]
def animate( self, sleeptime=1.0, transparent=True, blur=False ):
""" display the reaction-diffusion grid using vpytyon
transparent: use transparent background instead of black
blur: produce fuzzy borders between cells on the grid
"""
import visual as vpy
self.computeTextureMatrix(transparent)
self.texture = vpy.materials.texture(data=self.texmat, \
mapping='sign', interpolate=blur)
self.plate = vpy.box(axis=(0,0,1), width=2, height=2, \
length=0.1, material=self.texture)
# TODO: how to reuse existing box and texture???
print "animate t=", self.time
vpy.sleep(sleeptime)
def conc2img(self, cellsizex, cellsizey, transparent):
""" convert the concentration matrix to the ARGB format
accepted by png.saveAsPNG()
if the transparent flag is set, the resulting png image
will have a transparent background in the spots where
there are no chemicals;
else the image will have a black background in those spots
TODO: implement hexagonal grid
"""
maxc = self.conc.max()
img = []
for y in range(self.sizey):
row = []
for x in range(self.sizex):
if transparent: A = 0
else: A = 255
(R, G, B) = (0, 0, 0)
for mol in self.species:
n = self.species.index(mol)
(r0, g0, b0) = self.get_color(mol)
if maxc > 1.0:
norm = self.conc[n,x,y] / maxc
else:
norm = self.conc[n,x,y]
if transparent: A += int(round(norm * 0xFF))
R += int(round(norm * r0 * 0xFF))
G += int(round(norm * g0 * 0xFF))
B += int(round(norm * b0 * 0xFF))
if transparent: A = min(max(0, A), 255)
R = min(max(0, R), 255)
G = min(max(0, G), 255)
B = min(max(0, B), 255)
val = (A << 24) | (R << 16) | (G << 8) | B
for j in range(cellsizex): row.append(val)
for i in range(cellsizey): img.append(row)
return img
def conc2img_DISABLED(self, cellsizex, cellsizey, transparent):
""" convert the concentration matrix to the ARGB format
accepted by png.saveAsPNG()
tried to use matrix ops for speedup, but it turned out
slower than the element-by-element implementation above
"""
if not hasattr(self, 'A'):
self.A = np.empty((self.sizex, self.sizey), np.uint32)
self.R = np.empty((self.sizex, self.sizey), np.uint32)
self.G = np.empty((self.sizex, self.sizey), np.uint32)
self.B = np.empty((self.sizex, self.sizey), np.uint32)
if transparent:
self.A.fill(0x00)
else:
self.A.fill(0xFF)
self.R.fill(0x00)
self.G.fill(0x00)
self.B.fill(0x00)
maxc = self.conc.max()
if maxc > 1.0:
norm = self.conc / maxc
else:
norm = self.conc
for mol in self.species:
n = self.species.index(mol)
(r0, g0, b0) = self.get_color(mol)
if transparent: self.A += 0xFF * norm[n]
self.R += r0 * 0xFF * norm[n]
self.G += g0 * 0xFF * norm[n]
self.B += b0 * 0xFF * norm[n]
np.clip(self.A, 0, 0xFF, out=self.A)
np.clip(self.R, 0, 0xFF, out=self.R)
np.clip(self.G, 0, 0xFF, out=self.G)
np.clip(self.B, 0, 0xFF, out=self.B)
tot = (self.A << 24) | (self.R << 16) | (self.G << 8) | self.B
img = np.repeat(tot, cellsizex, axis=0)
img = np.repeat(img, cellsizey, axis=1)
return img
def writepng( self, fname, pngsizex=256, pngsizey=256, transparent=True ):
""" write molecule concentrations to a png image; each grid
            position is rendered as a block of cellsizex by cellsizey pixels
"""
cellsizex = max(1, pngsizex / self.sizex)
cellsizey = max(1, pngsizey / self.sizey)
img = self.conc2img(cellsizex, cellsizey, transparent)
data = png.saveAsPNG(img, fname)
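# Illustrative sketch (not part of the original class): diffusion_term()
# above builds the periodic 4-neighbour Laplacian by rolling the whole grid
# with np.roll; the helper below, whose name is ours, reproduces that
# calculation for an arbitrary 2-D array so the trick can be checked in
# isolation.
def _laplacian_periodic_sketch(c0, dx=1.0):
    """ standalone sketch of the periodic Laplacian used by diffusion_term() """
    c1 = np.roll(c0, 1, axis=0)    # neighbour shifted along axis 0 (wraps around)
    c2 = np.roll(c0, -1, axis=0)   # neighbour shifted the other way
    c3 = np.roll(c0, 1, axis=1)    # neighbour shifted along axis 1
    c4 = np.roll(c0, -1, axis=1)   # neighbour shifted the other way
    return (c1 + c2 + c3 + c4 - 4 * c0) / (dx ** 2)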
#---------------------------------------------------------------------------
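# Illustrative sketch (ours): reaction_term() above accumulates, for each
# reaction j, a rate of the form r = k * [c1]**e1 * [c2]**e2 * ...; the toy
# numbers below just show that product for a single reaction.
def _reaction_rate_sketch():
    """ scalar sketch of the rate law used by reaction_term() """
    k = 0.5                  # net rate constant of one reaction (made up)
    conc = [2.0, 3.0]        # concentrations of the participating species
    exponents = [1, 2]       # their stoichiometric exponents (one column of 'me')
    rate = k
    for c, e in zip(conc, exponents):
        rate *= c ** e
    return rate              # 0.5 * 2.0**1 * 3.0**2 == 9.0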
if __name__ == '__main__':
# simple diffusion test
rsys = ReactionDiffusionSystem(32, 32)
rsys.parse([ "A --> A", "B --> B" ])
rsys.set_diffcoef('A', 0.8)
rsys.set_diffcoef('B', 0.5)
rsys.set_color('A', (1, 0, 0))
rsys.set_color('B', (0, 0, 1))
rsys.deposit('A', 2.0, rsys.sizex / 2, rsys.sizey / 2)
#rsys.deposit('A', 2.0, 2, 2)
#rsys.deposit('B', 2.0, 8, 8)
#rsys.trace_conc()
rsys.writepng('testAB0.png')
for i in range(100):
rsys.integrate(0.1)
#rsys.trace_conc()
if (i > 0 and i % 10 == 0):
#fname = ('testAB%d.png' % i )
#rsys.writepng(fname)
rsys.animate()
|
|
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount", "expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath",
"samefile", "sameopenfile", "samestat", "commonpath"]
# strings representing various path-related bits and pieces
# These are primarily for export; internally, they are hardcoded.
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
devnull = 'nul'
def _get_bothseps(path):
if isinstance(path, bytes):
return b'\\/'
else:
return '\\/'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
try:
if isinstance(s, bytes):
return s.replace(b'/', b'\\').lower()
else:
return s.replace('/', '\\').lower()
except (TypeError, AttributeError):
if not isinstance(s, (bytes, str)):
raise TypeError("normcase() argument must be str or bytes, "
"not %r" % s.__class__.__name__) from None
raise
# Return whether a path is absolute.
# Trivial in Posix, harder on Windows.
# For Windows it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume-letter-and-colon or UNC-resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return len(s) > 0 and s[0] in _get_bothseps(s)
# Join two (or more) paths.
def join(path, *paths):
if isinstance(path, bytes):
sep = b'\\'
seps = b'\\/'
colon = b':'
else:
sep = '\\'
seps = '\\/'
colon = ':'
try:
if not paths:
path[:0] + sep #23780: Ensure compatible data type even if p is null.
result_drive, result_path = splitdrive(path)
for p in paths:
p_drive, p_path = splitdrive(p)
if p_path and p_path[0] in seps:
# Second path is absolute
if p_drive or not result_drive:
result_drive = p_drive
result_path = p_path
continue
elif p_drive and p_drive != result_drive:
if p_drive.lower() != result_drive.lower():
# Different drives => ignore the first path entirely
result_drive = p_drive
result_path = p_path
continue
# Same drive in different case
result_drive = p_drive
# Second path is relative to the first
if result_path and result_path[-1] not in seps:
result_path = result_path + sep
result_path = result_path + p_path
## add separator between UNC and non-absolute path
if (result_path and result_path[0] not in seps and
result_drive and result_drive[-1:] != colon):
return result_drive + sep + result_path
return result_drive + result_path
except (TypeError, AttributeError, BytesWarning):
genericpath._check_arg_types('join', path, *paths)
raise
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive/UNC sharepoint and relative path specifiers.
Returns a 2-tuple (drive_or_unc, path); either part may be empty.
If you assign
result = splitdrive(p)
It is always true that:
result[0] + result[1] == p
If the path contained a drive letter, drive_or_unc will contain everything
up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir")
If the path contained a UNC path, the drive_or_unc will contain the host name
and share up to but not including the fourth directory separator character.
e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir")
Paths cannot contain both a drive letter and a UNC path.
"""
if len(p) >= 2:
if isinstance(p, bytes):
sep = b'\\'
altsep = b'/'
colon = b':'
else:
sep = '\\'
altsep = '/'
colon = ':'
normp = p.replace(altsep, sep)
if (normp[0:2] == sep*2) and (normp[2:3] != sep):
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv drive letter or UNC path
# \\machine\mountpoint\directory\etc\...
# directory ^^^^^^^^^^^^^^^
index = normp.find(sep, 2)
if index == -1:
return p[:0], p
index2 = normp.find(sep, index + 1)
# a UNC path can't have two slashes in a row
# (after the initial two)
if index2 == index + 1:
return p[:0], p
if index2 == -1:
index2 = len(p)
return p[:index2], p[index2:]
if normp[1:2] == colon:
return p[:2], p[2:]
return p[:0], p
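# Illustrative usage sketch (not part of the module): the splitdrive() cases
# from the docstring above, written as executable assertions.
def _splitdrive_examples():
    assert splitdrive("c:/dir") == ("c:", "/dir")                            # drive letter
    assert splitdrive("//host/computer/dir") == ("//host/computer", "/dir")  # UNC share
    assert splitdrive("dir/file") == ("", "dir/file")                        # no drive at all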
# Parse UNC paths
def splitunc(p):
"""Deprecated since Python 3.1. Please use splitdrive() instead;
it now handles UNC paths.
Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
import warnings
warnings.warn("ntpath.splitunc is deprecated, use ntpath.splitdrive instead",
DeprecationWarning, 2)
drive, path = splitdrive(p)
if len(drive) == 2:
# Drive letter present
return p[:0], p
return drive, path
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
seps = _get_bothseps(p)
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in seps:
i -= 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head = head.rstrip(seps) or head
return d + head, tail
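# Illustrative usage sketch (ours): split() separates the last component from
# the rest while keeping the drive with the head; basename() and dirname()
# below are thin wrappers around it.
def _split_examples():
    head, tail = split("c:/dir/file.txt")
    assert (head, tail) == ("c:/dir", "file.txt")
    assert basename("c:/dir/file.txt") == "file.txt"
    assert dirname("c:/dir/file.txt") == "c:/dir"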
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
if isinstance(p, bytes):
return genericpath._splitext(p, b'\\', b'/', b'.')
else:
return genericpath._splitext(p, '\\', '/', '.')
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
"""Test whether a path is a symbolic link.
This will always return false for Windows prior to 6.0.
"""
try:
st = os.lstat(path)
except (OSError, AttributeError):
return False
return stat.S_ISLNK(st.st_mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
st = os.lstat(path)
except OSError:
return False
return True
# Is a path a mount point?
# Any drive letter root (eg c:\)
# Any share UNC (eg \\server\share)
# Any volume mounted on a filesystem folder
#
# No one method detects all three situations. Historically we've lexically
# detected drive letter roots and share UNCs. The canonical approach to
# detecting mounted volumes (querying the reparse tag) fails for the most
# common case: drive letter roots. The alternative which uses GetVolumePathName
# fails if the drive letter is the result of a SUBST.
try:
from nt import _getvolumepathname
except ImportError:
_getvolumepathname = None
def ismount(path):
"""Test whether a path is a mount point (a drive root, the root of a
share, or a mounted volume)"""
seps = _get_bothseps(path)
path = abspath(path)
root, rest = splitdrive(path)
if root and root[0] in seps:
return (not rest) or (rest in seps)
if rest in seps:
return True
if _getvolumepathname:
return path.rstrip(seps) == _getvolumepathname(path).rstrip(seps)
else:
return False
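# Illustrative usage sketch (ours): the two lexical cases ismount() detects,
# a drive-letter root and the root of a UNC share (mounted-volume detection
# needs nt._getvolumepathname and is not shown here).
def _ismount_examples():
    assert ismount('C:\\')               # drive root
    assert ismount('\\\\server\\share')  # root of a UNC share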
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if isinstance(path, bytes):
tilde = b'~'
else:
tilde = '~'
if not path.startswith(tilde):
return path
i, n = 1, len(path)
while i < n and path[i] not in _get_bothseps(path):
i += 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if isinstance(path, bytes):
userhome = os.fsencode(userhome)
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if isinstance(path, bytes):
if b'$' not in path and b'%' not in path:
return path
import string
varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii')
quote = b'\''
percent = b'%'
brace = b'{'
rbrace = b'}'
dollar = b'$'
environ = getattr(os, 'environb', None)
else:
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
quote = '\''
percent = '%'
brace = '{'
rbrace = '}'
dollar = '$'
environ = os.environ
res = path[:0]
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index:index+1]
if c == quote: # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index(c)
res += c + path[:index + 1]
except ValueError:
res += c + path
index = pathlen - 1
elif c == percent: # variable or '%'
if path[index + 1:index + 2] == percent:
res += c
index += 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index(percent)
except ValueError:
res += percent + path
index = pathlen - 1
else:
var = path[:index]
try:
if environ is None:
value = os.fsencode(os.environ[os.fsdecode(var)])
else:
value = environ[var]
except KeyError:
value = percent + var + percent
res += value
elif c == dollar: # variable or '$$'
if path[index + 1:index + 2] == dollar:
res += c
index += 1
elif path[index + 1:index + 2] == brace:
path = path[index+2:]
pathlen = len(path)
try:
index = path.index(rbrace)
except ValueError:
res += dollar + brace + path
index = pathlen - 1
else:
var = path[:index]
try:
if environ is None:
value = os.fsencode(os.environ[os.fsdecode(var)])
else:
value = environ[var]
except KeyError:
value = dollar + brace + var + rbrace
res += value
else:
var = path[:0]
index += 1
c = path[index:index + 1]
while c and c in varchars:
var += c
index += 1
c = path[index:index + 1]
try:
if environ is None:
value = os.fsencode(os.environ[os.fsdecode(var)])
else:
value = environ[var]
except KeyError:
value = dollar + var
res += value
if c:
index -= 1
else:
res += c
index += 1
return res
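# Illustrative usage sketch (ours): the three variable forms expandvars()
# recognises, plus the unknown-variable behaviour; 'DEMO_VAR' is a made-up
# name set only for the demo, and 'NO_SUCH_VAR' is assumed to be unset.
def _expandvars_examples():
    os.environ['DEMO_VAR'] = 'value'
    assert expandvars('$DEMO_VAR') == 'value'
    assert expandvars('${DEMO_VAR}') == 'value'
    assert expandvars('%DEMO_VAR%') == 'value'
    assert expandvars('%NO_SUCH_VAR%') == '%NO_SUCH_VAR%'   # unknown: left unchanged
    del os.environ['DEMO_VAR']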
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
if isinstance(path, bytes):
sep = b'\\'
altsep = b'/'
curdir = b'.'
pardir = b'..'
special_prefixes = (b'\\\\.\\', b'\\\\?\\')
else:
sep = '\\'
altsep = '/'
curdir = '.'
pardir = '..'
special_prefixes = ('\\\\.\\', '\\\\?\\')
if path.startswith(special_prefixes):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace(altsep, sep)
prefix, path = splitdrive(path)
# collapse initial backslashes
if path.startswith(sep):
prefix += sep
path = path.lstrip(sep)
comps = path.split(sep)
i = 0
while i < len(comps):
if not comps[i] or comps[i] == curdir:
del comps[i]
elif comps[i] == pardir:
if i > 0 and comps[i-1] != pardir:
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith(sep):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(curdir)
return prefix + sep.join(comps)
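# Illustrative usage sketch (ours): the examples from the comment above,
# written as executable assertions.
def _normpath_examples():
    assert normpath('A//B') == 'A\\B'
    assert normpath('A/./B') == 'A\\B'
    assert normpath('A/foo/../B') == 'A\\B'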
# Return an absolute path.
try:
if os.name == 'nt':
from nt import _getfullpathname
else:
from uwp_os import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
if isinstance(path, bytes):
cwd = os.getcwdb()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except OSError:
pass # Bad path - return unchanged.
elif isinstance(path, bytes):
path = os.getcwdb()
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def relpath(path, start=None):
"""Return a relative version of a path"""
if isinstance(path, bytes):
sep = b'\\'
curdir = b'.'
pardir = b'..'
else:
sep = '\\'
curdir = '.'
pardir = '..'
if start is None:
start = curdir
if not path:
raise ValueError("no path specified")
try:
start_abs = abspath(normpath(start))
path_abs = abspath(normpath(path))
start_drive, start_rest = splitdrive(start_abs)
path_drive, path_rest = splitdrive(path_abs)
if normcase(start_drive) != normcase(path_drive):
raise ValueError("path is on mount %r, start on mount %r" % (
path_drive, start_drive))
start_list = [x for x in start_rest.split(sep) if x]
path_list = [x for x in path_rest.split(sep) if x]
# Work out how much of the filepath is shared by start and path.
i = 0
for e1, e2 in zip(start_list, path_list):
if normcase(e1) != normcase(e2):
break
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
except (TypeError, ValueError, AttributeError, BytesWarning, DeprecationWarning):
genericpath._check_arg_types('relpath', path, start)
raise
# Return the longest common sub-path of the sequence of paths given as input.
# The function is case-insensitive and 'separator-insensitive', i.e. if the
# only difference between two paths is the use of '\' versus '/' as separator,
# they are deemed to be equal.
#
# However, the returned path will have the standard '\' separator (even if the
# given paths had the alternative '/' separator) and will have the case of the
# first path given in the sequence. Additionally, any trailing separator is
# stripped from the returned path.
def commonpath(paths):
"""Given a sequence of path names, returns the longest common sub-path."""
if not paths:
raise ValueError('commonpath() arg is an empty sequence')
if isinstance(paths[0], bytes):
sep = b'\\'
altsep = b'/'
curdir = b'.'
else:
sep = '\\'
altsep = '/'
curdir = '.'
try:
drivesplits = [splitdrive(p.replace(altsep, sep).lower()) for p in paths]
split_paths = [p.split(sep) for d, p in drivesplits]
try:
isabs, = set(p[:1] == sep for d, p in drivesplits)
except ValueError:
raise ValueError("Can't mix absolute and relative paths") from None
# Check that all drive letters or UNC paths match. The check is made only
# now otherwise type errors for mixing strings and bytes would not be
# caught.
if len(set(d for d, p in drivesplits)) != 1:
raise ValueError("Paths don't have the same drive")
drive, path = splitdrive(paths[0].replace(altsep, sep))
common = path.split(sep)
common = [c for c in common if c and c != curdir]
split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
s1 = min(split_paths)
s2 = max(split_paths)
for i, c in enumerate(s1):
if c != s2[i]:
common = common[:i]
break
else:
common = common[:len(s1)]
prefix = drive + sep if isabs else drive
return prefix + sep.join(common)
except (TypeError, AttributeError):
genericpath._check_arg_types('commonpath', *paths)
raise
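# Illustrative usage sketch (ours): commonpath() compares case- and
# separator-insensitively but returns the first path's spelling with '\\'.
def _commonpath_examples():
    assert commonpath(['C:\\Programs\\Foo', 'c:/programs/bar']) == 'C:\\Programs'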
# determine if two files are in fact the same file
try:
# GetFinalPathNameByHandle is available starting with Windows 6.0.
# Windows XP and non-Windows OS'es will mock _getfinalpathname.
if sys.getwindowsversion()[:2] >= (6, 0):
from nt import _getfinalpathname
else:
raise ImportError
except (AttributeError, ImportError):
# On Windows XP and earlier, two files are the same if their absolute
# pathnames are the same.
# Non-Windows operating systems fake this method with an XP
# approximation.
def _getfinalpathname(f):
return normcase(abspath(f))
try:
# The genericpath.isdir implementation uses os.stat and checks the mode
# attribute to tell whether or not the path is a directory.
# This is overkill on Windows - just pass the path to GetFileAttributes
# and check the attribute from there.
from nt import _isdir as isdir
except ImportError:
# Use genericpath.isdir as imported above.
pass
|
|
import os, re, fileinput, shutil, urllib2
import simplejson as json
from fabric.api import local
from utils.version import get_version
from itertools import combinations
VERSION = get_version()
DATE = get_version(True)
# Names of all Tiramisu modules; each module is identified by its index
# in this list, also called its "cups_of_coffee" id.
official_dictionary_names = [
'',
'selector',
'browserdetect',
'selector.dom',
'ajax',
'selector.event',
'taskengine',
'json'
]
# all the cups_of_coffee of Tiramisu
ALL_CUPS_OF_COFFEE = '1234567'
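# Illustrative sketch (ours, not used by the build): how a cups_of_coffee id
# string maps to module file names, e.g. '14' selects 'selector' and 'ajax'.
def _modules_for_cups(cups_of_coffee):
    return ['src/modules/tiramisu.{0}.js'.format(official_dictionary_names[int(c)])
            for c in cups_of_coffee]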
def beautify():
print '\n####### Beautifying tiramisu.js #######'
local('python utils/jsbeautifier.py src/build/tiramisu.js > src/build/tiramisu-beautified.js')
local('mv src/build/tiramisu-beautified.js src/build/tiramisu.js')
def beautify_modules():
print '\n####### Beautifying modules #######'
# get the names of all .js files
modules_directory = [ x[:-3] for x in os.listdir('src/modules/') if x.split('.')[-1] == 'js']
for name in modules_directory:
local('python utils/jsbeautifier.py src/modules/{0}.js > src/modules/{0}-beautified.js'.format(name))
local('mv src/modules/{0}-beautified.js src/modules/{0}.js'.format(name))
# usage: fab unify:list_modules='4'  (module ids from official_dictionary_names, e.g. 4 = ajax)
def unify(list_modules=None):
    # list of all module dependencies required by the requested build
list_dependency = []
# read tiramisu.json
f = open('tiramisu.json', 'r')
tiramisu_json = json.load(f)
f.close()
    # list_modules contains the ids of the modules chosen for a custom Tiramisu build
if list_modules:
# Unify only selected modules
print '\n####### Unifying custom Tiramisu #######'
modules_chosen = ''.join(list_modules)
check_dependency(list_dependency, list_modules)
list_dependency = map(lambda x: x.strip(), list_dependency)
list_modules = sorted(set(list_modules + ''.join(list_dependency)))
modules = ['src/modules/tiramisu.'+official_dictionary_names[int(x)]+'.js' for x in list_modules]
modules.sort() # sorts normally by alphabetical order
modules.sort(key=len) # sorts by length
        # id string naming this custom build
name_custom = ''.join(list_modules)
# unify modules with cat command
cat = "cat src/tiramisu.core.js {0} > src/custom/tiramisu-{1}.js".format(' '.join(modules), name_custom)
local(cat)
        # minify tiramisu with yuicompressor
local('yuicompressor -o src/custom/tiramisu-{0}-min.js src/custom/tiramisu-{0}.js'.format(name_custom))
        # Get the size of Tiramisu in kilobytes
bytes = os.path.getsize('src/custom/tiramisu-{0}-min.js'.format(name_custom))
size = round(bytes / 1024.0, 2)
        # Record which build this combination of modules maps to, and the size of the build
tiramisu_json['custom'][modules_chosen] = name_custom
tiramisu_json['custom_size'][name_custom] = size
else:
# Unify all modules
print '\n####### Unifying all modules in tiramisu.js #######'
modules = [ module for module in local("ls -rd $(find src/modules) | grep '.*\.js'", capture=True).split()]
# unify modules with cat command
cat = 'cat src/tiramisu.core.js {0} > src/build/tiramisu.js'.format(' '.join(modules))
local(cat)
        # minify tiramisu with yuicompressor
local('yuicompressor -o src/build/tiramisu-{0}-min.js src/build/tiramisu.js'.format(VERSION))
        # Get the size of Tiramisu in kilobytes
bytes = os.path.getsize('src/build/tiramisu-{0}-min.js'.format(VERSION))
size = round(bytes / 1024.0, 2)
        # Record the size of the full build
tiramisu_json['tiramisu_size'] = size
# write tiramisu.json
outfile = open("tiramisu.json", "w")
outfile.write(json.dumps(tiramisu_json))
outfile.close()
def check_dependency(list_dependency, list_modules):
""" """
for cups_of_coffee in list_modules:
try:
            # For each module, look up its dependencies and add any that are not already in the list
dependencies = official_dictionary_names[int(cups_of_coffee)]
url = 'src/modules/tiramisu.{0}.js'.format(dependencies)
with open(url, "r") as f:
for line in f:
find = False
dependency = []
dep = re.search(r"ingredients = \[(.*?)\]", line)
dep_two = re.search(r"'ingredients': \[(.*?)\]", line)
if dep:
find = True
dependency = [x.replace("\'","") for x in dep.group(1).split(',')]
elif dep_two:
find = True
dependency = [x.replace("\'","") for x in dep_two.group(1).split(',')]
                    # Recursively collect the dependencies of each dependency as well
for x in dependency:
if not x in list_dependency and len(x):
list_dependency.append(x)
check_dependency(list_dependency, x)
if find:
break
except:
print 'Error, there is no module with id ' + cups_of_coffee
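# Illustrative sketch (ours): the regular expression used by check_dependency()
# to pull module ids out of an "ingredients" declaration; the sample line is
# made up.
def _ingredients_sketch():
    line = "ingredients = ['1', '5']"
    dep = re.search(r"ingredients = \[(.*?)\]", line)
    return [x.replace("'", "").strip() for x in dep.group(1).split(',')]   # ['1', '5']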
def minify():
print '\n####### Minifying tiramisu.js #######'
local('yuicompressor -o src/build/tiramisu-{0}-min.js src/build/tiramisu.js'.format(VERSION))
def docs():
print '\n####### Generating docs #######'
local('dox < utils/readme.js > docs_json/readme.json')
for name in official_dictionary_names:
if len(name) < 2:
local('dox < src/tiramisu.core.js > docs_json/tmp.json')
local('python utils/jsbeautifier.py docs_json/tmp.json > docs_json/core.json')
else:
local('dox < src/modules/tiramisu.{}.js > docs_json/tmp.json'.format(name))
local('python utils/jsbeautifier.py docs_json/tmp.json > docs_json/{}.json'.format(name))
local('python utils/clean_json.py')
def all():
""" """
clean()
beautify_modules()
unify()
beautify()
docs()
def cook_all_tiramisu():
""" """
print '\n####### Cool all tiramisu... #######'
try:
local('rm src/custom/* ')
except:
pass
f = open('tiramisu.json', 'r')
tiramisu_json = json.load(f)
f.close()
for i in range(1,7):
for cups_of_coffee in [x for x in combinations(ALL_CUPS_OF_COFFEE,i)]:
cups_of_coffee = ''.join(cups_of_coffee)
if cups_of_coffee != ALL_CUPS_OF_COFFEE:
unify(cups_of_coffee)
def clean():
""" """
print '\n####### Cleaning... #######'
local('rm -f src/build/*')
def publish():
""" """
print '\n####### Publish... #######'
all()
local('git add .')
local('git commit -am "Released version {}"'.format(VERSION))
local('git tag -f -a {} -m "{}"'.format(VERSION, DATE))
local('git checkout stable')
local('git merge master')
local('git push origin')
local('git push origin --tags')
local('git checkout master')
|
|
# $Id$
from cPickle import load, dump
import logging
import os
import shelve
from time import time
import traceback
from mx.DateTime import localtime, mktime
from quixote import get_publisher
from quixote.errors import AccessError
from quixote.form2 import Form
from quixote.publish import SessionPublisher
from quixote.session import Session, SessionManager
import canary.context
import canary.user
# Sql* classes adapted from Titus Brown's examples at:
#
# http://issola.caltech.edu/~t/transfer/sql_example/session.py
#
class SqlQuixoteSession (object, Session):
def __init__ (self, request, id):
Session.__init__(self, request, id)
self.messages = ''
self._dirty = 0
self._form_tokens = []
#self._Session__access_time = self.__access_time
#self._Session__creation_time = self.__creation_time
#self._Session__remote_address = self.remote_address
def __getattr__ (self, att):
if att == '_Session__remote_address':
return self.__remote_address
elif att == '_Session__creation_time':
return self.__creation_time
def add_message (self, msg=''):
if len(self.messages) == 0:
self.messages = str(msg)
else:
self.messages += '~~%s' % str(msg)
self._dirty = 1
def has_messages (self):
return len(self.messages) > 0
def clear_messages (self):
self.messages = ''
self._dirty = 1
def set_user (self, user):
"""
Set the user! The change in user will be detected by Quixote
through the 'is_dirty' function and saved accordingly.
"""
if not self.user or user.id != self.user.id:
self._dirty = 1
self.user = user
def has_info (self):
"""
Is this session worthy of storage?
"""
return self.user
def is_dirty (self):
"""
Check to see if this session needs to be stored again, e.g. if
the user has changed.
"""
return self._dirty
def _set_access_time (self, resolution):
Session._set_access_time(self, resolution)
self._dirty = 1
class SqlTableMap:
"""
Intercept dictionary requests and channel them to the SQL database.
"""
def __init__ (self, context):
"""
WAS: Store the database connection.
"""
self.uncommitted = {}
self.context = context
def get_conn (self):
"""
Return the database connection after doing a rollback.
"""
conn = self.context.connection
#try:
# conn.rollback()
#except NotSupportedError:
# pass
return conn
def keys (self):
"""
Get a list of the session IDs in the database.
"""
cursor = self.context.get_cursor()
#context.execute("SELECT uid FROM sessions")
cursor.execute("SELECT session_id FROM sessions")
return [id for (id,) in cursor.fetchall()]
def values (self):
"""
Load all of the sessions in the database.
"""
cursor = self.context.get_cursor()
cursor.execute("""
SELECT session_id, user_id, remote_addr, creation_time,
access_time, messages, form_tokens
FROM sessions
""")
return [self._create_from_db(session_id, user_id, addr, c, a, msg, tokens) \
for (session_id, user_id, addr, c, a, msg, tokens) in cursor.fetchall() ]
def items (self):
"""
Get a list of the (key, value) pairs in the database.
"""
d = {}
for v in self.values():
d[v.id] = v
return d
def get (self, session_id, default=None):
"""
Get the given item from the database.
"""
cursor = self.context.get_cursor()
cursor.execute("""
SELECT session_id, user_id, remote_addr, creation_time,
access_time, messages, form_tokens
FROM sessions
WHERE session_id=%(session_id)s
""", {'session_id': session_id})
assert cursor.rowcount <= 1
if cursor.rowcount == 1:
(session_id, user_id, addr, c, a, msg, tokens) = cursor.fetchone()
return self._create_from_db(session_id, user_id, addr, c, a, msg, tokens)
else:
return default
def __getitem__ (self, session_id):
"""
Get the given session from the database.
"""
return self.get(session_id)
def has_key (self, session_id):
"""
Does this session exist in the database?
"""
if self.get(session_id) == None:
return 0
return 1
def __setitem__ (self, session_id, session):
"""
Store the given session in the database.
"""
self.uncommitted[session_id] = session
def __delitem__ (self, session_id):
"""
Delete the given session from the database.
"""
if session_id:
if self.uncommitted.has_key(session_id):
del self.uncommitted[session_id]
#conn = self.get_conn()
#cursor = self.get_conn().cursor()
#cursor = conn.cursor()
cursor = self.context.get_cursor()
cursor.execute("""
DELETE FROM sessions
WHERE session_id=%(session_id)s
""", {'session_id': session_id})
#conn.commit()
def _save_to_db (self, session):
"""
Save a given session to the database.
"""
#conn = self.get_conn()
#cursor = conn.cursor()
cursor = self.context.get_cursor()
        # The original code avoided thrashing the database by updating an
        # existing row when possible; the code below instead always does a
        # delete followed by an insert:
#
# don't allow multiple session IDs; this also removes it from
# the uncommitted dictionary.
del self[session.id]
#if self.uncommitted.has_key(session.id):
# del self.uncommitted[session.id]
#if self.has_key(session.id):
# cursor.execute("""
# UPDATE sessions
# SET access_time = %s, messages = %s
# WHERE session_id = %s
# """, (str(localtime(session.get_access_time())), session.messages,
# session.id))
#else:
cursor.execute("""
INSERT INTO sessions
(session_id, user_id, remote_addr,
creation_time,
access_time,
messages,
form_tokens)
VALUES
(%s, %s, %s,
%s,
%s,
%s,
%s)
""", (session.id, session.user.id, session.get_remote_address(),
str(localtime(session.get_creation_time())),
str(localtime(session.get_access_time())),
str(session.messages),
str('~~'.join(session._form_tokens))))
#conn.commit()
def _create_from_db (self, session_id, user_id, addr, create_time,
access_time, messages, tokens=[]):
"""
Create a new session from database data.
This goes through the new-style object function __new__ rather than
through the __init__ function.
"""
session = SqlQuixoteSession.__new__(SqlQuixoteSession)
session.id = session_id
session.user = canary.user.get_user_by_id(self.context, user_id)
# FIXME: one '_' to be removed for qx-1.0
#session.__remote_address = addr
#session.__creation_time = create_time.ticks()
#session.__access_time = access_time.ticks()
try:
session.__access_time = mktime(access_time.timetuple()).ticks()
session.__creation_time = mktime(create_time.timetuple()).ticks()
except AttributeError:
session.__creation_time = create_time.ticks()
session.__access_time = access_time.ticks()
session.__remote_address = addr
session.messages = messages
session._form_tokens = tokens.split('~~')
return session
def _abort_uncommitted (self, session):
"""
Toss a session without committing any changes.
"""
if self.uncommitted.has_key(session.id):
del self.uncommitted[session.id]
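# Illustrative sketch (not part of the original module): _create_from_db()
# above builds sessions with SqlQuixoteSession.__new__ so that __init__
# (which expects a live request) never runs; the toy class below shows the
# same allocate-then-restore pattern in isolation.
class _NewWithoutInitSketch (object):
    def __init__ (self):
        raise RuntimeError('never called when objects are built via __new__')
    @classmethod
    def from_stored (cls, value):
        obj = cls.__new__(cls)   # allocate the instance without running __init__
        obj.value = value        # restore attributes by hand, as _create_from_db does
        return obj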
class SqlSessionManager (SessionManager):
"""
A session manager that uses the SqlTableMap to map sessions into an
SQL database.
"""
def __init__ (self, context):
SessionManager.__init__(self, SqlQuixoteSession, SqlTableMap(context))
self.context = context
def abort_changes (self, session):
if session:
self.sessions._abort_uncommitted(session)
def commit_changes (self, session):
if session \
and session.has_info():
self.sessions._save_to_db(session)
class CanaryPublisher (SessionPublisher):
def __init__ (self, *args, **kwargs):
self.logger = logging.getLogger(str(self.__class__))
try:
self.context = kwargs['context']
self.logger.info('Found context')
except KeyError:
self.context = canary.context.Context()
self.logger.info('Started new context')
self.config = self.context.config
SessionPublisher.__init__(self, root_namespace='canary.ui',
session_mgr=SqlSessionManager(self.context), config=self.config)
self.setup_logs()
class NotLoggedInError (AccessError):
"""
To be called when the requested action requires a logged in user.
Whether that user has access rights or not can only be determined
after the user actually logs in.
"""
status_code = 403
title = "Access denied"
description = "Authorized access only."
class MyForm (Form):
"""
Automatically creates a logger instance on any arbitrary Form.
"""
def __init__ (self, context, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.logger = logging.getLogger(str(self.__class__))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import itertools
import logging
import uuid
import mock
from oslo_config import cfg
from oslo_config import fixture as config
from oslo_serialization import jsonutils
import requests
import six
from testtools import matchers
from keystoneclient import adapter
from keystoneclient.auth import base
from keystoneclient import exceptions
from keystoneclient.i18n import _
from keystoneclient import session as client_session
from keystoneclient.tests.unit import utils
class SessionTests(utils.TestCase):
TEST_URL = 'http://127.0.0.1:5000/'
def setUp(self):
super(SessionTests, self).setUp()
self.deprecations.expect_deprecations()
def test_get(self):
session = client_session.Session()
self.stub_url('GET', text='response')
resp = session.get(self.TEST_URL)
self.assertEqual('GET', self.requests_mock.last_request.method)
self.assertEqual(resp.text, 'response')
self.assertTrue(resp.ok)
def test_post(self):
session = client_session.Session()
self.stub_url('POST', text='response')
resp = session.post(self.TEST_URL, json={'hello': 'world'})
self.assertEqual('POST', self.requests_mock.last_request.method)
self.assertEqual(resp.text, 'response')
self.assertTrue(resp.ok)
self.assertRequestBodyIs(json={'hello': 'world'})
def test_head(self):
session = client_session.Session()
self.stub_url('HEAD')
resp = session.head(self.TEST_URL)
self.assertEqual('HEAD', self.requests_mock.last_request.method)
self.assertTrue(resp.ok)
self.assertRequestBodyIs('')
def test_put(self):
session = client_session.Session()
self.stub_url('PUT', text='response')
resp = session.put(self.TEST_URL, json={'hello': 'world'})
self.assertEqual('PUT', self.requests_mock.last_request.method)
self.assertEqual(resp.text, 'response')
self.assertTrue(resp.ok)
self.assertRequestBodyIs(json={'hello': 'world'})
def test_delete(self):
session = client_session.Session()
self.stub_url('DELETE', text='response')
resp = session.delete(self.TEST_URL)
self.assertEqual('DELETE', self.requests_mock.last_request.method)
self.assertTrue(resp.ok)
self.assertEqual(resp.text, 'response')
def test_patch(self):
session = client_session.Session()
self.stub_url('PATCH', text='response')
resp = session.patch(self.TEST_URL, json={'hello': 'world'})
self.assertEqual('PATCH', self.requests_mock.last_request.method)
self.assertTrue(resp.ok)
self.assertEqual(resp.text, 'response')
self.assertRequestBodyIs(json={'hello': 'world'})
def test_user_agent(self):
session = client_session.Session(user_agent='test-agent')
self.stub_url('GET', text='response')
resp = session.get(self.TEST_URL)
self.assertTrue(resp.ok)
self.assertRequestHeaderEqual('User-Agent', 'test-agent')
resp = session.get(self.TEST_URL, headers={'User-Agent': 'new-agent'})
self.assertTrue(resp.ok)
self.assertRequestHeaderEqual('User-Agent', 'new-agent')
resp = session.get(self.TEST_URL, headers={'User-Agent': 'new-agent'},
user_agent='overrides-agent')
self.assertTrue(resp.ok)
self.assertRequestHeaderEqual('User-Agent', 'overrides-agent')
def test_http_session_opts(self):
session = client_session.Session(cert='cert.pem', timeout=5,
verify='certs')
FAKE_RESP = utils.test_response(text='resp')
RESP = mock.Mock(return_value=FAKE_RESP)
with mock.patch.object(session.session, 'request', RESP) as mocked:
session.post(self.TEST_URL, data='value')
mock_args, mock_kwargs = mocked.call_args
self.assertEqual(mock_args[0], 'POST')
self.assertEqual(mock_args[1], self.TEST_URL)
self.assertEqual(mock_kwargs['data'], 'value')
self.assertEqual(mock_kwargs['cert'], 'cert.pem')
self.assertEqual(mock_kwargs['verify'], 'certs')
self.assertEqual(mock_kwargs['timeout'], 5)
def test_not_found(self):
session = client_session.Session()
self.stub_url('GET', status_code=404)
self.assertRaises(exceptions.NotFound, session.get, self.TEST_URL)
def test_server_error(self):
session = client_session.Session()
self.stub_url('GET', status_code=500)
self.assertRaises(exceptions.InternalServerError,
session.get, self.TEST_URL)
def test_session_debug_output(self):
"""Test request and response headers in debug logs
in order to redact secure headers while debug is true.
"""
session = client_session.Session(verify=False)
headers = {'HEADERA': 'HEADERVALB'}
security_headers = {'Authorization': uuid.uuid4().hex,
'X-Auth-Token': uuid.uuid4().hex,
'X-Subject-Token': uuid.uuid4().hex, }
body = 'BODYRESPONSE'
data = 'BODYDATA'
all_headers = dict(
itertools.chain(headers.items(), security_headers.items()))
self.stub_url('POST', text=body, headers=all_headers)
resp = session.post(self.TEST_URL, headers=all_headers, data=data)
self.assertEqual(resp.status_code, 200)
self.assertIn('curl', self.logger.output)
self.assertIn('POST', self.logger.output)
self.assertIn('--insecure', self.logger.output)
self.assertIn(body, self.logger.output)
self.assertIn("'%s'" % data, self.logger.output)
for k, v in six.iteritems(headers):
self.assertIn(k, self.logger.output)
self.assertIn(v, self.logger.output)
# Assert that response headers contains actual values and
# only debug logs has been masked
for k, v in six.iteritems(security_headers):
self.assertIn('%s: {SHA1}' % k, self.logger.output)
self.assertEqual(v, resp.headers[k])
self.assertNotIn(v, self.logger.output)
def test_logs_failed_output(self):
"""Test that output is logged even for failed requests"""
session = client_session.Session()
body = uuid.uuid4().hex
self.stub_url('GET', text=body, status_code=400)
resp = session.get(self.TEST_URL, raise_exc=False)
self.assertEqual(resp.status_code, 400)
self.assertIn(body, self.logger.output)
def test_unicode_data_in_debug_output(self):
"""Verify that ascii-encodable data is logged without modification."""
session = client_session.Session(verify=False)
body = 'RESP'
data = u'unicode_data'
self.stub_url('POST', text=body)
session.post(self.TEST_URL, data=data)
self.assertIn("'%s'" % data, self.logger.output)
def test_binary_data_not_in_debug_output(self):
"""Verify that non-ascii-encodable data causes replacement."""
if six.PY2:
data = "my data" + chr(255)
else:
# Python 3 logging handles binary data well.
return
session = client_session.Session(verify=False)
body = 'RESP'
self.stub_url('POST', text=body)
# Forced mixed unicode and byte strings in request
# elements to make sure that all joins are appropriately
# handled (any join of unicode and byte strings should
# raise a UnicodeDecodeError)
session.post(unicode(self.TEST_URL), data=data)
self.assertIn("Replaced characters that could not be decoded"
" in log output", self.logger.output)
# Our data payload should have changed to
# include the replacement char
self.assertIn(u"-d 'my data\ufffd'", self.logger.output)
def test_logging_cacerts(self):
path_to_certs = '/path/to/certs'
session = client_session.Session(verify=path_to_certs)
self.stub_url('GET', text='text')
session.get(self.TEST_URL)
self.assertIn('--cacert', self.logger.output)
self.assertIn(path_to_certs, self.logger.output)
def test_connect_retries(self):
def _timeout_error(request, context):
raise requests.exceptions.Timeout()
self.stub_url('GET', text=_timeout_error)
session = client_session.Session()
retries = 3
with mock.patch('time.sleep') as m:
self.assertRaises(exceptions.RequestTimeout,
session.get,
self.TEST_URL, connect_retries=retries)
self.assertEqual(retries, m.call_count)
# 3 retries finishing with 2.0 means 0.5, 1.0 and 2.0
m.assert_called_with(2.0)
# we count retries so there will be one initial request + 3 retries
self.assertThat(self.requests_mock.request_history,
matchers.HasLength(retries + 1))
def test_uses_tcp_keepalive_by_default(self):
session = client_session.Session()
requests_session = session.session
self.assertIsInstance(requests_session.adapters['http://'],
client_session.TCPKeepAliveAdapter)
self.assertIsInstance(requests_session.adapters['https://'],
client_session.TCPKeepAliveAdapter)
def test_does_not_set_tcp_keepalive_on_custom_sessions(self):
mock_session = mock.Mock()
client_session.Session(session=mock_session)
self.assertFalse(mock_session.mount.called)
def test_ssl_error_message(self):
error = uuid.uuid4().hex
def _ssl_error(request, context):
raise requests.exceptions.SSLError(error)
self.stub_url('GET', text=_ssl_error)
session = client_session.Session()
# The exception should contain the URL and details about the SSL error
msg = _('SSL exception connecting to %(url)s: %(error)s') % {
'url': self.TEST_URL, 'error': error}
six.assertRaisesRegex(self,
exceptions.SSLError,
msg,
session.get,
self.TEST_URL)
def test_mask_password_in_http_log_response(self):
session = client_session.Session()
def fake_debug(msg):
self.assertNotIn('verybadpass', msg)
logger = mock.Mock(isEnabledFor=mock.Mock(return_value=True))
logger.debug = mock.Mock(side_effect=fake_debug)
body = {
"connection_info": {
"driver_volume_type": "iscsi",
"data": {
"auth_password": "verybadpass",
"target_discovered": False,
"encrypted": False,
"qos_specs": None,
"target_iqn": ("iqn.2010-10.org.openstack:volume-"
"744d2085-8e78-40a5-8659-ef3cffb2480e"),
"target_portal": "172.99.69.228:3260",
"volume_id": "744d2085-8e78-40a5-8659-ef3cffb2480e",
"target_lun": 1,
"access_mode": "rw",
"auth_username": "verybadusername",
"auth_method": "CHAP"}}}
body_json = jsonutils.dumps(body)
response = mock.Mock(text=body_json, status_code=200, headers={})
session._http_log_response(response, logger)
self.assertEqual(1, logger.debug.call_count)
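# Illustrative sketch (ours, not keystoneclient code): the doubling back-off
# that test_connect_retries above expects: for 3 retries the sleeps are
# 0.5, 1.0 and 2.0 seconds.
def _retry_backoff_sketch(retries, start=0.5):
    delay, schedule = start, []
    for _ in range(retries):
        schedule.append(delay)
        delay *= 2
    return schedule   # _retry_backoff_sketch(3) == [0.5, 1.0, 2.0]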
class TCPKeepAliveAdapter(utils.TestCase):
@mock.patch.object(client_session, 'socket')
@mock.patch('requests.adapters.HTTPAdapter.init_poolmanager')
def test_init_poolmanager_all_options(self, mock_parent_init_poolmanager,
mock_socket):
# properties expected to be in socket.
mock_socket.TCP_KEEPIDLE = mock.sentinel.TCP_KEEPIDLE
mock_socket.TCP_KEEPCNT = mock.sentinel.TCP_KEEPCNT
mock_socket.TCP_KEEPINTVL = mock.sentinel.TCP_KEEPINTVL
desired_opts = [mock_socket.TCP_KEEPIDLE, mock_socket.TCP_KEEPCNT,
mock_socket.TCP_KEEPINTVL]
adapter = client_session.TCPKeepAliveAdapter()
adapter.init_poolmanager()
call_args, call_kwargs = mock_parent_init_poolmanager.call_args
called_socket_opts = call_kwargs['socket_options']
call_options = [opt for (protocol, opt, value) in called_socket_opts]
for opt in desired_opts:
self.assertIn(opt, call_options)
@mock.patch.object(client_session, 'socket')
@mock.patch('requests.adapters.HTTPAdapter.init_poolmanager')
def test_init_poolmanager(self, mock_parent_init_poolmanager, mock_socket):
spec = ['IPPROTO_TCP', 'TCP_NODELAY', 'SOL_SOCKET', 'SO_KEEPALIVE']
mock_socket.mock_add_spec(spec)
adapter = client_session.TCPKeepAliveAdapter()
adapter.init_poolmanager()
call_args, call_kwargs = mock_parent_init_poolmanager.call_args
called_socket_opts = call_kwargs['socket_options']
call_options = [opt for (protocol, opt, value) in called_socket_opts]
self.assertEqual([mock_socket.TCP_NODELAY, mock_socket.SO_KEEPALIVE],
call_options)
class RedirectTests(utils.TestCase):
REDIRECT_CHAIN = ['http://myhost:3445/',
'http://anotherhost:6555/',
'http://thirdhost/',
'http://finaldestination:55/']
DEFAULT_REDIRECT_BODY = 'Redirect'
DEFAULT_RESP_BODY = 'Found'
def setUp(self):
super(RedirectTests, self).setUp()
self.deprecations.expect_deprecations()
def setup_redirects(self, method='GET', status_code=305,
redirect_kwargs=None, final_kwargs=None):
redirect_kwargs = redirect_kwargs or {}
final_kwargs = final_kwargs or {}
redirect_kwargs.setdefault('text', self.DEFAULT_REDIRECT_BODY)
for s, d in zip(self.REDIRECT_CHAIN, self.REDIRECT_CHAIN[1:]):
self.requests_mock.register_uri(method, s, status_code=status_code,
headers={'Location': d},
**redirect_kwargs)
final_kwargs.setdefault('status_code', 200)
final_kwargs.setdefault('text', self.DEFAULT_RESP_BODY)
self.requests_mock.register_uri(method, self.REDIRECT_CHAIN[-1],
**final_kwargs)
def assertResponse(self, resp):
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.text, self.DEFAULT_RESP_BODY)
def test_basic_get(self):
session = client_session.Session()
self.setup_redirects()
resp = session.get(self.REDIRECT_CHAIN[-2])
self.assertResponse(resp)
def test_basic_post_keeps_correct_method(self):
session = client_session.Session()
self.setup_redirects(method='POST', status_code=301)
resp = session.post(self.REDIRECT_CHAIN[-2])
self.assertResponse(resp)
def test_redirect_forever(self):
session = client_session.Session(redirect=True)
self.setup_redirects()
resp = session.get(self.REDIRECT_CHAIN[0])
self.assertResponse(resp)
        self.assertEqual(len(self.REDIRECT_CHAIN) - 1, len(resp.history))
def test_no_redirect(self):
session = client_session.Session(redirect=False)
self.setup_redirects()
resp = session.get(self.REDIRECT_CHAIN[0])
self.assertEqual(resp.status_code, 305)
self.assertEqual(resp.url, self.REDIRECT_CHAIN[0])
def test_redirect_limit(self):
self.setup_redirects()
for i in (1, 2):
session = client_session.Session(redirect=i)
resp = session.get(self.REDIRECT_CHAIN[0])
self.assertEqual(resp.status_code, 305)
self.assertEqual(resp.url, self.REDIRECT_CHAIN[i])
self.assertEqual(resp.text, self.DEFAULT_REDIRECT_BODY)
def test_history_matches_requests(self):
self.setup_redirects(status_code=301)
session = client_session.Session(redirect=True)
req_resp = requests.get(self.REDIRECT_CHAIN[0],
allow_redirects=True)
ses_resp = session.get(self.REDIRECT_CHAIN[0])
self.assertEqual(len(req_resp.history), len(ses_resp.history))
for r, s in zip(req_resp.history, ses_resp.history):
self.assertEqual(r.url, s.url)
self.assertEqual(r.status_code, s.status_code)
class ConstructSessionFromArgsTests(utils.TestCase):
KEY = 'keyfile'
CERT = 'certfile'
CACERT = 'cacert-path'
def _s(self, k=None, **kwargs):
k = k or kwargs
with self.deprecations.expect_deprecations_here():
return client_session.Session.construct(k)
def test_verify(self):
self.assertFalse(self._s(insecure=True).verify)
self.assertTrue(self._s(verify=True, insecure=True).verify)
self.assertFalse(self._s(verify=False, insecure=True).verify)
self.assertEqual(self._s(cacert=self.CACERT).verify, self.CACERT)
def test_cert(self):
tup = (self.CERT, self.KEY)
self.assertEqual(self._s(cert=tup).cert, tup)
self.assertEqual(self._s(cert=self.CERT, key=self.KEY).cert, tup)
self.assertIsNone(self._s(key=self.KEY).cert)
def test_pass_through(self):
        value = 42  # only a number because timeout needs to be a number
for key in ['timeout', 'session', 'original_ip', 'user_agent']:
args = {key: value}
self.assertEqual(getattr(self._s(args), key), value)
self.assertNotIn(key, args)
class AuthPlugin(base.BaseAuthPlugin):
"""Very simple debug authentication plugin.
    Takes parameters such that it can throw exceptions at the right times.
"""
TEST_TOKEN = utils.TestCase.TEST_TOKEN
TEST_USER_ID = 'aUser'
TEST_PROJECT_ID = 'aProject'
SERVICE_URLS = {
'identity': {'public': 'http://identity-public:1111/v2.0',
'admin': 'http://identity-admin:1111/v2.0'},
'compute': {'public': 'http://compute-public:2222/v1.0',
'admin': 'http://compute-admin:2222/v1.0'},
'image': {'public': 'http://image-public:3333/v2.0',
'admin': 'http://image-admin:3333/v2.0'}
}
def __init__(self, token=TEST_TOKEN, invalidate=True):
self.token = token
self._invalidate = invalidate
def get_token(self, session):
return self.token
def get_endpoint(self, session, service_type=None, interface=None,
**kwargs):
try:
return self.SERVICE_URLS[service_type][interface]
except (KeyError, AttributeError):
return None
def invalidate(self):
return self._invalidate
def get_user_id(self, session):
return self.TEST_USER_ID
def get_project_id(self, session):
return self.TEST_PROJECT_ID
class CalledAuthPlugin(base.BaseAuthPlugin):
ENDPOINT = 'http://fakeendpoint/'
def __init__(self, invalidate=True):
self.get_token_called = False
self.get_endpoint_called = False
self.endpoint_arguments = {}
self.invalidate_called = False
self._invalidate = invalidate
def get_token(self, session):
self.get_token_called = True
return utils.TestCase.TEST_TOKEN
def get_endpoint(self, session, **kwargs):
self.get_endpoint_called = True
self.endpoint_arguments = kwargs
return self.ENDPOINT
def invalidate(self):
self.invalidate_called = True
return self._invalidate
class SessionAuthTests(utils.TestCase):
TEST_URL = 'http://127.0.0.1:5000/'
TEST_JSON = {'hello': 'world'}
def setUp(self):
super(SessionAuthTests, self).setUp()
self.deprecations.expect_deprecations()
def stub_service_url(self, service_type, interface, path,
method='GET', **kwargs):
base_url = AuthPlugin.SERVICE_URLS[service_type][interface]
uri = "%s/%s" % (base_url.rstrip('/'), path.lstrip('/'))
self.requests_mock.register_uri(method, uri, **kwargs)
def test_auth_plugin_default_with_plugin(self):
self.stub_url('GET', base_url=self.TEST_URL, json=self.TEST_JSON)
# if there is an auth_plugin then it should default to authenticated
auth = AuthPlugin()
sess = client_session.Session(auth=auth)
resp = sess.get(self.TEST_URL)
self.assertDictEqual(resp.json(), self.TEST_JSON)
self.assertRequestHeaderEqual('X-Auth-Token', AuthPlugin.TEST_TOKEN)
def test_auth_plugin_disable(self):
self.stub_url('GET', base_url=self.TEST_URL, json=self.TEST_JSON)
auth = AuthPlugin()
sess = client_session.Session(auth=auth)
resp = sess.get(self.TEST_URL, authenticated=False)
self.assertDictEqual(resp.json(), self.TEST_JSON)
self.assertRequestHeaderEqual('X-Auth-Token', None)
def test_service_type_urls(self):
service_type = 'compute'
interface = 'public'
path = '/instances'
status = 200
body = 'SUCCESS'
self.stub_service_url(service_type=service_type,
interface=interface,
path=path,
status_code=status,
text=body)
sess = client_session.Session(auth=AuthPlugin())
resp = sess.get(path,
endpoint_filter={'service_type': service_type,
'interface': interface})
self.assertEqual(self.requests_mock.last_request.url,
AuthPlugin.SERVICE_URLS['compute']['public'] + path)
self.assertEqual(resp.text, body)
self.assertEqual(resp.status_code, status)
def test_service_url_raises_if_no_auth_plugin(self):
sess = client_session.Session()
self.assertRaises(exceptions.MissingAuthPlugin,
sess.get, '/path',
endpoint_filter={'service_type': 'compute',
'interface': 'public'})
def test_service_url_raises_if_no_url_returned(self):
sess = client_session.Session(auth=AuthPlugin())
self.assertRaises(exceptions.EndpointNotFound,
sess.get, '/path',
endpoint_filter={'service_type': 'unknown',
'interface': 'public'})
def test_raises_exc_only_when_asked(self):
        # A request that returns an HTTP error should raise an exception by
        # default; if you specify raise_exc=False then it will not.
self.requests_mock.get(self.TEST_URL, status_code=401)
sess = client_session.Session()
self.assertRaises(exceptions.Unauthorized, sess.get, self.TEST_URL)
resp = sess.get(self.TEST_URL, raise_exc=False)
self.assertEqual(401, resp.status_code)
def test_passed_auth_plugin(self):
passed = CalledAuthPlugin()
sess = client_session.Session()
self.requests_mock.get(CalledAuthPlugin.ENDPOINT + 'path',
status_code=200)
endpoint_filter = {'service_type': 'identity'}
        # with no auth plugin, an authenticated request won't work
self.assertRaises(exceptions.MissingAuthPlugin, sess.get, 'path',
authenticated=True)
        # with no auth plugin, a request with an endpoint filter won't work either
self.assertRaises(exceptions.MissingAuthPlugin, sess.get, 'path',
authenticated=False, endpoint_filter=endpoint_filter)
resp = sess.get('path', auth=passed, endpoint_filter=endpoint_filter)
self.assertEqual(200, resp.status_code)
self.assertTrue(passed.get_endpoint_called)
self.assertTrue(passed.get_token_called)
def test_passed_auth_plugin_overrides(self):
fixed = CalledAuthPlugin()
passed = CalledAuthPlugin()
sess = client_session.Session(fixed)
self.requests_mock.get(CalledAuthPlugin.ENDPOINT + 'path',
status_code=200)
resp = sess.get('path', auth=passed,
endpoint_filter={'service_type': 'identity'})
self.assertEqual(200, resp.status_code)
self.assertTrue(passed.get_endpoint_called)
self.assertTrue(passed.get_token_called)
self.assertFalse(fixed.get_endpoint_called)
self.assertFalse(fixed.get_token_called)
def test_requests_auth_plugin(self):
sess = client_session.Session()
requests_auth = object()
FAKE_RESP = utils.test_response(text='resp')
RESP = mock.Mock(return_value=FAKE_RESP)
with mock.patch.object(sess.session, 'request', RESP) as mocked:
sess.get(self.TEST_URL, requests_auth=requests_auth)
mocked.assert_called_once_with('GET', self.TEST_URL,
headers=mock.ANY,
allow_redirects=mock.ANY,
auth=requests_auth,
verify=mock.ANY)
def test_reauth_called(self):
auth = CalledAuthPlugin(invalidate=True)
sess = client_session.Session(auth=auth)
self.requests_mock.get(self.TEST_URL,
[{'text': 'Failed', 'status_code': 401},
{'text': 'Hello', 'status_code': 200}])
# allow_reauth=True is the default
resp = sess.get(self.TEST_URL, authenticated=True)
self.assertEqual(200, resp.status_code)
self.assertEqual('Hello', resp.text)
self.assertTrue(auth.invalidate_called)
def test_reauth_not_called(self):
auth = CalledAuthPlugin(invalidate=True)
sess = client_session.Session(auth=auth)
self.requests_mock.get(self.TEST_URL,
[{'text': 'Failed', 'status_code': 401},
{'text': 'Hello', 'status_code': 200}])
self.assertRaises(exceptions.Unauthorized, sess.get, self.TEST_URL,
authenticated=True, allow_reauth=False)
self.assertFalse(auth.invalidate_called)
def test_endpoint_override_overrides_filter(self):
auth = CalledAuthPlugin()
sess = client_session.Session(auth=auth)
override_base = 'http://mytest/'
path = 'path'
override_url = override_base + path
resp_text = uuid.uuid4().hex
self.requests_mock.get(override_url, text=resp_text)
resp = sess.get(path,
endpoint_override=override_base,
endpoint_filter={'service_type': 'identity'})
self.assertEqual(resp_text, resp.text)
self.assertEqual(override_url, self.requests_mock.last_request.url)
self.assertTrue(auth.get_token_called)
self.assertFalse(auth.get_endpoint_called)
def test_endpoint_override_ignore_full_url(self):
auth = CalledAuthPlugin()
sess = client_session.Session(auth=auth)
path = 'path'
url = self.TEST_URL + path
resp_text = uuid.uuid4().hex
self.requests_mock.get(url, text=resp_text)
resp = sess.get(url,
endpoint_override='http://someother.url',
endpoint_filter={'service_type': 'identity'})
self.assertEqual(resp_text, resp.text)
self.assertEqual(url, self.requests_mock.last_request.url)
self.assertTrue(auth.get_token_called)
self.assertFalse(auth.get_endpoint_called)
def test_user_and_project_id(self):
auth = AuthPlugin()
sess = client_session.Session(auth=auth)
self.assertEqual(auth.TEST_USER_ID, sess.get_user_id())
self.assertEqual(auth.TEST_PROJECT_ID, sess.get_project_id())
def test_logger_object_passed(self):
logger = logging.getLogger(uuid.uuid4().hex)
logger.setLevel(logging.DEBUG)
logger.propagate = False
io = six.StringIO()
handler = logging.StreamHandler(io)
logger.addHandler(handler)
auth = AuthPlugin()
sess = client_session.Session(auth=auth)
response = uuid.uuid4().hex
self.stub_url('GET',
text=response,
headers={'Content-Type': 'text/html'})
resp = sess.get(self.TEST_URL, logger=logger)
self.assertEqual(response, resp.text)
output = io.getvalue()
self.assertIn(self.TEST_URL, output)
self.assertIn(response, output)
self.assertNotIn(self.TEST_URL, self.logger.output)
self.assertNotIn(response, self.logger.output)
class AdapterTest(utils.TestCase):
SERVICE_TYPE = uuid.uuid4().hex
SERVICE_NAME = uuid.uuid4().hex
INTERFACE = uuid.uuid4().hex
REGION_NAME = uuid.uuid4().hex
USER_AGENT = uuid.uuid4().hex
VERSION = uuid.uuid4().hex
TEST_URL = CalledAuthPlugin.ENDPOINT
def setUp(self):
super(AdapterTest, self).setUp()
self.deprecations.expect_deprecations()
def _create_loaded_adapter(self):
auth = CalledAuthPlugin()
sess = client_session.Session()
return adapter.Adapter(sess,
auth=auth,
service_type=self.SERVICE_TYPE,
service_name=self.SERVICE_NAME,
interface=self.INTERFACE,
region_name=self.REGION_NAME,
user_agent=self.USER_AGENT,
version=self.VERSION)
def _verify_endpoint_called(self, adpt):
self.assertEqual(self.SERVICE_TYPE,
adpt.auth.endpoint_arguments['service_type'])
self.assertEqual(self.SERVICE_NAME,
adpt.auth.endpoint_arguments['service_name'])
self.assertEqual(self.INTERFACE,
adpt.auth.endpoint_arguments['interface'])
self.assertEqual(self.REGION_NAME,
adpt.auth.endpoint_arguments['region_name'])
self.assertEqual(self.VERSION,
adpt.auth.endpoint_arguments['version'])
def test_setting_variables_on_request(self):
response = uuid.uuid4().hex
self.stub_url('GET', text=response)
adpt = self._create_loaded_adapter()
resp = adpt.get('/')
self.assertEqual(resp.text, response)
self._verify_endpoint_called(adpt)
self.assertTrue(adpt.auth.get_token_called)
self.assertRequestHeaderEqual('User-Agent', self.USER_AGENT)
def test_setting_variables_on_get_endpoint(self):
adpt = self._create_loaded_adapter()
url = adpt.get_endpoint()
self.assertEqual(self.TEST_URL, url)
self._verify_endpoint_called(adpt)
def test_legacy_binding(self):
key = uuid.uuid4().hex
val = uuid.uuid4().hex
response = jsonutils.dumps({key: val})
self.stub_url('GET', text=response)
auth = CalledAuthPlugin()
sess = client_session.Session(auth=auth)
adpt = adapter.LegacyJsonAdapter(sess,
service_type=self.SERVICE_TYPE,
user_agent=self.USER_AGENT)
resp, body = adpt.get('/')
self.assertEqual(self.SERVICE_TYPE,
auth.endpoint_arguments['service_type'])
self.assertEqual(resp.text, response)
self.assertEqual(val, body[key])
def test_legacy_binding_non_json_resp(self):
response = uuid.uuid4().hex
self.stub_url('GET', text=response,
headers={'Content-Type': 'text/html'})
auth = CalledAuthPlugin()
sess = client_session.Session(auth=auth)
adpt = adapter.LegacyJsonAdapter(sess,
service_type=self.SERVICE_TYPE,
user_agent=self.USER_AGENT)
resp, body = adpt.get('/')
self.assertEqual(self.SERVICE_TYPE,
auth.endpoint_arguments['service_type'])
self.assertEqual(resp.text, response)
self.assertIsNone(body)
def test_methods(self):
sess = client_session.Session()
adpt = adapter.Adapter(sess)
url = 'http://url'
for method in ['get', 'head', 'post', 'put', 'patch', 'delete']:
with mock.patch.object(adpt, 'request') as m:
getattr(adpt, method)(url)
m.assert_called_once_with(url, method.upper())
def test_setting_endpoint_override(self):
endpoint_override = 'http://overrideurl'
path = '/path'
endpoint_url = endpoint_override + path
auth = CalledAuthPlugin()
sess = client_session.Session(auth=auth)
adpt = adapter.Adapter(sess, endpoint_override=endpoint_override)
response = uuid.uuid4().hex
self.requests_mock.get(endpoint_url, text=response)
resp = adpt.get(path)
self.assertEqual(response, resp.text)
self.assertEqual(endpoint_url, self.requests_mock.last_request.url)
self.assertEqual(endpoint_override, adpt.get_endpoint())
def test_adapter_invalidate(self):
auth = CalledAuthPlugin()
sess = client_session.Session()
adpt = adapter.Adapter(sess, auth=auth)
adpt.invalidate()
self.assertTrue(auth.invalidate_called)
def test_adapter_get_token(self):
auth = CalledAuthPlugin()
sess = client_session.Session()
adpt = adapter.Adapter(sess, auth=auth)
self.assertEqual(self.TEST_TOKEN, adpt.get_token())
self.assertTrue(auth.get_token_called)
def test_adapter_connect_retries(self):
retries = 2
sess = client_session.Session()
adpt = adapter.Adapter(sess, connect_retries=retries)
def _refused_error(request, context):
raise requests.exceptions.ConnectionError()
self.stub_url('GET', text=_refused_error)
with mock.patch('time.sleep') as m:
self.assertRaises(exceptions.ConnectionRefused,
adpt.get, self.TEST_URL)
self.assertEqual(retries, m.call_count)
        # retries are in addition to the initial request, so there will be one initial request + 2 retries
self.assertThat(self.requests_mock.request_history,
matchers.HasLength(retries + 1))
def test_user_and_project_id(self):
auth = AuthPlugin()
sess = client_session.Session()
adpt = adapter.Adapter(sess, auth=auth)
self.assertEqual(auth.TEST_USER_ID, adpt.get_user_id())
self.assertEqual(auth.TEST_PROJECT_ID, adpt.get_project_id())
def test_logger_object_passed(self):
logger = logging.getLogger(uuid.uuid4().hex)
logger.setLevel(logging.DEBUG)
logger.propagate = False
io = six.StringIO()
handler = logging.StreamHandler(io)
logger.addHandler(handler)
auth = AuthPlugin()
sess = client_session.Session(auth=auth)
adpt = adapter.Adapter(sess, auth=auth, logger=logger)
response = uuid.uuid4().hex
self.stub_url('GET', text=response,
headers={'Content-Type': 'text/html'})
resp = adpt.get(self.TEST_URL, logger=logger)
self.assertEqual(response, resp.text)
output = io.getvalue()
self.assertIn(self.TEST_URL, output)
self.assertIn(response, output)
self.assertNotIn(self.TEST_URL, self.logger.output)
self.assertNotIn(response, self.logger.output)
class ConfLoadingTests(utils.TestCase):
GROUP = 'sessiongroup'
def setUp(self):
super(ConfLoadingTests, self).setUp()
self.conf_fixture = self.useFixture(config.Config())
client_session.Session.register_conf_options(self.conf_fixture.conf,
self.GROUP)
def config(self, **kwargs):
kwargs['group'] = self.GROUP
self.conf_fixture.config(**kwargs)
def get_session(self, **kwargs):
with self.deprecations.expect_deprecations_here():
return client_session.Session.load_from_conf_options(
self.conf_fixture.conf,
self.GROUP,
**kwargs)
def test_insecure_timeout(self):
self.config(insecure=True, timeout=5)
s = self.get_session()
self.assertFalse(s.verify)
self.assertEqual(5, s.timeout)
def test_client_certs(self):
cert = '/path/to/certfile'
key = '/path/to/keyfile'
self.config(certfile=cert, keyfile=key)
s = self.get_session()
self.assertTrue(s.verify)
self.assertEqual((cert, key), s.cert)
def test_cacert(self):
cafile = '/path/to/cacert'
self.config(cafile=cafile)
s = self.get_session()
self.assertEqual(cafile, s.verify)
def test_deprecated(self):
def new_deprecated():
return cfg.DeprecatedOpt(uuid.uuid4().hex, group=uuid.uuid4().hex)
opt_names = ['cafile', 'certfile', 'keyfile', 'insecure', 'timeout']
depr = dict([(n, [new_deprecated()]) for n in opt_names])
opts = client_session.Session.get_conf_options(deprecated_opts=depr)
self.assertThat(opt_names, matchers.HasLength(len(opts)))
for opt in opts:
self.assertIn(depr[opt.name][0], opt.deprecated_opts)
class CliLoadingTests(utils.TestCase):
def setUp(self):
super(CliLoadingTests, self).setUp()
self.parser = argparse.ArgumentParser()
client_session.Session.register_cli_options(self.parser)
def get_session(self, val, **kwargs):
args = self.parser.parse_args(val.split())
with self.deprecations.expect_deprecations_here():
return client_session.Session.load_from_cli_options(args, **kwargs)
def test_insecure_timeout(self):
s = self.get_session('--insecure --timeout 5.5')
self.assertFalse(s.verify)
self.assertEqual(5.5, s.timeout)
def test_client_certs(self):
cert = '/path/to/certfile'
key = '/path/to/keyfile'
s = self.get_session('--os-cert %s --os-key %s' % (cert, key))
self.assertTrue(s.verify)
self.assertEqual((cert, key), s.cert)
def test_cacert(self):
cacert = '/path/to/cacert'
s = self.get_session('--os-cacert %s' % cacert)
self.assertEqual(cacert, s.verify)
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import random
import os
import sys
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from test_imperative_base import new_program_scope
from paddle.fluid.dygraph.base import to_variable
# Can use the Amusic dataset as the DeepCF paper describes.
DATA_PATH = os.environ.get('DATA_PATH', '')
BATCH_SIZE = int(os.environ.get('BATCH_SIZE', 128))
NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 5))
NUM_EPOCHES = int(os.environ.get('NUM_EPOCHES', 1))
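# When DATA_PATH points at a file, load_data() below expects one interaction per
# line as tab-separated integers: "<user_id>\t<item_id>\t<rating>"; positive ratings
# become positive samples and random negatives are added. Without DATA_PATH,
# get_data() falls back to a small randomly generated rating matrix.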
class DMF(fluid.Layer):
def __init__(self, name_scope):
super(DMF, self).__init__(name_scope)
self._user_latent = fluid.FC(self.full_name(), 256)
self._item_latent = fluid.FC(self.full_name(), 256)
self._user_layers = []
self._item_layers = []
self._hid_sizes = [128, 64]
for i in range(len(self._hid_sizes)):
self._user_layers.append(
self.add_sublayer(
'user_layer_%d' % i,
fluid.FC(self.full_name(), self._hid_sizes[i], act='relu')))
self._item_layers.append(
self.add_sublayer(
'item_layer_%d' % i,
fluid.FC(self.full_name(), self._hid_sizes[i], act='relu')))
def forward(self, users, items):
users = self._user_latent(users)
items = self._item_latent(items)
for ul, il in zip(self._user_layers, self._item_layers):
users = ul(users)
items = il(items)
return fluid.layers.elementwise_mul(users, items)
class MLP(fluid.Layer):
def __init__(self, name_scope):
super(MLP, self).__init__(name_scope)
self._user_latent = fluid.FC(self.full_name(), 256)
self._item_latent = fluid.FC(self.full_name(), 256)
self._match_layers = []
self._hid_sizes = [128, 64]
for i in range(len(self._hid_sizes)):
self._match_layers.append(
self.add_sublayer(
'match_layer_%d' % i,
fluid.FC(self.full_name(), self._hid_sizes[i], act='relu')))
def forward(self, users, items):
users = self._user_latent(users)
items = self._item_latent(items)
match_vec = fluid.layers.concat(
[users, items], axis=len(users.shape) - 1)
for l in self._match_layers:
match_vec = l(match_vec)
return match_vec
class DeepCF(fluid.Layer):
def __init__(self, name_scope, num_users, num_items, matrix):
super(DeepCF, self).__init__(name_scope)
self._num_users = num_users
self._num_items = num_items
self._rating_matrix = self.create_parameter(
fluid.ParamAttr(trainable=False),
matrix.shape,
matrix.dtype,
is_bias=False,
default_initializer=fluid.initializer.NumpyArrayInitializer(matrix))
self._rating_matrix.stop_gradient = True
self._mlp = MLP(self.full_name())
self._dmf = DMF(self.full_name())
self._match_fc = fluid.FC(self.full_name(), 1, act='sigmoid')
def forward(self, users, items):
# users_emb = self._user_emb(users)
# items_emb = self._item_emb(items)
users_emb = fluid.layers.gather(self._rating_matrix, users)
items_emb = fluid.layers.gather(
fluid.layers.transpose(self._rating_matrix, [1, 0]), items)
users_emb.stop_gradient = True
items_emb.stop_gradient = True
mlp_predictive = self._mlp(users_emb, items_emb)
dmf_predictive = self._dmf(users_emb, items_emb)
predictive = fluid.layers.concat(
[mlp_predictive, dmf_predictive],
axis=len(mlp_predictive.shape) - 1)
prediction = self._match_fc(predictive)
return prediction
def get_data():
user_ids = []
item_ids = []
labels = []
NUM_USERS = 100
NUM_ITEMS = 1000
matrix = np.zeros([NUM_USERS, NUM_ITEMS], dtype=np.float32)
for uid in range(NUM_USERS):
for iid in range(NUM_ITEMS):
label = float(random.randint(1, 6) == 1)
user_ids.append(uid)
item_ids.append(iid)
labels.append(label)
matrix[uid, iid] = label
indices = np.arange(len(user_ids))
np.random.shuffle(indices)
users_np = np.array(user_ids, dtype=np.int32)[indices]
items_np = np.array(item_ids, dtype=np.int32)[indices]
labels_np = np.array(labels, dtype=np.float32)[indices]
return np.expand_dims(users_np, -1), \
np.expand_dims(items_np, -1), \
np.expand_dims(labels_np, -1), NUM_USERS, NUM_ITEMS, matrix
def load_data(DATA_PATH):
sys.stderr.write('loading from %s\n' % DATA_PATH)
likes = dict()
num_users = -1
num_items = -1
with open(DATA_PATH, 'r') as f:
for l in f.readlines():
uid, iid, rating = [int(v) for v in l.split('\t')]
num_users = max(num_users, uid + 1)
num_items = max(num_items, iid + 1)
if float(rating) > 0.0:
likes[(uid, iid)] = 1.0
user_ids = []
item_ids = []
labels = []
matrix = np.zeros([num_users, num_items], dtype=np.float32)
for uid, iid in likes.keys():
user_ids.append(uid)
item_ids.append(iid)
labels.append(1.0)
matrix[uid, iid] = 1.0
negative = 0
while negative < 3:
nuid = random.randint(0, num_users - 1)
niid = random.randint(0, num_items - 1)
if (nuid, niid) not in likes:
negative += 1
user_ids.append(nuid)
item_ids.append(niid)
labels.append(0.0)
indices = np.arange(len(user_ids))
np.random.shuffle(indices)
users_np = np.array(user_ids, dtype=np.int32)[indices]
items_np = np.array(item_ids, dtype=np.int32)[indices]
labels_np = np.array(labels, dtype=np.float32)[indices]
return np.expand_dims(users_np, -1), \
np.expand_dims(items_np, -1), \
np.expand_dims(labels_np, -1), num_users, num_items, matrix
class TestDygraphDeepCF(unittest.TestCase):
def test_deefcf(self):
seed = 90
if DATA_PATH:
(users_np, items_np, labels_np, num_users, num_items,
matrix) = load_data(DATA_PATH)
else:
(users_np, items_np, labels_np, num_users, num_items,
matrix) = get_data()
startup = fluid.Program()
startup.random_seed = seed
main = fluid.Program()
main.random_seed = seed
scope = fluid.core.Scope()
with new_program_scope(main=main, startup=startup, scope=scope):
users = fluid.layers.data('users', [1], dtype='int32')
items = fluid.layers.data('items', [1], dtype='int32')
labels = fluid.layers.data('labels', [1], dtype='float32')
deepcf = DeepCF('deepcf', num_users, num_items, matrix)
prediction = deepcf(users, items)
loss = fluid.layers.reduce_sum(
fluid.layers.log_loss(prediction, labels))
adam = fluid.optimizer.AdamOptimizer(0.01)
adam.minimize(loss)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
exe.run(startup)
for e in range(NUM_EPOCHES):
sys.stderr.write('epoch %d\n' % e)
for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):
if slice + BATCH_SIZE >= users_np.shape[0]:
break
static_loss = exe.run(
main,
feed={
users.name: users_np[slice:slice + BATCH_SIZE],
items.name: items_np[slice:slice + BATCH_SIZE],
labels.name: labels_np[slice:slice + BATCH_SIZE]
},
fetch_list=[loss])[0]
sys.stderr.write('static loss %s\n' % static_loss)
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
deepcf = DeepCF('deepcf', num_users, num_items, matrix)
adam = fluid.optimizer.AdamOptimizer(0.01)
for e in range(NUM_EPOCHES):
sys.stderr.write('epoch %d\n' % e)
for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):
if slice + BATCH_SIZE >= users_np.shape[0]:
break
prediction = deepcf(
to_variable(users_np[slice:slice + BATCH_SIZE]),
to_variable(items_np[slice:slice + BATCH_SIZE]))
loss = fluid.layers.reduce_sum(
fluid.layers.log_loss(prediction,
to_variable(labels_np[
slice:slice + BATCH_SIZE])))
loss.backward()
adam.minimize(loss)
deepcf.clear_gradients()
dy_loss = loss.numpy()
sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss))
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
deepcf2 = DeepCF('deepcf', num_users, num_items, matrix)
adam2 = fluid.optimizer.AdamOptimizer(0.01)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
for e in range(NUM_EPOCHES):
sys.stderr.write('epoch %d\n' % e)
for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):
if slice + BATCH_SIZE >= users_np.shape[0]:
break
prediction2 = deepcf2(
to_variable(users_np[slice:slice + BATCH_SIZE]),
to_variable(items_np[slice:slice + BATCH_SIZE]))
loss2 = fluid.layers.reduce_sum(
fluid.layers.log_loss(prediction2,
to_variable(labels_np[
slice:slice + BATCH_SIZE])))
loss2.backward(backward_strategy)
adam2.minimize(loss2)
deepcf2.clear_gradients()
dy_loss2 = loss2.numpy()
sys.stderr.write('dynamic loss: %s %s\n' %
(slice, dy_loss2))
self.assertEqual(static_loss, dy_loss)
self.assertEqual(static_loss, dy_loss2)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or CoinsBazar-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/CoinsBazar/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "CoinsBazar")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 18332 if testnet else 8332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
    # Feel free to improve this; it is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
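# select_coins() chooses inputs greedily: it keeps appending unspent outputs until
# their combined amount covers `needed`, and returns the chosen {txid, vout} dicts
# together with the change, i.e. how much the selected coins exceed `needed`.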
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
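    # For instance, json.dumps(Decimal("0.001")) raises TypeError, while
    # json.dumps(float(Decimal("0.001"))) serializes fine, which is why the amounts
    # below are wrapped in float() before being handed to bitcoind.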
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in - total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in - total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUGlobalMetadata(NURESTObject):
""" Represents a GlobalMetadata in the VSD
Notes:
            Metadata associated with an entity.
"""
__rest_name__ = "globalmetadata"
__resource_name__ = "globalmetadatas"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a GlobalMetadata instance
Notes:
                You can specify all parameters while calling this method.
                A special argument named `data` will enable you to load the
                object from a Python dictionary.
Examples:
>>> globalmetadata = NUGlobalMetadata(id=u'xxxx-xxx-xxx-xxx', name=u'GlobalMetadata')
>>> globalmetadata = NUGlobalMetadata(data=my_dict)
"""
super(NUGlobalMetadata, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._description = None
self._metadata_tag_ids = None
self._network_notification_disabled = None
self._blob = None
self._global_metadata = None
self._entity_scope = None
self._assoc_entity_type = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="metadata_tag_ids", remote_name="metadataTagIDs", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="network_notification_disabled", remote_name="networkNotificationDisabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="blob", remote_name="blob", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="global_metadata", remote_name="global", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
name of the Metadata.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
name of the Metadata.
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def description(self):
""" Get description value.
Notes:
Description of the Metadata.
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of the Metadata.
"""
self._description = value
@property
def metadata_tag_ids(self):
""" Get metadata_tag_ids value.
Notes:
                Metadata tag IDs associated with this metadata. You can filter metadata based on this attribute, for example: X-Nuage-Filter: '2d6fb627-603b-421c-b63a-eb0a6d712761' IN metadataTagIDs
This attribute is named `metadataTagIDs` in VSD API.
"""
return self._metadata_tag_ids
@metadata_tag_ids.setter
def metadata_tag_ids(self, value):
""" Set metadata_tag_ids value.
Notes:
                Metadata tag IDs associated with this metadata. You can filter metadata based on this attribute, for example: X-Nuage-Filter: '2d6fb627-603b-421c-b63a-eb0a6d712761' IN metadataTagIDs
This attribute is named `metadataTagIDs` in VSD API.
"""
self._metadata_tag_ids = value
@property
def network_notification_disabled(self):
""" Get network_notification_disabled value.
Notes:
                Specifies whether metadata changes need to be notified to the controller; by default they are notified.
This attribute is named `networkNotificationDisabled` in VSD API.
"""
return self._network_notification_disabled
@network_notification_disabled.setter
def network_notification_disabled(self, value):
""" Set network_notification_disabled value.
Notes:
                Specifies whether metadata changes need to be notified to the controller; by default they are notified.
This attribute is named `networkNotificationDisabled` in VSD API.
"""
self._network_notification_disabled = value
@property
def blob(self):
""" Get blob value.
Notes:
                Metadata that describes the entity it is attached to.
"""
return self._blob
@blob.setter
def blob(self, value):
""" Set blob value.
Notes:
                Metadata that describes the entity it is attached to.
"""
self._blob = value
@property
def global_metadata(self):
""" Get global_metadata value.
Notes:
                Specifies whether the metadata is global or local.
This attribute is named `global` in VSD API.
"""
return self._global_metadata
@global_metadata.setter
def global_metadata(self, value):
""" Set global_metadata value.
Notes:
                Specifies whether the metadata is global or local.
This attribute is named `global` in VSD API.
"""
self._global_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
                Specifies whether the scope of the entity is at the Data center or Enterprise level.
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
                Specifies whether the scope of the entity is at the Data center or Enterprise level.
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def assoc_entity_type(self):
""" Get assoc_entity_type value.
Notes:
                Type of the entity to which the Profile belongs.
This attribute is named `assocEntityType` in VSD API.
"""
return self._assoc_entity_type
@assoc_entity_type.setter
def assoc_entity_type(self, value):
""" Set assoc_entity_type value.
Notes:
                Type of the entity to which the Profile belongs.
This attribute is named `assocEntityType` in VSD API.
"""
self._assoc_entity_type = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
|
|
# -*- coding: utf-8 -*-
from __future__ import division
import copy
import functools
import logging
import math
import re
import unicodedata
from framework import sentry
import six
from django.apps import apps
from django.core.paginator import Paginator
from elasticsearch import (ConnectionError, Elasticsearch, NotFoundError,
RequestError, TransportError, helpers)
from framework.celery_tasks import app as celery_app
from framework.mongo.utils import paginated
from modularodm import Q
from osf.models import AbstractNode as Node
from osf.models import OSFUser as User
from osf.models import BaseFileNode
from osf.models import Institution
from website import settings
from website.filters import gravatar
from website.project.licenses import serialize_node_license_record
from website.search import exceptions
from website.search.util import build_query, clean_splitters
from website.util import sanitize
from website.views import validate_page_num
logger = logging.getLogger(__name__)
# These are the doc_types that exist in the search database
ALIASES = {
'project': 'Projects',
'component': 'Components',
'registration': 'Registrations',
'user': 'Users',
'total': 'Total',
'file': 'Files',
'institution': 'Institutions',
}
DOC_TYPE_TO_MODEL = {
'component': Node,
'project': Node,
'registration': Node,
'user': User,
'file': BaseFileNode,
'institution': Institution
}
# Prevent tokenizing and stop word removal.
NOT_ANALYZED_PROPERTY = {'type': 'string', 'index': 'not_analyzed'}
# Perform stemming on the field it's applied to.
ENGLISH_ANALYZER_PROPERTY = {'type': 'string', 'analyzer': 'english'}
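# For example, create_index() below maps 'tags' to NOT_ANALYZED_PROPERTY so tags are
# matched as exact keywords, while 'title' and 'description' on project-like types use
# ENGLISH_ANALYZER_PROPERTY so they are stemmed for full-text search.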
INDEX = settings.ELASTIC_INDEX
CLIENT = None
def client():
global CLIENT
if CLIENT is None:
try:
CLIENT = Elasticsearch(
settings.ELASTIC_URI,
request_timeout=settings.ELASTIC_TIMEOUT,
retry_on_timeout=True
)
logging.getLogger('elasticsearch').setLevel(logging.WARN)
logging.getLogger('elasticsearch.trace').setLevel(logging.WARN)
logging.getLogger('urllib3').setLevel(logging.WARN)
logging.getLogger('requests').setLevel(logging.WARN)
CLIENT.cluster.health(wait_for_status='yellow')
except ConnectionError:
message = (
'The SEARCH_ENGINE setting is set to "elastic", but there '
'was a problem starting the elasticsearch interface. Is '
'elasticsearch running?'
)
if settings.SENTRY_DSN:
try:
sentry.log_exception()
sentry.log_message(message)
except AssertionError: # App has not yet been initialized
logger.exception(message)
else:
logger.error(message)
exit(1)
return CLIENT
def requires_search(func):
def wrapped(*args, **kwargs):
if client() is not None:
try:
return func(*args, **kwargs)
except ConnectionError:
raise exceptions.SearchUnavailableError('Could not connect to elasticsearch')
except NotFoundError as e:
raise exceptions.IndexNotFoundError(e.error)
except RequestError as e:
if 'ParseException' in e.error: # ES 1.5
raise exceptions.MalformedQueryError(e.error)
if type(e.error) == dict: # ES 2.0
try:
root_cause = e.error['root_cause'][0]
if root_cause['type'] == 'query_parsing_exception':
raise exceptions.MalformedQueryError(root_cause['reason'])
except (AttributeError, KeyError):
pass
raise exceptions.SearchException(e.error)
except TransportError as e:
# Catch and wrap generic uncaught ES error codes. TODO: Improve fix for https://openscience.atlassian.net/browse/OSF-4538
raise exceptions.SearchException(e.error)
sentry.log_message('Elastic search action failed. Is elasticsearch running?')
raise exceptions.SearchUnavailableError('Failed to connect to elasticsearch')
return wrapped
@requires_search
def get_aggregations(query, doc_type):
query['aggregations'] = {
'licenses': {
'terms': {
'field': 'license.id'
}
}
}
res = client().search(index=INDEX, doc_type=doc_type, search_type='count', body=query)
ret = {
doc_type: {
item['key']: item['doc_count']
for item in agg['buckets']
}
for doc_type, agg in res['aggregations'].iteritems()
}
ret['total'] = res['hits']['total']
return ret
@requires_search
def get_counts(count_query, clean=True):
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
res = client().search(index=INDEX, doc_type=None, search_type='count', body=count_query)
counts = {x['key']: x['doc_count'] for x in res['aggregations']['counts']['buckets'] if x['key'] in ALIASES.keys()}
counts['total'] = sum([val for val in counts.values()])
return counts
@requires_search
def get_tags(query, index):
query['aggregations'] = {
'tag_cloud': {
'terms': {'field': 'tags'}
}
}
results = client().search(index=index, doc_type=None, body=query)
tags = results['aggregations']['tag_cloud']['buckets']
return tags
@requires_search
def search(query, index=None, doc_type='_all', raw=False):
"""Search for a query
:param query: The substring of the username/project name/tag to search for
    :param index: Elasticsearch index to search; defaults to the configured INDEX
    :param doc_type: Elasticsearch doc_type to search; defaults to '_all'
    :return: A dictionary containing the results, counts, tags and typeAliases
        results: All results returned by the query that are within the index and search type
        counts: A dictionary in which keys are types and values are counts for that type, e.g. counts['total'] is the sum of the other counts
tags: A list of tags that are returned by the search query
typeAliases: the doc_types that exist in the search database
"""
index = index or INDEX
tag_query = copy.deepcopy(query)
aggs_query = copy.deepcopy(query)
count_query = copy.deepcopy(query)
for key in ['from', 'size', 'sort']:
try:
del tag_query[key]
del aggs_query[key]
del count_query[key]
except KeyError:
pass
tags = get_tags(tag_query, index)
try:
del aggs_query['query']['filtered']['filter']
del count_query['query']['filtered']['filter']
except KeyError:
pass
aggregations = get_aggregations(aggs_query, doc_type=doc_type)
counts = get_counts(count_query, index)
# Run the real query and get the results
raw_results = client().search(index=index, doc_type=doc_type, body=query)
results = [hit['_source'] for hit in raw_results['hits']['hits']]
return_value = {
'results': raw_results['hits']['hits'] if raw else format_results(results),
'counts': counts,
'aggs': aggregations,
'tags': tags,
'typeAliases': ALIASES
}
return return_value
def format_results(results):
ret = []
for result in results:
if result.get('category') == 'user':
result['url'] = '/profile/' + result['id']
elif result.get('category') == 'file':
parent_info = load_parent(result.get('parent_id'))
result['parent_url'] = parent_info.get('url') if parent_info else None
result['parent_title'] = parent_info.get('title') if parent_info else None
elif result.get('category') in {'project', 'component', 'registration'}:
result = format_result(result, result.get('parent_id'))
elif not result.get('category'):
continue
ret.append(result)
return ret
def format_result(result, parent_id=None):
parent_info = load_parent(parent_id)
formatted_result = {
'contributors': result['contributors'],
'wiki_link': result['url'] + 'wiki/',
# TODO: Remove unescape_entities when mako html safe comes in
'title': sanitize.unescape_entities(result['title']),
'url': result['url'],
'is_component': False if parent_info is None else True,
'parent_title': sanitize.unescape_entities(parent_info.get('title')) if parent_info else None,
'parent_url': parent_info.get('url') if parent_info is not None else None,
'tags': result['tags'],
'is_registration': (result['is_registration'] if parent_info is None
else parent_info.get('is_registration')),
'is_retracted': result['is_retracted'],
'is_pending_retraction': result['is_pending_retraction'],
'embargo_end_date': result['embargo_end_date'],
'is_pending_embargo': result['is_pending_embargo'],
'description': result['description'],
'category': result.get('category'),
'date_created': result.get('date_created'),
'date_registered': result.get('registered_date'),
'n_wikis': len(result['wikis']),
'license': result.get('license'),
'affiliated_institutions': result.get('affiliated_institutions'),
}
return formatted_result
def load_parent(parent_id):
parent = Node.load(parent_id)
if parent is None:
return None
parent_info = {}
if parent is not None and parent.is_public:
parent_info['title'] = parent.title
parent_info['url'] = parent.url
parent_info['is_registration'] = parent.is_registration
parent_info['id'] = parent._id
else:
parent_info['title'] = '-- private project --'
parent_info['url'] = ''
parent_info['is_registration'] = None
parent_info['id'] = None
return parent_info
COMPONENT_CATEGORIES = set(settings.NODE_CATEGORY_MAP.keys())
def get_doctype_from_node(node):
if node.is_registration:
return 'registration'
elif node.parent_node is None:
# ElasticSearch categorizes top-level projects differently than children
return 'project'
elif node.category in COMPONENT_CATEGORIES:
return 'component'
else:
return node.category
@celery_app.task(bind=True, max_retries=5, default_retry_delay=60)
def update_node_async(self, node_id, index=None, bulk=False):
AbstractNode = apps.get_model('osf.AbstractNode')
node = AbstractNode.load(node_id)
try:
update_node(node=node, index=index, bulk=bulk, async=True)
except Exception as exc:
self.retry(exc=exc)
@celery_app.task(bind=True, max_retries=5, default_retry_delay=60)
def update_user_async(self, user_id, index=None):
OSFUser = apps.get_model('osf.OSFUser')
user = OSFUser.objects.get(id=user_id)
try:
update_user(user, index)
except Exception as exc:
self.retry(exc)
def serialize_node(node, category):
NodeWikiPage = apps.get_model('addons_wiki.NodeWikiPage')
elastic_document = {}
parent_id = node.parent_id
try:
normalized_title = six.u(node.title)
except TypeError:
normalized_title = node.title
normalized_title = unicodedata.normalize('NFKD', normalized_title).encode('ascii', 'ignore')
elastic_document = {
'id': node._id,
'contributors': [
{
'fullname': x['fullname'],
'url': '/{}/'.format(x['guids___id']) if x['is_active'] else None
}
for x in node._contributors.filter(contributor__visible=True).order_by('contributor___order')
.values('fullname', 'guids___id', 'is_active')
],
'title': node.title,
'normalized_title': normalized_title,
'category': category,
'public': node.is_public,
'tags': list(node.tags.filter(system=False).values_list('name', flat=True)),
'description': node.description,
'url': node.url,
'is_registration': node.is_registration,
'is_pending_registration': node.is_pending_registration,
'is_retracted': node.is_retracted,
'is_pending_retraction': node.is_pending_retraction,
'embargo_end_date': node.embargo_end_date.strftime('%A, %b. %d, %Y') if node.embargo_end_date else False,
'is_pending_embargo': node.is_pending_embargo,
'registered_date': node.registered_date,
'wikis': {},
'parent_id': parent_id,
'date_created': node.date_created,
'license': serialize_node_license_record(node.license),
'affiliated_institutions': list(node.affiliated_institutions.values_list('name', flat=True)),
'boost': int(not node.is_registration) + 1, # This is for making registered projects less relevant
'extra_search_terms': clean_splitters(node.title),
}
if not node.is_retracted:
for wiki in NodeWikiPage.objects.filter(guids___id__in=node.wiki_pages_current.values()):
# '.' is not allowed in field names in ES2
elastic_document['wikis'][wiki.page_name.replace('.', ' ')] = wiki.raw_text(node)
return elastic_document
@requires_search
def update_node(node, index=None, bulk=False, async=False):
from addons.osfstorage.models import OsfStorageFile
index = index or INDEX
for file_ in paginated(OsfStorageFile, Q('node', 'eq', node)):
update_file(file_, index=index)
if node.is_deleted or not node.is_public or node.archiving or (node.is_spammy and settings.SPAM_FLAGGED_REMOVE_FROM_SEARCH):
delete_doc(node._id, node, index=index)
else:
category = get_doctype_from_node(node)
elastic_document = serialize_node(node, category)
if bulk:
return elastic_document
else:
client().index(index=index, doc_type=category, id=node._id, body=elastic_document, refresh=True)
def bulk_update_nodes(serialize, nodes, index=None):
"""Updates the list of input projects
:param function Node-> dict serialize:
:param Node[] nodes: Projects, components or registrations
:param str index: Index of the nodes
    :return: the return value of helpers.bulk() if any documents were updated, otherwise None
"""
index = index or INDEX
actions = []
for node in nodes:
serialized = serialize(node)
if serialized:
actions.append({
'_op_type': 'update',
'_index': index,
'_id': node._id,
'_type': get_doctype_from_node(node),
'doc': serialized,
'doc_as_upsert': True,
})
if actions:
return helpers.bulk(client(), actions)
def serialize_contributors(node):
return {
'contributors': [
{
'fullname': x['user__fullname'],
'url': '/{}/'.format(x['user__guids___id'])
} for x in
node.contributor_set.filter(visible=True, user__is_active=True).order_by('_order').values('user__fullname', 'user__guids___id')
]
}
bulk_update_contributors = functools.partial(bulk_update_nodes, serialize_contributors)
@celery_app.task(bind=True, max_retries=5, default_retry_delay=60)
def update_contributors_async(self, user_id):
OSFUser = apps.get_model('osf.OSFUser')
user = OSFUser.objects.get(id=user_id)
p = Paginator(user.visible_contributor_to.order_by('id'), 100)
for page_num in p.page_range:
bulk_update_contributors(p.page(page_num).object_list)
@requires_search
def update_user(user, index=None):
index = index or INDEX
if not user.is_active:
try:
client().delete(index=index, doc_type='user', id=user._id, refresh=True, ignore=[404])
except NotFoundError:
pass
return
names = dict(
fullname=user.fullname,
given_name=user.given_name,
family_name=user.family_name,
middle_names=user.middle_names,
suffix=user.suffix
)
normalized_names = {}
for key, val in names.items():
if val is not None:
try:
val = six.u(val)
except TypeError:
pass # This is fine, will only happen in 2.x if val is already unicode
normalized_names[key] = unicodedata.normalize('NFKD', val).encode('ascii', 'ignore')
user_doc = {
'id': user._id,
'user': user.fullname,
'normalized_user': normalized_names['fullname'],
'normalized_names': normalized_names,
'names': names,
'job': user.jobs[0]['institution'] if user.jobs else '',
'job_title': user.jobs[0]['title'] if user.jobs else '',
'all_jobs': [job['institution'] for job in user.jobs[1:]],
'school': user.schools[0]['institution'] if user.schools else '',
'all_schools': [school['institution'] for school in user.schools],
'category': 'user',
'degree': user.schools[0]['degree'] if user.schools else '',
'social': user.social_links,
'boost': 2, # TODO(fabianvf): Probably should make this a constant or something
}
client().index(index=index, doc_type='user', body=user_doc, id=user._id, refresh=True)
@requires_search
def update_file(file_, index=None, delete=False):
index = index or INDEX
# TODO: Can remove 'not file_.name' if we remove all base file nodes with name=None
if not file_.name or not file_.node.is_public or delete or file_.node.is_deleted or file_.node.archiving:
client().delete(
index=index,
doc_type='file',
id=file_._id,
refresh=True,
ignore=[404]
)
return
# We build URLs manually here so that this function can be
# run outside of a Flask request context (e.g. in a celery task)
file_deep_url = '/{node_id}/files/{provider}{path}/'.format(
node_id=file_.node._id,
provider=file_.provider,
path=file_.path,
)
node_url = '/{node_id}/'.format(node_id=file_.node._id)
guid_url = None
file_guid = file_.get_guid(create=False)
if file_guid:
guid_url = '/{file_guid}/'.format(file_guid=file_guid._id)
file_doc = {
'id': file_._id,
'deep_url': file_deep_url,
'guid_url': guid_url,
'tags': list(file_.tags.filter(system=False).values_list('name', flat=True)),
'name': file_.name,
'category': 'file',
'node_url': node_url,
'node_title': file_.node.title,
'parent_id': file_.node.parent_node._id if file_.node.parent_node else None,
'is_registration': file_.node.is_registration,
'is_retracted': file_.node.is_retracted,
'extra_search_terms': clean_splitters(file_.name),
}
client().index(
index=index,
doc_type='file',
body=file_doc,
id=file_._id,
refresh=True
)
@requires_search
def update_institution(institution, index=None):
index = index or INDEX
id_ = institution._id
if institution.is_deleted:
client().delete(index=index, doc_type='institution', id=id_, refresh=True, ignore=[404])
else:
institution_doc = {
'id': id_,
'url': '/institutions/{}/'.format(institution._id),
'logo_path': institution.logo_path,
'category': 'institution',
'name': institution.name,
}
client().index(index=index, doc_type='institution', body=institution_doc, id=id_, refresh=True)
@requires_search
def delete_all():
delete_index(INDEX)
@requires_search
def delete_index(index):
client().indices.delete(index, ignore=[404])
@requires_search
def create_index(index=None):
'''Creates the index with some initial mappings: tag and license mappings
are applied to every document type, analyzed title/description fields to
projects, components, and registrations, and boosted job/school fields to users.
'''
index = index or INDEX
document_types = ['project', 'component', 'registration', 'user', 'file', 'institution']
project_like_types = ['project', 'component', 'registration']
analyzed_fields = ['title', 'description']
client().indices.create(index, ignore=[400]) # HTTP 400 if index already exists
for type_ in document_types:
mapping = {
'properties': {
'tags': NOT_ANALYZED_PROPERTY,
'license': {
'properties': {
'id': NOT_ANALYZED_PROPERTY,
'name': NOT_ANALYZED_PROPERTY,
# Elasticsearch automatically infers a field's mapping from its content. `year` needs to
# be explicitly mapped as a string to allow date-range queries, which break on the inferred type
'year': {'type': 'string'},
}
}
}
}
if type_ in project_like_types:
analyzers = {field: ENGLISH_ANALYZER_PROPERTY
for field in analyzed_fields}
mapping['properties'].update(analyzers)
if type_ == 'user':
fields = {
'job': {
'type': 'string',
'boost': '1',
},
'all_jobs': {
'type': 'string',
'boost': '0.01',
},
'school': {
'type': 'string',
'boost': '1',
},
'all_schools': {
'type': 'string',
'boost': '0.01'
},
}
mapping['properties'].update(fields)
client().indices.put_mapping(index=index, doc_type=type_, body=mapping, ignore=[400, 404])
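# A hypothetical sketch of how this is typically exercised: build a fresh
# test index and then populate it (the index name is illustrative).
#
#     create_index(index='osf_test')
#     update_node(node, index='osf_test')
#     update_user(user, index='osf_test')
#
# Because the create call passes ignore=[400], it is a no-op when the index
# already exists.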
@requires_search
def delete_doc(elastic_document_id, node, index=None, category=None):
index = index or INDEX
category = category or ('registration' if node.is_registration else node.project_or_component)
client().delete(index=index, doc_type=category, id=elastic_document_id, refresh=True, ignore=[404])
@requires_search
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
"""Search for contributors to add to a project using elastic search. Request must
include JSON data with a "query" field.
:param query: The substring of the username to search for
:param page: For pagination, the page number to use for results
:param size: For pagination, the number of results per page
:param exclude: A list of User objects to exclude from the search
:param current_user: A User object of the current user
:return: Dict with a 'users' list (each entry containing the ID, full name,
most recent employment and education, and gravatar URL of an OSF user) plus pagination info
"""
start = (page * size)
items = re.split(r'[\s-]+', query)
exclude = exclude or []
normalized_items = []
for item in items:
try:
normalized_item = six.u(item)
except TypeError:
normalized_item = item
normalized_item = unicodedata.normalize('NFKD', normalized_item).encode('ascii', 'ignore')
normalized_items.append(normalized_item)
items = normalized_items
query = ' AND '.join('{}*~'.format(re.escape(item)) for item in items) + \
''.join(' NOT id:"{}"'.format(excluded._id) for excluded in exclude)
results = search(build_query(query, start=start, size=size), index=INDEX, doc_type='user')
docs = results['results']
pages = math.ceil(results['counts'].get('user', 0) / size)
validate_page_num(page, pages)
users = []
for doc in docs:
# TODO: use utils.serialize_user
user = User.load(doc['id'])
if user is None:
logger.error('Could not load user {0}'.format(doc['id']))
continue
if current_user and current_user._id == user._id:
n_projects_in_common = -1
elif current_user:
n_projects_in_common = current_user.n_projects_in_common(user)
else:
n_projects_in_common = 0
if user.is_active: # exclude merged, unregistered, etc.
current_employment = None
education = None
if user.jobs:
current_employment = user.jobs[0]['institution']
if user.schools:
education = user.schools[0]['institution']
users.append({
'fullname': doc['user'],
'id': doc['id'],
'employment': current_employment,
'education': education,
'n_projects_in_common': n_projects_in_common,
'gravatar_url': gravatar(
user,
use_ssl=True,
size=settings.PROFILE_IMAGE_MEDIUM
),
'profile_url': user.profile_url,
'registered': user.is_registered,
'active': user.is_active
})
return {
'users': users,
'total': results['counts']['total'],
'pages': pages,
'page': page,
}
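# A hedged usage sketch (`me` and `teammate` stand in for User instances the
# caller already has; the query string is arbitrary):
#
#     results = search_contributor('smith', page=0, size=10,
#                                  exclude=[teammate], current_user=me)
#     for entry in results['users']:
#         print(entry['fullname'])
#
# results['pages'] and results['total'] carry the pagination metadata used by
# the contributor-search UI.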
|
|
from google.appengine.ext import webapp
from google.appengine.api import memcache
from dbmodel import *
from session import *
from caching import *
import json
import datetime
import logging
class GetDeviceType(webapp.RequestHandler):
def get(self):
# First check session authorized
if not checkSession(self):
self.abort(403)
else:
self.response.set_cookie('session_key', self.request.cookies.get('session_key'), datetime.timedelta(hours=2), path='/')
device_types = []
dev_type_key = self.request.get('key')
dev_types = DeviceType.all()
if dev_type_key:
dev_types.filter("__key__ =", db.Key(dev_type_key))
for dev_type in dev_types:
device_type = {}
device_type['key'] = str(dev_type.key())
device_type['name'] = dev_type.name
device_types.append(device_type)
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.JSONEncoder().encode(device_types))
class GetFeatureCategory(webapp.RequestHandler):
def get(self):
# First check session authorized
if not checkSession(self):
self.abort(403)
else:
self.response.set_cookie('session_key', self.request.cookies.get('session_key'), datetime.timedelta(hours=2), path='/')
feature_categories = []
dev_type_key = self.request.get('device_type')
fc_key = self.request.get('key')
fcs = FeatureCategory.all()
if dev_type_key:
fcs.ancestor(db.Key(dev_type_key))
if fc_key:
fcs.filter("__key__ = ", db.Key(fc_key))
for fc in fcs:
feature_category = {}
dev_type = getBuffObj(fc.key().parent())
feature_category['key'] = str(fc.key())
feature_category['name_eng'] = fc.name_eng
feature_category['name_spa'] = fc.name_spa
feature_category['name_por'] = fc.name_por
feature_category['device_type_name'] = dev_type.name
feature_category['device_type_key'] = str(dev_type.key())
feature_categories.append(feature_category)
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.JSONEncoder().encode(feature_categories))
class GetFeature(webapp.RequestHandler):
def get(self):
# First check session authorized
if not checkSession(self):
self.abort(403)
else:
self.response.set_cookie('session_key', self.request.cookies.get('session_key'), datetime.timedelta(hours=2), path='/')
feat_key = self.request.get('key')
dev_type_key = self.request.get('device_type')
feat_category = self.request.get('feature_category')
features = []
if feat_key:
feats = Feature.all()
feats.filter("__key__ = ", db.Key(feat_key))
else:
if feat_category:
b = memcache.get('featbyfc=' + feat_category)
if b:
feats = b
logging.info('Cache Hit for featbyfc=' + feat_category)
else:
feats = Feature.all()
feats.ancestor(db.Key(feat_category))
feats = list(feats)
memcache.add('featbyfc=' + feat_category, feats, 600)
else:
if dev_type_key:
b = memcache.get('featbydt=' + dev_type_key)
if b:
feats = b
logging.info('Cache Hit for featbydt=' + dev_type_key)
else:
feats = Feature.all()
feats.ancestor(db.Key(dev_type_key))
feats = list(feats)
memcache.add('featbydt=' + dev_type_key, feats, 600)
for feat in feats:
feature = {}
feature['key'] = str(feat.key())
feature['name'] = feat.name
cat = getBuffObj(feat.key().parent())
feature['category_name'] = cat.name_eng
feature['category_key'] = str(cat.key())
dev_type = getBuffObj(cat.key().parent())
feature['device_type_key'] = str(dev_type.key())
feature['desc_eng'] = feat.desc_eng
feature['desc_spa'] = feat.desc_spa
feature['desc_por'] = feat.desc_por
features.append(feature)
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.JSONEncoder().encode(features))
class GetDevice(webapp.RequestHandler):
def get(self):
# First check session authorized
if not checkSession(self):
self.abort(403)
else:
self.response.set_cookie('session_key', self.request.cookies.get('session_key'), datetime.timedelta(hours=2), path='/')
devices = []
dev_type_key = self.request.get('device_type')
dev_key = self.request.get('key')
devs = Device.all()
if dev_type_key:
# Filter devices by Device Type
devs.ancestor(db.Key(dev_type_key))
if dev_key:
devs.filter("__key__ = ", db.Key(dev_key))
for dev in devs:
device = {}
device['key'] = str(dev.key())
device['vendor'] = dev.vendor
device['model'] = dev.model
device['description'] = dev.description
dev_type = getBuffObj(dev.key().parent())
device['device_type_name'] = dev_type.name
device['device_type_key'] = str(dev_type.key())
devices.append(device)
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.JSONEncoder().encode(devices))
class GetDeviceFeatures(webapp.RequestHandler):
def get(self):
# First check session authorized
if not checkSession(self):
self.abort(403)
else:
self.response.set_cookie('session_key', self.request.cookies.get('session_key'), datetime.timedelta(hours=2), path='/')
device = self.request.get('device')
feat_category = self.request.get('feature_category')
if not device:
self.response.out.write('Error')
return
if not feat_category:
out = memcache.get('dfbydev=' + device)
if out:
logging.info('Cache Hit for dfbydev=' + device)
self.response.out.write(out)
return
response = {}
dev = getBuffObj(db.Key(device))
response['device_vendor'] = dev.vendor
response['device_model'] = dev.model
response['device_key'] = str(dev.key())
if feat_category:
b = memcache.get('featbyfc=' + feat_category)
if b:
feats = b
logging.info('Cache Hit for featbyfc=' + feat_category)
else:
feats = Feature.all()
feats.ancestor(db.Key(feat_category))
feats = list(feats)
memcache.add('featbyfc=' + feat_category, feats, 600)
else:
dt = dev.key().parent() # Device Type
b = memcache.get('featbydt=' + str(dt))
if b:
feats = b
logging.info('Cache Hit for featbydt=' + str(dt))
else:
feats = Feature.all()
feats.ancestor(dt) # Filter features per device type
feats = list(feats)
memcache.add('featbydt=' + str(dt), feats, 600)
features = {}
dev_features = []
for dev_feat in dev.features:
dev_features.append(dev_feat)
for feat in feats:
feature = {}
feature['feat_name'] = feat.name
feat_key = feat.key()
if feat_key in dev_features:
feature['supported'] = True
else:
feature['supported'] = False
features[str(feat_key)] = feature
response['features'] = features
out = json.JSONEncoder().encode(response)
memcache.add('dfbydev=' + device, out, 600)
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(out)
class GetToken(webapp.RequestHandler):
def get(self):
tokens = []
tks = AccessToken.all()
for tk in tks:
token = {}
token['key'] = str(tk.key())
token['token'] = tk.token
token['user'] = tk.user
token['email'] = tk.email
token['lastlogin'] = str(tk.lastlogin)
tokens.append(token)
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.JSONEncoder().encode(tokens))
class GetSession(webapp.RequestHandler):
def get(self):
sessions = []
ses = Session.all()
for s in ses:
session = {}
session['start'] = str(s.start)
token = db.get(s.key().parent())
session['user'] = token.user
sessions.append(session)
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json.JSONEncoder().encode(sessions))
app = webapp.WSGIApplication(
[('/get/device_type', GetDeviceType),
('/get/feature_category', GetFeatureCategory),
('/get/feature', GetFeature),
('/get/device', GetDevice),
('/get/device_features', GetDeviceFeatures),
('/get/token', GetToken),
('/get/session', GetSession)],
debug=True)
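# Illustrative request/response for one of the routes above (keys and names
# are made up; a valid 'session_key' cookie is required, otherwise the
# device/feature handlers answer with HTTP 403):
#
#     GET /get/device_type
#     -> [{"key": "agxkZXZpY2Uu...", "name": "Router"}]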
|
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import array
import base64
import json
import os
import struct
from compas.files.gltf.constants import COMPONENT_TYPE_ENUM
from compas.files.gltf.constants import COMPONENT_TYPE_FLOAT
from compas.files.gltf.constants import COMPONENT_TYPE_UNSIGNED_INT
from compas.files.gltf.constants import NUM_COMPONENTS_BY_TYPE_ENUM
from compas.files.gltf.constants import TYPE_MAT4
from compas.files.gltf.constants import TYPE_SCALAR
from compas.files.gltf.constants import TYPE_VEC2
from compas.files.gltf.constants import TYPE_VEC3
from compas.files.gltf.constants import TYPE_VEC4
# This fails on IronPython 2.7.8 (e.g. Rhino 6 on Windows)
# but works on IronPython 2.7.9 (Rhino 6 on Mac)
try:
struct.pack_into('<I', bytearray(4), 0, 0)
USE_BYTEARRAY_BUFFERS = True
except TypeError:
USE_BYTEARRAY_BUFFERS = False
class GLTFExporter(object):
"""Export a glTF or glb file based on the supplied scene and ancillary data.
Parameters
----------
filepath : str
Location where the glTF or glb is to be written. The extension of the filepath
determines which format will be used. If there will be an accompanying binary file,
it will be written in the same directory.
content : :class:`~compas.files.GLTFContent`
embed_data : bool
When ``True``, all mesh and other data will be embedded as data URIs in the glTF JSON,
with the exception of external image data.
When ``False``, the data will be written to an external binary file or chunk.
"""
def __init__(self, filepath, content, embed_data=False):
self.gltf_filepath = filepath
self._dirname = None
self._filename = None
self._ext = None
self._embed_data = embed_data
self._content = content
self._gltf_dict = {}
self._mesh_index_by_key = {}
self._node_index_by_key = {}
self._scene_index_by_key = {}
self._camera_index_by_key = {}
self._skin_index_by_key = {}
self._material_index_by_key = {}
self._texture_index_by_key = {}
self._sampler_index_by_key = {}
self._image_index_by_key = {}
self._buffer = b''
self.load()
@property
def embed_data(self):
return self._embed_data
@embed_data.setter
def embed_data(self, value):
if value != self._embed_data:
self._embed_data = value
self.load()
def load(self):
"""Creates the json object and the binary data (if any) to be written.
Returns
-------
None
"""
self._content.remove_orphans()
self._content.check_if_forest()
self._set_initial_gltf_dict()
self._mesh_index_by_key = self._get_index_by_key(self._content.meshes)
self._node_index_by_key = self._get_index_by_key(self._content.nodes)
self._scene_index_by_key = self._get_index_by_key(self._content.scenes)
self._camera_index_by_key = self._get_index_by_key(self._content.cameras)
self._skin_index_by_key = self._get_index_by_key(self._content.skins)
self._material_index_by_key = self._get_index_by_key(self._content.materials)
self._texture_index_by_key = self._get_index_by_key(self._content.textures)
self._sampler_index_by_key = self._get_index_by_key(self._content.samplers)
self._image_index_by_key = self._get_index_by_key(self._content.images)
self._buffer = b''
self._set_path_attributes()
self._add_meshes()
self._add_nodes()
self._add_scenes()
self._add_cameras()
self._add_skins()
self._add_materials()
self._add_textures()
self._add_samplers()
self._add_images()
self._add_animations()
self._add_buffer()
def _get_index_by_key(self, d):
return {key: index for index, key in enumerate(d)}
def export(self):
"""Writes the json to *.gltf* or *.glb*, and binary data to *.bin* as required.
Returns
-------
None
"""
gltf_json = json.dumps(self._gltf_dict, indent=4)
if self._ext == '.gltf':
with open(self.gltf_filepath, 'w') as f:
f.write(gltf_json)
if not self._embed_data and len(self._buffer) > 0:
with open(self.get_bin_path(), 'wb') as f:
f.write(self._buffer)
if self._ext == '.glb':
with open(self.gltf_filepath, 'wb') as f:
gltf_data = gltf_json.encode()
length_gltf = len(gltf_data)
spaces_gltf = (4 - (length_gltf & 3)) & 3
length_gltf += spaces_gltf
length_bin = len(self._buffer)
zeros_bin = (4 - (length_bin & 3)) & 3
length_bin += zeros_bin
length = 12 + 8 + length_gltf
if length_bin > 0:
length += 8 + length_bin
f.write('glTF'.encode('ascii'))
f.write(struct.pack('<I', 2))
f.write(struct.pack('<I', length))
f.write(struct.pack('<I', length_gltf))
f.write('JSON'.encode('ascii'))
f.write(gltf_data)
for i in range(0, spaces_gltf):
f.write(' '.encode())
if length_bin > 0:
f.write(struct.pack('<I', length_bin))
f.write('BIN\0'.encode())
f.write(self._buffer)
for i in range(0, zeros_bin):
f.write('\0'.encode())
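# For reference, the .glb container written above has this byte layout
# (all integer fields are little-endian uint32):
#
#     offset  0   'glTF'        magic
#     offset  4   2             container version
#     offset  8   length        total file length (header + both chunks)
#     offset 12   length_gltf   JSON chunk length (space-padded to 4 bytes)
#     offset 16   'JSON'        JSON chunk type, followed by gltf_data
#     then        length_bin    BIN chunk length (zero-padded to 4 bytes)
#                 'BIN\0'       BIN chunk type, followed by self._buffer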
def _add_images(self):
if not self._content.images:
return
images_list = [None] * len(self._content.images)
for key, image_data in self._content.images.items():
uri = self._construct_image_data_uri(image_data) if self.embed_data else None
buffer_view = self._construct_buffer_view(image_data.data) if not self.embed_data else None
images_list[self._image_index_by_key[key]] = image_data.to_data(uri, buffer_view)
self._gltf_dict['images'] = images_list
def _construct_image_data_uri(self, image_data):
if image_data.data is None:
return None
return (
'data:'
+ (image_data.mime_type if image_data.mime_type else '')
+ ';base64,' + base64.b64encode(image_data.data).decode('ascii')
)
def _add_samplers(self):
if not self._content.samplers:
return
samplers_list = [None] * len(self._content.samplers)
for key, sampler_data in self._content.samplers.items():
samplers_list[self._sampler_index_by_key[key]] = sampler_data.to_data()
self._gltf_dict['samplers'] = samplers_list
def _add_textures(self):
if not self._content.textures:
return
textures_list = [None] * len(self._content.textures)
for key, texture_data in self._content.textures.items():
textures_list[self._texture_index_by_key[key]] = texture_data.to_data(self._sampler_index_by_key, self._image_index_by_key)
self._gltf_dict['textures'] = textures_list
def _add_materials(self):
if not self._content.materials:
return
materials_list = [None] * len(self._content.materials)
for key, material_data in self._content.materials.items():
materials_list[self._material_index_by_key[key]] = material_data.to_data(self._texture_index_by_key)
self._gltf_dict['materials'] = materials_list
def _add_skins(self):
if not self._content.skins:
return
skins_list = [None] * len(self._content.skins)
for key, skin_data in self._content.skins.items():
accessor_index = self._construct_accessor(skin_data.inverse_bind_matrices, COMPONENT_TYPE_FLOAT, TYPE_MAT4)
skins_list[self._skin_index_by_key[key]] = skin_data.to_data(self._node_index_by_key, accessor_index)
self._gltf_dict['skins'] = skins_list
def _add_cameras(self):
if not self._content.cameras:
return
camera_list = [None] * len(self._content.cameras)
for key, camera_data in self._content.cameras.items():
camera_list[self._camera_index_by_key[key]] = camera_data.to_data()
self._gltf_dict['cameras'] = camera_list
def _add_meshes(self):
if not self._content.meshes:
return
mesh_list = [None] * len(self._content.meshes)
for key, mesh_data in self._content.meshes.items():
primitives = self._construct_primitives(mesh_data)
mesh_list[self._mesh_index_by_key[key]] = mesh_data.to_data(primitives)
self._gltf_dict['meshes'] = mesh_list
def _add_buffer(self):
if not self._buffer:
return
buffer = {'byteLength': len(self._buffer)}
if self._embed_data:
buffer['uri'] = 'data:application/octet-stream;base64,' + base64.b64encode(self._buffer).decode('ascii')
elif self._ext == '.gltf':
buffer['uri'] = self.get_bin_filename()
self._gltf_dict['buffers'] = [buffer]
def _add_animations(self):
if not self._content.animations:
return None
animation_list = []
for animation_data in self._content.animations.values():
samplers_list = self._construct_animation_samplers_list(animation_data)
animation_list.append(animation_data.to_data(samplers_list, self._node_index_by_key))
self._gltf_dict['animations'] = animation_list
def _construct_animation_samplers_list(self, animation_data):
sampler_index_by_key = animation_data.get_sampler_index_by_key()
samplers_list = [None] * len(sampler_index_by_key)
for key, sampler_data in animation_data.samplers_dict.items():
input_accessor = self._construct_accessor(sampler_data.input, COMPONENT_TYPE_FLOAT, TYPE_SCALAR, include_bounds=True)
type_ = TYPE_VEC3
if isinstance(sampler_data.output[0], int) or isinstance(sampler_data.output[0], float):
type_ = TYPE_SCALAR
elif len(sampler_data.output[0]) == 4:
type_ = TYPE_VEC4
output_accessor = self._construct_accessor(sampler_data.output, COMPONENT_TYPE_FLOAT, type_)
samplers_list[sampler_index_by_key[key]] = sampler_data.to_data(input_accessor, output_accessor)
return samplers_list
def _set_initial_gltf_dict(self):
asset_dict = {'version': '2.0'}
gltf_dict = {'asset': asset_dict}
if self._content.extras:
gltf_dict['extras'] = self._content.extras
if self._content.extensions:
gltf_dict['extensions'] = self._content.extensions
self._gltf_dict = gltf_dict
def _add_scenes(self):
if not self._content.scenes:
return
if self._content.default_scene_key is not None:
self._gltf_dict['scene'] = self._scene_index_by_key[self._content.default_scene_key]
scene_list = [None] * len(self._content.scenes.values())
for key, scene in self._content.scenes.items():
scene_list[self._scene_index_by_key[key]] = scene.to_data(self._node_index_by_key)
self._gltf_dict['scenes'] = scene_list
def _add_nodes(self):
if not self._content.nodes:
return
node_list = [None] * len(self._content.nodes)
for key, node in self._content.nodes.items():
node_list[self._node_index_by_key[key]] = node.to_data(
self._node_index_by_key,
self._mesh_index_by_key,
self._camera_index_by_key,
self._skin_index_by_key,
)
self._gltf_dict['nodes'] = node_list
def _construct_primitives(self, mesh_data):
primitives = []
for primitive_data in mesh_data.primitive_data_list:
indices_accessor = self._construct_accessor(primitive_data.indices, COMPONENT_TYPE_UNSIGNED_INT, TYPE_SCALAR)
attributes = {}
for attr in primitive_data.attributes:
component_type = COMPONENT_TYPE_UNSIGNED_INT if attr.startswith('JOINT') else COMPONENT_TYPE_FLOAT
type_ = TYPE_VEC3
if len(primitive_data.attributes[attr][0]) == 4:
type_ = TYPE_VEC4
if len(primitive_data.attributes[attr][0]) == 2:
type_ = TYPE_VEC2
attributes[attr] = self._construct_accessor(primitive_data.attributes[attr], component_type, type_, True)
targets = []
for target in primitive_data.targets or []:
target_dict = {}
for attr in target:
component_type = COMPONENT_TYPE_FLOAT
type_ = TYPE_VEC3
target_dict[attr] = self._construct_accessor(target[attr], component_type, type_, True)
targets.append(target_dict)
primitive_dict = primitive_data.to_data(indices_accessor, attributes, targets, self._material_index_by_key)
primitives.append(primitive_dict)
return primitives
def _construct_accessor(self, data, component_type, type_, include_bounds=False):
if data is None:
return None
count = len(data)
fmt_char = COMPONENT_TYPE_ENUM[component_type]
fmt = '<' + fmt_char * NUM_COMPONENTS_BY_TYPE_ENUM[type_]
component_size = struct.calcsize('<' + fmt_char)
# glTF requires matrix columns to be aligned to 4-byte boundaries, so pad small component types.
if type_ == 'MAT2' and component_size == 1:
fmt = '<FFxxFFxx'.replace('F', fmt_char)
elif type_ == 'MAT3' and component_size == 1:
fmt = '<FFFxFFFxFFFx'.replace('F', fmt_char)
elif type_ == 'MAT3' and component_size == 2:
fmt = '<FFFxxFFFxxFFFxx'.replace('F', fmt_char)
component_len = struct.calcsize(fmt)
# ensure bytes_ length is divisible by 4
size = count * component_len
size += (4 - size % 4) % 4
if USE_BYTEARRAY_BUFFERS:
bytes_ = bytearray(size)
else:
bytes_ = array.array('B', [0] * size)
for i, datum in enumerate(data):
if isinstance(datum, int) or isinstance(datum, float):
struct.pack_into(fmt, bytes_, (i * component_len), datum)
else:
struct.pack_into(fmt, bytes_, (i * component_len), *datum)
buffer_view_index = self._construct_buffer_view(bytes_)
accessor_dict = {
'bufferView': buffer_view_index,
'count': count,
'componentType': component_type,
'type': type_,
}
if include_bounds:
try:
# Here we check if ``data`` contains tuples,
# and compute min/max per coordinate.
_ = [e for e in data[0]]
minimum = tuple(map(min, zip(*data)))
maximum = tuple(map(max, zip(*data)))
except TypeError:
# Here, ``data`` must contain primitives and not tuples,
# so min and max are more simply computed.
minimum = (min(data),)
maximum = (max(data),)
accessor_dict['min'] = minimum
accessor_dict['max'] = maximum
self._gltf_dict.setdefault('accessors', []).append(accessor_dict)
return len(self._gltf_dict['accessors']) - 1
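# Illustrative accessor entry appended above (indices and bounds are made up;
# 5126 is the glTF component type code for float):
#
#     {'bufferView': 3, 'count': 24, 'componentType': 5126, 'type': 'VEC3',
#      'min': (-1.0, -1.0, -1.0), 'max': (1.0, 1.0, 1.0)}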
def _construct_buffer_view(self, bytes_):
if not bytes_:
return None
byte_offset = self._update_buffer(bytes_)
buffer_view_dict = {
'buffer': 0,
'byteLength': len(bytes_),
'byteOffset': byte_offset,
}
self._gltf_dict.setdefault('bufferViews', []).append(buffer_view_dict)
return len(self._gltf_dict['bufferViews']) - 1
def _update_buffer(self, bytes_):
byte_offset = len(self._buffer)
# If bytes_ was not created as bytearray, cast now
if not USE_BYTEARRAY_BUFFERS:
bytes_ = bytearray(bytes_)
self._buffer += bytes_
return byte_offset
def _set_path_attributes(self):
dirname, basename = os.path.split(self.gltf_filepath)
root, ext = os.path.splitext(basename)
self._dirname = dirname
self._filename = root
self._ext = ext.lower()
def get_bin_path(self):
return os.path.join(self._dirname, self._filename + '.bin')
def get_bin_filename(self):
return self._filename + '.bin'
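# A minimal usage sketch (assumes `content` is a populated
# compas.files.GLTFContent instance; the filepath is arbitrary):
#
#     exporter = GLTFExporter('scene.glb', content, embed_data=False)
#     exporter.export()
#
# With a '.gltf' filepath and embed_data=False, a sidecar 'scene.bin' is
# written next to the JSON; with '.glb', the buffer goes into the BIN chunk.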
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, or linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import argparse
import datetime
import glob
import os
import re
import sys
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required to run this script\n")
sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
BUILD_DIR = os.path.join(NACL_DIR, 'build')
NACL_TOOLCHAIN_DIR = os.path.join(NACL_DIR, 'toolchain')
NACL_TOOLCHAINTARS_DIR = os.path.join(NACL_TOOLCHAIN_DIR, '.tars')
CYGTAR = os.path.join(BUILD_DIR, 'cygtar.py')
PKGVER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
GYPBUILD_DIR = 'gypbuild'
options = None
# Map of: ToolchainName: (PackageName, SDKDir, arch).
TOOLCHAIN_PACKAGE_MAP = {
'arm_glibc': ('nacl_arm_glibc', '%(platform)s_arm_glibc', 'arm'),
'x86_glibc': ('nacl_x86_glibc', '%(platform)s_x86_glibc', 'x86'),
'pnacl': ('pnacl_newlib', '%(platform)s_pnacl', 'pnacl')
}
def GetToolchainDirName(tcname):
"""Return the directory name for a given toolchain"""
return TOOLCHAIN_PACKAGE_MAP[tcname][1] % {'platform': getos.GetPlatform()}
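# For example, GetToolchainDirName('pnacl') evaluates to 'linux_pnacl' on a
# Linux host and 'mac_pnacl' on a Mac (getos.GetPlatform() supplies the prefix).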
def GetToolchainDir(pepperdir, tcname):
"""Return the full path to a given toolchain within a given sdk root"""
return os.path.join(pepperdir, 'toolchain', GetToolchainDirName(tcname))
def GetToolchainLibc(tcname):
if tcname == 'pnacl':
return 'newlib'
for libc in ('glibc', 'newlib', 'host'):
if libc in tcname:
return libc
def GetToolchainNaClInclude(pepperdir, tcname, arch=None):
tcpath = GetToolchainDir(pepperdir, tcname)
if arch is None:
arch = TOOLCHAIN_PACKAGE_MAP[tcname][2]
if arch == 'x86':
return os.path.join(tcpath, 'x86_64-nacl', 'include')
elif arch == 'pnacl':
return os.path.join(tcpath, 'le32-nacl', 'include')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'include')
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
def GetConfigDir(arch):
if arch.endswith('x64') and getos.GetPlatform() == 'win':
return 'Release_x64'
else:
return 'Release'
def GetNinjaOutDir(arch):
return os.path.join(OUT_DIR, GYPBUILD_DIR + '-' + arch, GetConfigDir(arch))
def GetGypBuiltLib(tcname, arch):
if arch == 'ia32':
lib_suffix = '32'
elif arch == 'x64':
lib_suffix = '64'
elif arch == 'arm':
lib_suffix = 'arm'
else:
lib_suffix = ''
tcdir = 'tc_' + GetToolchainLibc(tcname)
if tcname == 'pnacl':
if arch is None:
lib_suffix = ''
tcdir = 'tc_pnacl_newlib'
arch = 'x64'
else:
arch = 'clang-' + arch
return os.path.join(GetNinjaOutDir(arch), 'gen', tcdir, 'lib' + lib_suffix)
def GetToolchainNaClLib(tcname, tcpath, arch):
if arch == 'ia32':
return os.path.join(tcpath, 'x86_64-nacl', 'lib32')
elif arch == 'x64':
return os.path.join(tcpath, 'x86_64-nacl', 'lib')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'lib')
elif tcname == 'pnacl':
return os.path.join(tcpath, 'le32-nacl', 'lib')
def GetOutputToolchainLib(pepperdir, tcname, arch):
tcpath = os.path.join(pepperdir, 'toolchain', GetToolchainDirName(tcname))
return GetToolchainNaClLib(tcname, tcpath, arch)
def GetPNaClTranslatorLib(tcpath, arch):
if arch not in ['arm', 'x86-32', 'x86-64']:
buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
return os.path.join(tcpath, 'translator', arch, 'lib')
def BuildStepDownloadToolchains(toolchains):
buildbot_common.BuildStep('Running package_version.py')
args = [sys.executable, PKGVER, '--mode', 'nacl_core_sdk']
args.extend(['sync', '--extract'])
buildbot_common.Run(args, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
buildbot_common.BuildStep('Clean Pepper Dirs')
dirs_to_remove = (
pepperdir,
pepperdir_old,
os.path.join(OUT_DIR, 'arm_trusted')
)
for dirname in dirs_to_remove:
if os.path.exists(dirname):
buildbot_common.RemoveDir(dirname)
buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
for subdir in subdirs:
buildbot_common.MakeDir(os.path.join(pepperdir, subdir))
TEXT_FILES = [
'AUTHORS',
'COPYING',
'LICENSE',
'README.Makefiles',
'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
nacl_revision):
buildbot_common.BuildStep('Add Text Files')
InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)
# Replace a few placeholders in README
readme_text = open(os.path.join(SDK_SRC_DIR, 'README')).read()
readme_text = readme_text.replace('${VERSION}', pepper_ver)
readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
readme_text = readme_text.replace('${CHROME_COMMIT_POSITION}',
build_version.ChromeCommitPosition())
readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)
# Year/Month/Day Hour:Minute:Second
time_format = '%Y/%m/%d %H:%M:%S'
readme_text = readme_text.replace('${DATE}',
datetime.datetime.now().strftime(time_format))
open(os.path.join(pepperdir, 'README'), 'w').write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
buildbot_common.BuildStep('Untar Toolchains')
platform = getos.GetPlatform()
build_platform = '%s_x86' % platform
tmpdir = os.path.join(OUT_DIR, 'tc_temp')
buildbot_common.RemoveDir(tmpdir)
buildbot_common.MakeDir(tmpdir)
# Create a list of packages to extract, as tuples. The first element of each
# tuple is "$PACKAGE_TARGET/$PACKAGE"; the second is the destination
# directory relative to pepperdir/toolchain.
extract_packages = []
for toolchain in toolchains:
toolchain_map = TOOLCHAIN_PACKAGE_MAP.get(toolchain, None)
if toolchain_map:
package_name, tcdir, _ = toolchain_map
package_tuple = (os.path.join(build_platform, package_name),
tcdir % {'platform': platform})
extract_packages.append(package_tuple)
# On linux we also want to extract the arm_trusted package which contains
# the ARM libraries we ship in support of sel_ldr_arm.
if platform == 'linux':
extract_packages.append((os.path.join(build_platform, 'arm_trusted'),
'arm_trusted'))
if extract_packages:
# Extract all of the packages into the temp directory.
package_names = [package_tuple[0] for package_tuple in extract_packages]
buildbot_common.Run([sys.executable, PKGVER,
'--packages', ','.join(package_names),
'--tar-dir', NACL_TOOLCHAINTARS_DIR,
'--dest-dir', tmpdir,
'extract'])
# Move all the packages we extracted to the correct destination.
for package_name, dest_dir in extract_packages:
full_src_dir = os.path.join(tmpdir, package_name)
full_dst_dir = os.path.join(pepperdir, 'toolchain', dest_dir)
buildbot_common.Move(full_src_dir, full_dst_dir)
# Cleanup the temporary directory we are no longer using.
buildbot_common.RemoveDir(tmpdir)
# List of toolchain headers to install.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory.
NACL_HEADER_MAP = {
'newlib': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/irt/irt_extension.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/pthread/pthread.h', ''),
('native_client/src/untrusted/pthread/semaphore.h', ''),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/public/irt_ppapi.h', ''),
],
'glibc': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/irt/irt_extension.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/public/irt_ppapi.h', ''),
],
}
def InstallFiles(src_root, dest_root, file_list):
"""Copy a set of files from src_root to dest_root according
to the given mapping. This allows files to be copied to a
location in the destination tree that is different from their
location in the source tree.
If the destination mapping ends with a '/' then the destination
basename is inherited from the source file.
Wildcards can be used in the source list but this is not recommended
as it can end up adding things to the SDK unintentionally.
"""
for file_spec in file_list:
# The list of files to install can be a simple list of
# strings or a list of pairs, where each pair corresponds
# to a mapping from source to destination names.
if type(file_spec) == str:
src_file = dest_file = file_spec
else:
src_file, dest_file = file_spec
src_file = os.path.join(src_root, src_file)
# Expand sources files using glob.
sources = glob.glob(src_file)
if not sources:
sources = [src_file]
if len(sources) > 1 and not dest_file.endswith('/'):
buildbot_common.ErrorExit("Target file must end in '/' when "
"using globbing to install multiple files")
for source in sources:
if dest_file.endswith('/'):
dest = os.path.join(dest_file, os.path.basename(source))
else:
dest = dest_file
dest = os.path.join(dest_root, dest)
if not os.path.isdir(os.path.dirname(dest)):
buildbot_common.MakeDir(os.path.dirname(dest))
buildbot_common.CopyFile(source, dest)
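# Hypothetical file_list illustrating the three supported forms (paths are
# made up):
#
#     InstallFiles(src_root, dest_root, [
#         'README',                          # same relative path in both trees
#         ('docs/*.txt', 'documentation/'),  # glob; basenames kept, dir remapped
#         ('tools/run.py', 'bin/run'),       # explicit rename
#     ])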
def InstallNaClHeaders(tc_dst_inc, tcname):
"""Copies NaCl headers to expected locations in the toolchain."""
InstallFiles(SRC_DIR, tc_dst_inc, NACL_HEADER_MAP[GetToolchainLibc(tcname)])
def MakeNinjaRelPath(path):
return os.path.join(os.path.relpath(OUT_DIR, SRC_DIR), path)
TOOLCHAIN_LIBS = {
'newlib' : [
'libppapi.a',
'libppapi_stub.a',
],
'glibc': [
'libppapi.a',
'libppapi.so',
'libppapi_stub.a',
]
}
def GypNinjaInstall(pepperdir, toolchains):
tools_files_32 = [
['sel_ldr', 'sel_ldr_x86_32'],
['irt_core_newlib_x32.nexe', 'irt_core_x86_32.nexe'],
['irt_core_newlib_x64.nexe', 'irt_core_x86_64.nexe'],
]
arm_files = [
['elf_loader_newlib_arm.nexe', 'elf_loader_arm.nexe'],
]
tools_files_64 = []
platform = getos.GetPlatform()
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if platform != 'win':
tools_files_64 += [
['dump_syms', 'dump_syms'],
['minidump_dump', 'minidump_dump'],
['minidump_stackwalk', 'minidump_stackwalk']
]
tools_files_64.append(['sel_ldr', 'sel_ldr_x86_64'])
tools_files_64.append(['ncval_new', 'ncval'])
if platform == 'linux':
tools_files_32.append(['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_32'])
tools_files_64.append(['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_64'])
tools_files_32.append(['nonsfi_loader_newlib_x32_nonsfi.nexe',
'nonsfi_loader_x86_32'])
tools_dir = os.path.join(pepperdir, 'tools')
buildbot_common.MakeDir(tools_dir)
# Add .exe extensions to all windows tools
for pair in tools_files_32 + tools_files_64:
if platform == 'win' and not pair[0].endswith('.nexe'):
pair[0] += '.exe'
pair[1] += '.exe'
# Add ARM binaries
if platform == 'linux' and not options.no_arm_trusted:
arm_files += [
['irt_core_newlib_arm.nexe', 'irt_core_arm.nexe'],
['nacl_helper_bootstrap', 'nacl_helper_bootstrap_arm'],
['nonsfi_loader_newlib_arm_nonsfi.nexe', 'nonsfi_loader_arm'],
['sel_ldr', 'sel_ldr_arm']
]
InstallFiles(GetNinjaOutDir('x64'), tools_dir, tools_files_64)
InstallFiles(GetNinjaOutDir('ia32'), tools_dir, tools_files_32)
InstallFiles(GetNinjaOutDir('arm'), tools_dir, arm_files)
for tc in toolchains:
if tc in ('host', 'clang-newlib'):
continue
elif tc == 'pnacl':
xarches = (None, 'ia32', 'x64', 'arm')
elif tc in ('x86_glibc', 'x86_newlib'):
xarches = ('ia32', 'x64')
elif tc == 'arm_glibc':
xarches = ('arm',)
else:
raise AssertionError('unexpected toolchain value: %s' % tc)
for xarch in xarches:
src_dir = GetGypBuiltLib(tc, xarch)
dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
libc = GetToolchainLibc(tc)
InstallFiles(src_dir, dst_dir, TOOLCHAIN_LIBS[libc])
def GypNinjaBuild_NaCl(rel_out_dir):
gyp_py = os.path.join(NACL_DIR, 'build', 'gyp_nacl')
nacl_core_sdk_gyp = os.path.join(NACL_DIR, 'build', 'nacl_core_sdk.gyp')
all_gyp = os.path.join(NACL_DIR, 'build', 'all.gyp')
out_dir_32 = MakeNinjaRelPath(rel_out_dir + '-ia32')
out_dir_64 = MakeNinjaRelPath(rel_out_dir + '-x64')
out_dir_arm = MakeNinjaRelPath(rel_out_dir + '-arm')
out_dir_clang_32 = MakeNinjaRelPath(rel_out_dir + '-clang-ia32')
out_dir_clang_64 = MakeNinjaRelPath(rel_out_dir + '-clang-x64')
out_dir_clang_arm = MakeNinjaRelPath(rel_out_dir + '-clang-arm')
GypNinjaBuild('ia32', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_32,
gyp_defines=['use_nacl_clang=0'])
GypNinjaBuild('x64', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_64,
gyp_defines=['use_nacl_clang=0'])
GypNinjaBuild('arm', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_arm,
gyp_defines=['use_nacl_clang=0'])
GypNinjaBuild('ia32', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk',
out_dir_clang_32, gyp_defines=['use_nacl_clang=1'])
GypNinjaBuild('x64', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk',
out_dir_clang_64, gyp_defines=['use_nacl_clang=1'])
GypNinjaBuild('arm', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk',
out_dir_clang_arm, gyp_defines=['use_nacl_clang=1'])
GypNinjaBuild('x64', gyp_py, all_gyp, 'ncval_new', out_dir_64)
def GypNinjaBuild_Breakpad(rel_out_dir):
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if getos.GetPlatform() == 'win':
return
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'breakpad', 'breakpad.gyp')
build_list = ['dump_syms', 'minidump_dump', 'minidump_stackwalk']
GypNinjaBuild('x64', gyp_py, gyp_file, build_list, out_dir)
def GypNinjaBuild_PPAPI(arch, rel_out_dir, gyp_defines=None):
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client',
'native_client.gyp')
GypNinjaBuild(arch, gyp_py, gyp_file, 'ppapi_lib', out_dir,
gyp_defines=gyp_defines)
def GypNinjaBuild_Pnacl(rel_out_dir, target_arch):
# TODO(binji): This will build the pnacl_irt_shim twice; once as part of the
# Chromium build, and once here. When we move more of the SDK build process
# to gyp, we can remove this.
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'src',
'untrusted', 'pnacl_irt_shim', 'pnacl_irt_shim.gyp')
targets = ['aot']
GypNinjaBuild(target_arch, gyp_py, gyp_file, targets, out_dir)
def GypNinjaBuild(arch, gyp_py_script, gyp_file, targets,
out_dir, gyp_defines=None):
gyp_env = dict(os.environ)
gyp_env['GYP_GENERATORS'] = 'ninja'
gyp_defines = gyp_defines or []
gyp_defines.append('nacl_allow_thin_archives=0')
if not options.no_use_sysroot:
gyp_defines.append('use_sysroot=1')
if options.mac_sdk:
gyp_defines.append('mac_sdk=%s' % options.mac_sdk)
if arch is not None:
gyp_defines.append('target_arch=%s' % arch)
if arch == 'arm':
gyp_env['GYP_CROSSCOMPILE'] = '1'
if options.no_arm_trusted:
gyp_defines.append('disable_cross_trusted=1')
if getos.GetPlatform() == 'mac':
gyp_defines.append('clang=1')
gyp_env['GYP_DEFINES'] = ' '.join(gyp_defines)
# We can't use windows path separators in GYP_GENERATOR_FLAGS since
# gyp uses shlex to parse them and treats '\' as an escape char.
gyp_env['GYP_GENERATOR_FLAGS'] = 'output_dir=%s' % out_dir.replace('\\', '/')
# Print relevant environment variables
for key, value in gyp_env.iteritems():
if key.startswith('GYP') or key in ('CC',):
print ' %s="%s"' % (key, value)
buildbot_common.Run(
[sys.executable, gyp_py_script, gyp_file, '--depth=.'],
cwd=SRC_DIR,
env=gyp_env)
NinjaBuild(targets, out_dir, arch)
def NinjaBuild(targets, out_dir, arch):
if type(targets) is not list:
targets = [targets]
out_config_dir = os.path.join(out_dir, GetConfigDir(arch))
buildbot_common.Run(['ninja', '-C', out_config_dir] + targets, cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains, build, clean):
buildbot_common.BuildStep('SDK Items')
if clean:
for dirname in glob.glob(os.path.join(OUT_DIR, GYPBUILD_DIR + '*')):
buildbot_common.RemoveDir(dirname)
build = True
if build:
GypNinjaBuild_NaCl(GYPBUILD_DIR)
GypNinjaBuild_Breakpad(GYPBUILD_DIR + '-x64')
if set(toolchains) & set(['x86_glibc', 'x86_newlib']):
GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR + '-ia32',
['use_nacl_clang=0'])
GypNinjaBuild_PPAPI('x64', GYPBUILD_DIR + '-x64',
['use_nacl_clang=0'])
if 'arm_glibc' in toolchains:
GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-arm',
['use_nacl_clang=0'] )
if 'pnacl' in toolchains:
GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR + '-clang-ia32',
['use_nacl_clang=1'])
GypNinjaBuild_PPAPI('x64', GYPBUILD_DIR + '-clang-x64',
['use_nacl_clang=1'])
GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-clang-arm',
['use_nacl_clang=1'])
# NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
for arch in ('ia32', 'arm'):
# Fill in the latest native pnacl shim library from the chrome build.
build_dir = GYPBUILD_DIR + '-pnacl-' + arch
GypNinjaBuild_Pnacl(build_dir, arch)
GypNinjaInstall(pepperdir, toolchains)
for toolchain in toolchains:
if toolchain not in ('host', 'clang-newlib'):
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, toolchain),
toolchain)
if 'pnacl' in toolchains:
# NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
for arch in ('ia32', 'arm'):
# Fill in the latest native pnacl shim library from the chrome build.
build_dir = GYPBUILD_DIR + '-pnacl-' + arch
if arch == 'ia32':
nacl_arches = ['x86-32', 'x86-64']
elif arch == 'arm':
nacl_arches = ['arm']
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
for nacl_arch in nacl_arches:
release_build_dir = os.path.join(OUT_DIR, build_dir, 'Release',
'gen', 'tc_pnacl_translate',
'lib-' + nacl_arch)
pnacldir = GetToolchainDir(pepperdir, 'pnacl')
pnacl_translator_lib_dir = GetPNaClTranslatorLib(pnacldir, nacl_arch)
if not os.path.isdir(pnacl_translator_lib_dir):
buildbot_common.ErrorExit('Expected %s directory to exist.' %
pnacl_translator_lib_dir)
buildbot_common.CopyFile(
os.path.join(release_build_dir, 'libpnacl_irt_shim.a'),
pnacl_translator_lib_dir)
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'x86'),
'pnacl')
InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'arm'),
'pnacl')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
dirpath = os.path.join(pepperdir, dirname)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
return dirpath
def BuildStepUpdateHelpers(pepperdir, clobber):
buildbot_common.BuildStep('Update project helpers')
build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
build_experimental, clobber):
buildbot_common.BuildStep('Update examples and libraries')
filters = {}
if not build_experimental:
filters['EXPERIMENTAL'] = False
dsc_toolchains = []
for t in toolchains:
if t.startswith('x86_') or t.startswith('arm_'):
if t[4:] not in dsc_toolchains:
dsc_toolchains.append(t[4:])
elif t == 'host':
dsc_toolchains.append(getos.GetPlatform())
else:
dsc_toolchains.append(t)
filters['TOOLS'] = dsc_toolchains
# Update examples and libraries
filters['DEST'] = [
'getting_started',
'examples/api',
'examples/demo',
'examples/tutorial',
'src'
]
tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
toolchains=dsc_toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
deps=True, clean=False, config='Debug', args=None):
buildbot_common.BuildStep(step_name)
build_projects.BuildProjectsBranch(pepperdir, directory, clean,
deps, config, args)
def BuildStepBuildLibraries(pepperdir, directory):
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Debug',
clean=True, config='Debug')
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Release',
clean=True, config='Release')
# Cleanup .pyc file generated while building libraries. Without
# this we would end up shipping the pyc in the SDK tarball.
buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
# Look for LICENSE files
license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')
license_files = []
for root, _, files in os.walk(fileroot):
for filename in files:
if license_filenames_re.match(filename):
path = os.path.join(root, filename)
license_files.append(path)
if extra_files:
license_files += [os.path.join(fileroot, f) for f in extra_files]
print '\n'.join(license_files)
if not os.path.isabs(output_filename):
output_filename = os.path.join(fileroot, output_filename)
generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
buildbot_common.BuildStep('Verify SDK Files')
file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
try:
print 'SDK directory: %s' % pepperdir
verify_filelist.Verify(file_list_path, pepperdir)
print 'OK'
except verify_filelist.ParseException, e:
buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
except verify_filelist.VerifyException, e:
file_list_rel = os.path.relpath(file_list_path)
verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
verify_filelist_py = os.path.relpath(verify_filelist_py)
pepperdir_rel = os.path.relpath(pepperdir)
msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
pepperdir_rel)
buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
buildbot_common.BuildStep('Tar Pepper Bundle')
buildbot_common.MakeDir(os.path.dirname(tarfile))
buildbot_common.Run([sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
'pepper_' + pepper_ver], cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
archive_url):
with open(tarfile, 'rb') as tarfile_stream:
archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
tarfile_stream)
archive = manifest_util.Archive(manifest_util.GetHostOS())
archive.url = archive_url
archive.size = archive_size
archive.checksum = archive_sha1
bundle = manifest_util.Bundle('pepper_' + pepper_ver)
bundle.revision = int(chrome_revision)
bundle.repath = 'pepper_' + pepper_ver
bundle.version = int(pepper_ver)
bundle.description = (
'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
pepper_ver, chrome_revision, nacl_revision))
bundle.stability = 'dev'
bundle.recommended = 'no'
bundle.archives = [archive]
return bundle
def Archive(filename, from_directory, step_link=True):
if buildbot_common.IsSDKBuilder():
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/'
else:
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk_test/'
bucket_path += build_version.ChromeVersion()
buildbot_common.Archive(filename, bucket_path, from_directory, step_link)
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
tarfile):
buildbot_common.BuildStep('Archive %s' % name)
tarname = os.path.basename(tarfile)
tarfile_dir = os.path.dirname(tarfile)
Archive(tarname, tarfile_dir)
# generate "manifest snippet" for this archive.
archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
build_version.ChromeVersion(), tarname)
bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
tarfile, archive_url)
manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
with open(manifest_snippet_file, 'wb') as manifest_snippet_stream:
manifest_snippet_stream.write(bundle.GetDataAsString())
Archive(tarname + '.json', OUT_DIR, step_link=False)
def BuildStepBuildPNaClComponent(version, revision):
# Sadly revision can go backwards for a given version: when a version
# is built from master, revision will be a huge number (in the hundreds of
# thousands). Once the branch happens, the revision will reset to zero.
# TODO(sbc): figure out how to compensate for this in some way such that
# revisions always go forward for a given version.
buildbot_common.BuildStep('PNaCl Component')
# Version numbers must follow the format specified in:
# https://developer.chrome.com/extensions/manifest/version
# So ensure that rev_major/rev_minor don't overflow and ensure there
# are no leading zeros.
if len(revision) > 4:
rev_minor = int(revision[-4:])
rev_major = int(revision[:-4])
version = "0.%s.%s.%s" % (version, rev_major, rev_minor)
else:
version = "0.%s.0.%s" % (version, revision)
buildbot_common.Run(['./make_pnacl_component.sh',
'pnacl_multicrx_%s.zip' % revision,
version], cwd=SCRIPT_DIR)
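# Worked example of the version mangling above: with version '45' and
# revision '123456', rev_major is 12 and rev_minor is 3456, giving
# '0.45.12.3456'; a short post-branch revision such as '982' gives '0.45.0.982'.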
def BuildStepArchivePNaClComponent(revision):
buildbot_common.BuildStep('Archive PNaCl Component')
Archive('pnacl_multicrx_%s.zip' % revision, OUT_DIR)
def BuildStepArchiveSDKTools():
buildbot_common.BuildStep('Build SDK Tools')
build_updater.BuildUpdater(OUT_DIR)
buildbot_common.BuildStep('Archive SDK Tools')
Archive('sdk_tools.tgz', OUT_DIR, step_link=False)
Archive('nacl_sdk.zip', OUT_DIR, step_link=False)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
"""Build the projects found in src/gonacl_appengine/src"""
buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
cmd = ['make', 'upload', 'REVISION=%s' % chrome_revision]
env = dict(os.environ)
env['NACL_SDK_ROOT'] = pepperdir
env['NACLPORTS_NO_ANNOTATE'] = "1"
buildbot_common.Run(cmd, env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--qemu', help='Add qemu for ARM.',
action='store_true')
parser.add_argument('--tar', help='Force the tar step.',
action='store_true')
parser.add_argument('--archive', help='Force the archive step.',
action='store_true')
parser.add_argument('--release', help='PPAPI release version.',
dest='release', default=None)
parser.add_argument('--build-app-engine',
help='Build AppEngine demos.', action='store_true')
parser.add_argument('--experimental',
help='build experimental examples and libraries', action='store_true',
dest='build_experimental')
parser.add_argument('--skip-toolchain', help='Skip toolchain untar',
action='store_true')
parser.add_argument('--no-clean', dest='clean', action='store_false',
help="Don't clean gypbuild directories")
parser.add_argument('--mac-sdk',
help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
parser.add_argument('--no-arm-trusted', action='store_true',
help='Disable building of ARM trusted components (sel_ldr, etc).')
parser.add_argument('--no-use-sysroot', action='store_true',
help='Disable building against sysroot.')
# To setup bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete build_sdk.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
global options
options = parser.parse_args(args)
buildbot_common.BuildStep('build_sdk')
if buildbot_common.IsSDKBuilder():
options.archive = True
# TODO(binji): re-enable app_engine build when the linux builder stops
# breaking when trying to git clone from github.
# See http://crbug.com/412969.
options.build_app_engine = False
options.tar = True
# NOTE: order matters here. This will be the order that is specified in the
# Makefiles; the first toolchain will be the default.
toolchains = ['pnacl', 'x86_glibc', 'arm_glibc', 'clang-newlib', 'host']
print 'Building: ' + ' '.join(toolchains)
platform = getos.GetPlatform()
if options.archive and not options.tar:
parser.error('Incompatible arguments with archive.')
chrome_version = int(build_version.ChromeMajorVersion())
chrome_revision = build_version.ChromeRevision()
nacl_revision = build_version.NaClRevision()
pepper_ver = str(chrome_version)
pepper_old = str(chrome_version - 1)
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
tarname = 'naclsdk_%s.tar.bz2' % platform
tarfile = os.path.join(OUT_DIR, tarname)
if options.release:
pepper_ver = options.release
print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)
if 'NACL_SDK_ROOT' in os.environ:
# We don't want the currently configured NACL_SDK_ROOT to have any effect
# on the build.
del os.environ['NACL_SDK_ROOT']
if platform == 'linux':
# Linux-only: make sure the debian/stable sysroot image is installed
install_script = os.path.join(SRC_DIR, 'build', 'linux', 'sysroot_scripts',
'install-sysroot.py')
buildbot_common.Run([sys.executable, install_script, '--arch=arm'])
buildbot_common.Run([sys.executable, install_script, '--arch=i386'])
buildbot_common.Run([sys.executable, install_script, '--arch=amd64'])
if not options.skip_toolchain:
BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
BuildStepDownloadToolchains(toolchains)
BuildStepUntarToolchains(pepperdir, toolchains)
if platform == 'linux':
buildbot_common.Move(os.path.join(pepperdir, 'toolchain', 'arm_trusted'),
os.path.join(OUT_DIR, 'arm_trusted'))
if platform == 'linux':
# Linux-only: Copy arm libraries from the arm_trusted package. These are
# needed to be able to run sel_ldr_arm under qemu.
arm_libs = [
'lib/arm-linux-gnueabihf/librt.so.1',
'lib/arm-linux-gnueabihf/libpthread.so.0',
'lib/arm-linux-gnueabihf/libgcc_s.so.1',
'lib/arm-linux-gnueabihf/libc.so.6',
'lib/arm-linux-gnueabihf/ld-linux-armhf.so.3',
'lib/arm-linux-gnueabihf/libm.so.6',
'usr/lib/arm-linux-gnueabihf/libstdc++.so.6'
]
arm_lib_dir = os.path.join(pepperdir, 'tools', 'lib', 'arm_trusted', 'lib')
buildbot_common.MakeDir(arm_lib_dir)
for arm_lib in arm_libs:
arm_lib = os.path.join(OUT_DIR, 'arm_trusted', arm_lib)
buildbot_common.CopyFile(arm_lib, arm_lib_dir)
buildbot_common.CopyFile(os.path.join(OUT_DIR, 'arm_trusted', 'qemu-arm'),
os.path.join(pepperdir, 'tools'))
BuildStepBuildToolchains(pepperdir, toolchains,
not options.skip_toolchain,
options.clean)
BuildStepUpdateHelpers(pepperdir, True)
BuildStepUpdateUserProjects(pepperdir, toolchains,
options.build_experimental, True)
BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)
# We ship with the libraries prebuilt, so build them first.
BuildStepBuildLibraries(pepperdir, 'src')
GenerateNotice(pepperdir)
# Verify the SDK contains what we expect.
BuildStepVerifyFilelist(pepperdir)
if options.tar:
BuildStepTarBundle(pepper_ver, tarfile)
if platform == 'linux':
BuildStepBuildPNaClComponent(pepper_ver, chrome_revision)
if options.build_app_engine and platform == 'linux':
BuildStepBuildAppEngine(pepperdir, chrome_revision)
if options.qemu:
qemudir = os.path.join(NACL_DIR, 'toolchain', 'linux_arm-trusted')
oshelpers.Copy(['-r', qemudir, pepperdir])
# Archive the results on Google Cloud Storage.
if options.archive:
BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
tarfile)
# Only archive sdk_tools/naclport/pnacl_component on linux.
if platform == 'linux':
BuildStepArchiveSDKTools()
BuildStepArchivePNaClComponent(chrome_revision)
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
buildbot_common.ErrorExit('build_sdk: interrupted')
|
|
# This Python module is part of the PyRate software package.
#
# Copyright 2022 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains tests for the algorithm.py PyRate module.
"""
from datetime import date
from math import pi, cos, sin, radians
from numpy import array, reshape, squeeze
from os.path import join
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from pyrate.core.algorithm import (least_squares_covariance,
is_square,
unit_vector,
ifg_date_lookup,
get_all_epochs,
get_epochs,
first_second_ids,
factorise_integer,
)
from pyrate.configuration import parse_namelist
from pyrate.core.shared import Ifg, convert_radians_to_mm
from tests.common import small5_mock_ifgs, SML_TEST_TIF, UnitTestAdaptation
class TestLeastSquaresTests(UnitTestAdaptation):
"""
Unit tests for the PyRate least_squares_covariance() implementation.
"""
@staticmethod
def test_least_squares_covariance():
b = array([[13, 7.2, 5.7]]).T
A = array([[1, 0.4, 0.3], [1, 1, 1]]).T
v = array([[1, 1, 1]]).T
r = least_squares_covariance(A, b, v)
exp = [10.1628, 2.8744]
assert_array_almost_equal(r.T.squeeze(), exp, decimal=4)
def test_least_squares_covariance_overdetermined(self):
# must be overdetermined, i.e. more observations than params
b = array([[10]]).T
A = array([[1]]).T
v = array([[1]]).T
self.assertRaises(ValueError, least_squares_covariance, A, b, v)
# try non transposed style
b = array([[10]])
A = array([[1]])
v = array([[1]])
self.assertRaises(ValueError, least_squares_covariance, A, b, v)
class TestAlgorithmTests(UnitTestAdaptation):
"""
Misc unittests for functions in the algorithm module.
"""
def test_factorise(self):
self.assertEqual(factorise_integer(1), (1, 1))
self.assertEqual(factorise_integer(2), (2, 1))
self.assertEqual(factorise_integer(4), (2, 2))
self.assertEqual(factorise_integer(9), (3, 3))
self.assertEqual(factorise_integer(76), (4, 19))
self.assertEqual(factorise_integer(76.5), (4, 19))
a, b = factorise_integer(12)
self.assertEqual(type(a), int)
self.assertEqual(type(b), int)
def test_is_square(self):
self.assertTrue(is_square(np.empty((2, 2))))
def test_is_not_square(self):
for shape in [(3, 2), (2, 3)]:
self.assertFalse(is_square(np.empty(shape)))
@staticmethod
def test_phase_conversion():
# ROIPAC interferograms in units of radians, verify conversion to mm
xs, ys = 5, 7
data = (np.arange(xs * ys) - 1.7) * 0.1 # fake a range of values
data = np.where(data == 0, np.nan, data)
wavelen = 0.0562356424
exp = (data * wavelen * 1000) / (4 * pi)
act = convert_radians_to_mm(data, wavelen)
assert_allclose(exp, act)
def test_unit_vector(self):
# last values here simulate a descending pass
incidence = [radians(x) for x in (34.3, 39.3, 29.3, 34.3)]
azimuth = [radians(x) for x in (77.8, 77.9, 80.0, 282.2)]
vert, ns, ew = [], [], []
for i, a in zip(incidence, azimuth):
vert.append(cos(i))
ns.append(sin(i) * cos(a))
ew.append(sin(i) * sin(a))
sh = 4
unitv = [array(ew), array(ns), array(vert)]
unitv = [a.reshape(sh) for a in unitv]
# NB: assumes radian inputs
act = unit_vector(reshape(incidence, sh), reshape(azimuth, sh))
for a, e in zip(act, unitv):
assert_array_almost_equal(squeeze(a), e)
# check unit vec components have correct signs
E, N, V = act
# test E/W component of ascending is +ve
self.assertTrue((E[:-1] > 0).all())
self.assertTrue(E[-1] < 0) # test E/W component of descending is -ve
self.assertTrue((N > 0).all()) # ensure all north values are positive
# check unit vec components have correct magnitudes
self.assertTrue((abs(V) > abs(E)).all())
self.assertTrue((abs(V) > abs(N)).all())
self.assertTrue((abs(E) > abs(N)).all())
class TestDateLookup(UnitTestAdaptation):
"""
Tests for the algorithm.ifg_date_lookup() function.
"""
@classmethod
def setup_class(cls):
cls.ifgs = small5_mock_ifgs()
def test_ifg_date_lookup(self):
# check reverse lookup of ifg given a first and second date tuple
date_pair = (date(2006, 8, 28), date(2006, 12, 11))
i = ifg_date_lookup(self.ifgs, date_pair)
self.assertEqual(self.ifgs[0], i)
# test with reversed date tuple, should reorder it according to age
date_pair = (date(2006, 12, 11), date(2006, 11, 6))
i = ifg_date_lookup(self.ifgs, date_pair)
self.assertEqual(self.ifgs[1], i)
def test_ifg_date_lookup_failure(self):
# error when lookup cannot find an ifg given a date pair
dates = (date(2006, 12, 11), date(2007, 3, 26))
self.assertRaises(ValueError, ifg_date_lookup, self.ifgs, dates)
def test_date_lookup_bad_inputs(self):
# test some bad inputs to date lookup
inputs = [(None, None), (1, 10), (34.56, 345.93),
(date(2007, 3, 26), ""), (date(2007, 3, 26), None)]
for d in inputs:
self.assertRaises(ValueError, ifg_date_lookup, self.ifgs, d)
# TODO: InitialModelTests
#class InitialModelTests(unittest.TestCase):
# def test_initial_model(self):
# 1. fake an RSC file with coords
# 2. fake a ones(shape) # could also make a ramp etc
# data is single band of DISPLACEMENT
#raise NotImplementedError
class TestEpochs(UnitTestAdaptation):
"""
Unittests for the EpochList class.
"""
def test_get_epochs(self):
def str2date(s):
segs = s[:4], s[4:6], s[6:] # year, month, day
return date(*[int(sg) for sg in segs])
raw_date = ['20060619', '20060828', '20061002', '20061106', '20061211',
'20070115', '20070219', '20070326', '20070430', '20070604',
'20070709', '20070813', '20070917']
exp_dates = [str2date(d) for d in raw_date]
exp_repeat = [1, 1, 3, 3, 4, 3, 3, 3, 3, 3, 3, 2, 2]
exp_spans = [0, 0.1916, 0.2875, 0.3833, 0.4791, 0.5749, 0.6708, 0.7666,
0.8624, 0.9582, 1.0541, 1.1499, 1.2457]
ifms = join(SML_TEST_TIF, "ifms_17")
ifgs = [Ifg(join(SML_TEST_TIF, p)) for p in parse_namelist(ifms)]
for i in ifgs:
i.open()
epochs = get_epochs(ifgs)[0]
self.assertTrue((exp_dates == epochs.dates).all())
self.assertTrue((exp_repeat == epochs.repeat).all())
assert_array_almost_equal(exp_spans, epochs.spans, decimal=4)
def test_get_all_epochs(self):
# test function to extract all dates from sequence of ifgs
ifgs = small5_mock_ifgs()
for i in ifgs:
i.nodata_value = 0
dates = [date(2006, 8, 28), date(2006, 11, 6), date(2006, 12, 11),
date(2007, 1, 15), date(2007, 3, 26), date(2007, 9, 17)]
self.assertEqual(dates, sorted(set(get_all_epochs(ifgs))))
def test_get_epoch_count(self):
self.assertEqual(6, len(set(get_all_epochs(small5_mock_ifgs()))))
def test_first_second_ids(self):
d0 = date(2006, 6, 19)
d1 = date(2006, 8, 28)
d2 = date(2006, 10, 2)
d3 = date(2006, 11, 6)
exp = {d0: 0, d1: 1, d2: 2, d3: 3}
# test unordered and with duplicates
self.assertEqual(exp, first_second_ids([d3, d0, d2, d1]))
self.assertEqual(exp, first_second_ids([d3, d0, d2, d1, d3, d0]))
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides access to LDAP servers, along with some basic functionality required for Hue and
User Admin to work seamlessly with LDAP.
"""
from builtins import str, object
import logging
import sys
LOG = logging.getLogger(__name__)
try:
import ldap
import ldap.filter
from ldap import SCOPE_SUBTREE
except ImportError:
LOG.warning('ldap module not found')
SCOPE_SUBTREE = None
import re
import desktop.conf
from desktop.lib.python_util import CaseInsensitiveDict
from useradmin.models import User
if sys.version_info[0] > 2:
from django.utils.encoding import smart_str
else:
from django.utils.encoding import smart_text as smart_str
CACHED_LDAP_CONN = None
class LdapBindException(Exception):
pass
class LdapSearchException(Exception):
pass
def get_connection_from_server(server=None):
ldap_servers = desktop.conf.LDAP.LDAP_SERVERS.get()
if server and ldap_servers:
ldap_config = ldap_servers[server]
else:
ldap_config = desktop.conf.LDAP
return get_connection(ldap_config)
def get_connection(ldap_config):
global CACHED_LDAP_CONN
if CACHED_LDAP_CONN is not None:
return CACHED_LDAP_CONN
ldap_url = ldap_config.LDAP_URL.get()
username = ldap_config.BIND_DN.get()
password = desktop.conf.get_ldap_bind_password(ldap_config)
ldap_cert = ldap_config.LDAP_CERT.get()
search_bind_authentication = ldap_config.SEARCH_BIND_AUTHENTICATION.get()
if ldap_url is None:
raise Exception('No LDAP URL was specified')
if search_bind_authentication:
return LdapConnection(ldap_config, ldap_url, username, password, ldap_cert)
else:
return LdapConnection(ldap_config, ldap_url, get_ldap_username(username, ldap_config.NT_DOMAIN.get()), password, ldap_cert)
def get_auth(ldap_config):
ldap_url = ldap_config.LDAP_URL.get()
if ldap_url is None:
raise Exception('No LDAP URL was specified')
username = ldap_config.BIND_DN.get()
password = ldap_config.BIND_PASSWORD.get()
if not password:
password = ldap_config.BIND_PASSWORD_SCRIPT.get()
ldap_cert = ldap_config.LDAP_CERT.get()
search_bind_authentication = ldap_config.SEARCH_BIND_AUTHENTICATION.get()
if search_bind_authentication:
ldap_conf = (ldap_url, username, password, ldap_cert)
else:
ldap_conf = (ldap_url, get_ldap_username(username, ldap_config.NT_DOMAIN.get()), password, ldap_cert)
return ldap_conf
def get_ldap_username(username, nt_domain):
if nt_domain:
return '%s@%s' % (username, nt_domain)
else:
return username
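# Illustrative sketch of the helper above (the values are hypothetical, not
# taken from any real configuration):
#   get_ldap_username('jdoe', 'example.com')  ->  'jdoe@example.com'
#   get_ldap_username('jdoe', None)           ->  'jdoe'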
def get_ldap_user_kwargs(username):
if desktop.conf.LDAP.IGNORE_USERNAME_CASE.get():
return {
'username__iexact': username
}
else:
return {
'username': username
}
def get_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
return User.objects.get(**username_kwargs)
def get_or_create_ldap_user(username):
username_kwargs = get_ldap_user_kwargs(username)
users = User.objects.filter(**username_kwargs)
if users.exists():
return User.objects.get(**username_kwargs), False
else:
if desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get():
username = username.lower()
elif desktop.conf.LDAP.FORCE_USERNAME_UPPERCASE.get():
username = username.upper()
return User.objects.create(username=username), True
class LdapConnection(object):
"""
Constructor creates LDAP connection. Contains methods
to easily query an LDAP server.
"""
def __init__(self, ldap_config, ldap_url, bind_user=None, bind_password=None, cert_file=None):
"""
Constructor initializes the LDAP connection
"""
self.ldap_config = ldap_config
self._ldap_url = ldap_url
self._username = bind_user
self._ldap_cert = cert_file
# Certificate-related config settings
if ldap_config.LDAP_CERT.get():
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, ldap_config.LDAP_CERT.get())
else:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
if self.ldap_config.FOLLOW_REFERRALS.get():
ldap.set_option(ldap.OPT_REFERRALS, 1)
else:
ldap.set_option(ldap.OPT_REFERRALS, 0)
if ldap_config.DEBUG.get():
ldap.set_option(ldap.OPT_DEBUG_LEVEL, ldap_config.DEBUG_LEVEL.get())
self.ldap_handle = ldap.initialize(uri=ldap_url, trace_level=ldap_config.TRACE_LEVEL.get())
if self.ldap_config.USE_START_TLS.get() and not ldap_url.lower().startswith('ldaps'):
self.ldap_handle.start_tls_s()
if bind_user:
try:
self.ldap_handle.simple_bind_s(bind_user, bind_password)
except Exception as e:
self.handle_bind_exception(e, bind_user)
else:
try:
# Do anonymous bind
self.ldap_handle.simple_bind_s('', '')
except Exception as e:
self.handle_bind_exception(e)
def handle_bind_exception(self, exception, bind_user=None):
LOG.error("LDAP access bind error: %s" % exception)
if 'Can\'t contact LDAP server' in str(exception):
msg = "Can\'t contact LDAP server"
else:
if bind_user:
msg = "Failed to bind to LDAP server"
else:
msg = "Failed to bind to LDAP server anonymously"
raise LdapBindException(msg)
def _get_search_params(self, name, attr, find_by_dn=False):
"""
if we are to find this ldap object by full distinguished name,
then search by setting search_dn to the 'name'
rather than by filtering by 'attr'.
"""
base_dn = self._get_root_dn()
if find_by_dn:
search_dn = re.sub(r'(\w+=)', lambda match: match.group(0).upper(), name)
if not search_dn.upper().endswith(base_dn.upper()):
raise LdapSearchException("Distinguished Name provided does not contain configured Base DN. Base DN: %(base_dn)s, DN: %(dn)s" % {
'base_dn': base_dn,
'dn': search_dn
})
return (search_dn, '')
else:
return (base_dn, '(' + attr + '=' + name + ')')
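# Illustrative sketch of the two return shapes above, assuming a hypothetical
# configured Base DN of 'dc=example,dc=com':
#   _get_search_params('jdoe', 'uid')
#       -> ('dc=example,dc=com', '(uid=jdoe)')      # search the base, filter on attr
#   _get_search_params('uid=jdoe,dc=example,dc=com', 'uid', find_by_dn=True)
#       -> ('UID=jdoe,DC=example,DC=com', '')       # search the DN itself, no filter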
@classmethod
def _transform_find_user_results(cls, result_data, user_name_attr):
"""
:param result_data: List of dictionaries that have ldap attributes and their associated values.
Generally the result list from an ldapsearch request.
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username``
in the return dictionary.
:returns list of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'username': <ldap attribute associated with user_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
"""
user_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if user_name_attr not in data:
LOG.warning('Could not find %s in ldap attributes' % user_name_attr)
continue
ldap_info = {
'dn': dn,
'username': smart_str(data[user_name_attr][0])
}
if 'givenName' in data:
first_name = smart_str(data['givenName'][0])
if len(first_name) > 30:
LOG.warning('First name is truncated to 30 characters for [<User: %s>].' % ldap_info['username'])
ldap_info['first'] = first_name[:30]
if 'sn' in data:
last_name = smart_str(data['sn'][0])
if len(last_name) > 30:
LOG.warning('Last name is truncated to 30 characters for [<User: %s>].' % ldap_info['username'])
ldap_info['last'] = last_name[:30]
if 'mail' in data:
ldap_info['email'] = smart_str(data['mail'][0])
# memberOf and isMemberOf should be the same if they both exist
if 'memberOf' in data:
ldap_info['groups'] = [smart_str(member) for member in data['memberOf']]
if 'isMemberOf' in data:
ldap_info['groups'] = [smart_str(member) for member in data['isMemberOf']]
user_info.append(ldap_info)
return user_info
def _transform_find_group_results(self, result_data, group_name_attr, group_member_attr):
group_info = []
if result_data:
for dn, data in result_data:
# Skip Active Directory # refldap entries.
if dn is not None:
# Case insensitivity
data = CaseInsensitiveDict.from_dict(data)
# Skip unnamed entries.
if group_name_attr not in data:
LOG.warning('Could not find %s in ldap attributes' % group_name_attr)
continue
group_name = data[group_name_attr][0]
if desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get():
group_name = group_name.lower()
elif desktop.conf.LDAP.FORCE_USERNAME_UPPERCASE.get():
group_name = group_name.upper()
ldap_info = {
'dn': dn,
'name': group_name
}
if group_member_attr in data and group_member_attr.lower() != 'memberuid':
ldap_info['members'] = data[group_member_attr]
else:
LOG.warning('Skipping import of non-posix users from group %s since group_member_attr '
'is memberUid or group did not contain any members' % group_name)
ldap_info['members'] = []
if 'posixgroup' in (item.lower() for item in data['objectClass']) and 'memberUid' in data:
ldap_info['posix_members'] = data['memberUid']
else:
LOG.warning('Skipping import of posix users from group %s since posixGroup '
'not an objectClass or no memberUids found' % group_name)
ldap_info['posix_members'] = []
group_info.append(ldap_info)
return group_info
def find_users(self, username_pattern, search_attr=None, user_name_attr=None, user_filter=None, find_by_dn=False, scope=SCOPE_SUBTREE):
"""
LDAP search helper method finding users. This supports searching for users
by distinguished name, or the configured username attribute.
:param username_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none.
:param search_attr: The ldap attribute to search for ``username_pattern``. Defaults to LDAP -> USERS -> USER_NAME_ATTR config value.
:param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
:param find_by_dn: Search by distinguished name.
:param scope: ldapsearch scope.
:returns: List of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'username': <ldap attribute associated with user_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
"""
if not search_attr:
search_attr = self.ldap_config.USERS.USER_NAME_ATTR.get()
if not user_name_attr:
user_name_attr = search_attr
if not user_filter:
user_filter = self.ldap_config.USERS.USER_FILTER.get()
if not user_filter.startswith('('):
user_filter = '(' + user_filter + ')'
# Allow wild cards on non distinguished names
sanitized_name = ldap.filter.escape_filter_chars(username_pattern).replace(r'\2a', r'*')
# Fix issue where \, is converted to \5c,
sanitized_name = sanitized_name.replace(r'\5c,', r'\2c')
search_dn, user_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn)
ldap_filter = user_filter
if user_name_filter:
if ldap_filter.lower() in ('(objectclass=*)', 'objectclass=*'):
ldap_filter = ''
ldap_filter = '(&' + ldap_filter + user_name_filter + ')'
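# For example (hypothetical values): a configured user_filter of
# '(objectClass=person)' combined with a name filter of '(uid=jdoe)' yields
# '(&(objectClass=person)(uid=jdoe))'; a catch-all '(objectClass=*)' filter is
# dropped first, leaving just '(&(uid=jdoe))'.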
attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', user_name_attr]
self._search_dn = search_dn
self._ldap_filter = ldap_filter
self._attrlist = attrlist
try:
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT:
return self._transform_find_user_results(result_data, user_name_attr)
else:
return []
except ldap.LDAPError as e:
LOG.warning("LDAP Error: %s" % e)
return None
def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None,
group_member_attr=None, group_filter=None, find_by_dn=False, scope=SCOPE_SUBTREE):
"""
LDAP search helper method for finding groups
:param groupname_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none.
:param search_attr: The ldap attribute to search for ``groupname_pattern``. Defaults to LDAP -> GROUPS -> GROUP_NAME_ATTR config value.
:param group_name_attr: The ldap attribute that is returned by the server to map to ``name`` in the return dictionary.
:param find_by_dn: Search by distinguished name.
:param scope: ldapsearch scope.
:returns: List of dictionaries that take on the following form: {
'dn': <distinguished name of entry>,
'name': <ldap attribute associated with group_name_attr>
'first': <first name>
'last': <last name>
'email': <email>
'groups': <list of DNs of groups that user is a member of>
}
"""
if not search_attr:
search_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get()
if not group_name_attr:
group_name_attr = search_attr
if not group_member_attr:
group_member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get()
if not group_filter:
group_filter = self.ldap_config.GROUPS.GROUP_FILTER.get()
if not group_filter.startswith('('):
group_filter = '(' + group_filter + ')'
# Allow wild cards on non distinguished names
sanitized_name = ldap.filter.escape_filter_chars(groupname_pattern).replace(r'\2a', r'*')
# Fix issue where \, is converted to \5c,
sanitized_name = sanitized_name.replace(r'\5c,', r'\2c')
search_dn, group_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn)
ldap_filter = '(&' + group_filter + group_name_filter + ')'
attrlist = ['objectClass', 'dn', 'memberUid', group_member_attr, group_name_attr]
self._search_dn = search_dn
self._ldap_filter = ldap_filter
self._attrlist = attrlist
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT:
return self._transform_find_group_results(result_data, group_name_attr, group_member_attr)
else:
return []
def find_members_of_group(self, dn, search_attr, ldap_filter, scope=SCOPE_SUBTREE):
if ldap_filter and not ldap_filter.startswith('('):
ldap_filter = '(' + ldap_filter + ')'
# Allow wild cards on non distinguished names
dn = ldap.filter.escape_filter_chars(dn).replace(r'\2a', r'*')
# Fix issue where \, is converted to \5c,
dn = dn.replace(r'\5c,', r'\2c')
search_dn, _ = self._get_search_params(dn, search_attr)
ldap_filter = '(&%(ldap_filter)s(|(isMemberOf=%(group_dn)s)(memberOf=%(group_dn)s)))' % {'group_dn': dn, 'ldap_filter': ldap_filter}
attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', search_attr]
self._search_dn = search_dn
self._ldap_filter = ldap_filter
self._attrlist = attrlist
ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT:
return result_data
else:
return []
def find_users_of_group(self, dn):
ldap_filter = self.ldap_config.USERS.USER_FILTER.get()
name_attr = self.ldap_config.USERS.USER_NAME_ATTR.get()
result_data = self.find_members_of_group(dn, name_attr, ldap_filter)
return self._transform_find_user_results(result_data, name_attr)
def find_groups_of_group(self, dn):
ldap_filter = self.ldap_config.GROUPS.GROUP_FILTER.get()
name_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get()
member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get()
result_data = self.find_members_of_group(dn, name_attr, ldap_filter)
return self._transform_find_group_results(result_data, name_attr, member_attr)
def _get_root_dn(self):
return self.ldap_config.BASE_DN.get()
def ldapsearch_cmd(self):
ldapsearch = 'ldapsearch -x -LLL -H {ldap_url} -D "{binddn}" -w "********" -b "{base}" ' \
'"{filterstring}" {attr}'.format(ldap_url=self._ldap_url,
binddn=self._username,
base=self._search_dn,
filterstring=self._ldap_filter,
attr=" ".join(self._attrlist))
return ldapsearch
|
|
import py, sys
import _cffi_backend as _cffi1_backend
def test_ffi_new():
ffi = _cffi1_backend.FFI()
p = ffi.new("int *")
p[0] = -42
assert p[0] == -42
def test_ffi_subclass():
class FOO(_cffi1_backend.FFI):
def __init__(self, x):
self.x = x
foo = FOO(42)
assert foo.x == 42
p = foo.new("int *")
assert p[0] == 0
def test_ffi_no_argument():
py.test.raises(TypeError, _cffi1_backend.FFI, 42)
def test_ffi_cache_type():
ffi = _cffi1_backend.FFI()
t1 = ffi.typeof("int **")
t2 = ffi.typeof("int *")
assert t2.item is t1.item.item
assert t2 is t1.item
assert ffi.typeof("int[][10]") is ffi.typeof("int[][10]")
assert ffi.typeof("int(*)()") is ffi.typeof("int(*)()")
def test_ffi_type_not_immortal():
import weakref, gc
ffi = _cffi1_backend.FFI()
t1 = ffi.typeof("int **")
t2 = ffi.typeof("int *")
w1 = weakref.ref(t1)
w2 = weakref.ref(t2)
del t1, ffi
gc.collect()
assert w1() is None
assert w2() is t2
ffi = _cffi1_backend.FFI()
assert ffi.typeof(ffi.new("int **")[0]) is t2
#
ffi = _cffi1_backend.FFI()
t1 = ffi.typeof("int ***")
t2 = ffi.typeof("int **")
w1 = weakref.ref(t1)
w2 = weakref.ref(t2)
del t2, ffi
gc.collect()
assert w1() is t1
assert w2() is not None # kept alive by t1
ffi = _cffi1_backend.FFI()
assert ffi.typeof("int * *") is t1.item
def test_ffi_cache_type_globally():
ffi1 = _cffi1_backend.FFI()
ffi2 = _cffi1_backend.FFI()
t1 = ffi1.typeof("int *")
t2 = ffi2.typeof("int *")
assert t1 is t2
def test_ffi_invalid():
ffi = _cffi1_backend.FFI()
# array of 10 times an "int[]" is invalid
py.test.raises(ValueError, ffi.typeof, "int[10][]")
def test_ffi_docstrings():
# check that all methods of the FFI class have a docstring.
check_type = type(_cffi1_backend.FFI.new)
for methname in dir(_cffi1_backend.FFI):
if not methname.startswith('_'):
method = getattr(_cffi1_backend.FFI, methname)
if isinstance(method, check_type):
assert method.__doc__, "method FFI.%s() has no docstring" % (
methname,)
def test_ffi_NULL():
NULL = _cffi1_backend.FFI.NULL
assert _cffi1_backend.FFI().typeof(NULL).cname == "void *"
def test_ffi_no_attr():
ffi = _cffi1_backend.FFI()
py.test.raises(AttributeError, "ffi.no_such_name")
py.test.raises(AttributeError, "ffi.no_such_name = 42")
py.test.raises(AttributeError, "del ffi.no_such_name")
def test_ffi_string():
ffi = _cffi1_backend.FFI()
p = ffi.new("char[]", init=b"foobar\x00baz")
assert ffi.string(p) == b"foobar"
assert ffi.string(cdata=p, maxlen=3) == b"foo"
def test_ffi_errno():
# xxx not really checking errno, just checking that we can read/write it
ffi = _cffi1_backend.FFI()
ffi.errno = 42
assert ffi.errno == 42
def test_ffi_alignof():
ffi = _cffi1_backend.FFI()
assert ffi.alignof("int") == 4
assert ffi.alignof("int[]") == 4
assert ffi.alignof("int[41]") == 4
assert ffi.alignof("short[41]") == 2
assert ffi.alignof(ffi.new("int[41]")) == 4
assert ffi.alignof(ffi.new("int[]", 41)) == 4
def test_ffi_sizeof():
ffi = _cffi1_backend.FFI()
assert ffi.sizeof("int") == 4
py.test.raises(ffi.error, ffi.sizeof, "int[]")
assert ffi.sizeof("int[41]") == 41 * 4
assert ffi.sizeof(ffi.new("int[41]")) == 41 * 4
assert ffi.sizeof(ffi.new("int[]", 41)) == 41 * 4
def test_ffi_callback():
ffi = _cffi1_backend.FFI()
assert ffi.callback("int(int)", lambda x: x + 42)(10) == 52
assert ffi.callback("int(*)(int)", lambda x: x + 42)(10) == 52
assert ffi.callback("int(int)", lambda x: x + "", -66)(10) == -66
assert ffi.callback("int(int)", lambda x: x + "", error=-66)(10) == -66
def test_ffi_callback_decorator():
ffi = _cffi1_backend.FFI()
assert ffi.callback(ffi.typeof("int(*)(int)"))(lambda x: x + 42)(10) == 52
deco = ffi.callback("int(int)", error=-66)
assert deco(lambda x: x + "")(10) == -66
assert deco(lambda x: x + 42)(10) == 52
def test_ffi_callback_onerror():
ffi = _cffi1_backend.FFI()
seen = []
def oops(*args):
seen.append(args)
@ffi.callback("int(int)", onerror=oops)
def fn1(x):
return x + ""
assert fn1(10) == 0
@ffi.callback("int(int)", onerror=oops, error=-66)
def fn2(x):
return x + ""
assert fn2(10) == -66
assert len(seen) == 2
exc, val, tb = seen[0]
assert exc is TypeError
assert isinstance(val, TypeError)
assert tb.tb_frame.f_code.co_name == "fn1"
exc, val, tb = seen[1]
assert exc is TypeError
assert isinstance(val, TypeError)
assert tb.tb_frame.f_code.co_name == "fn2"
#
py.test.raises(TypeError, ffi.callback, "int(int)",
lambda x: x, onerror=42) # <- not callable
def test_ffi_getctype():
ffi = _cffi1_backend.FFI()
assert ffi.getctype("int") == "int"
assert ffi.getctype("int", 'x') == "int x"
assert ffi.getctype("int*") == "int *"
assert ffi.getctype("int*", '') == "int *"
assert ffi.getctype("int*", 'x') == "int * x"
assert ffi.getctype("int", '*') == "int *"
assert ffi.getctype("int", replace_with=' * x ') == "int * x"
assert ffi.getctype(ffi.typeof("int*"), '*') == "int * *"
assert ffi.getctype("int", '[5]') == "int[5]"
assert ffi.getctype("int[5]", '[6]') == "int[6][5]"
assert ffi.getctype("int[5]", '(*)') == "int(*)[5]"
# special-case for convenience: automatically put '()' around '*'
assert ffi.getctype("int[5]", '*') == "int(*)[5]"
assert ffi.getctype("int[5]", '*foo') == "int(*foo)[5]"
assert ffi.getctype("int[5]", ' ** foo ') == "int(** foo)[5]"
def test_addressof():
ffi = _cffi1_backend.FFI()
a = ffi.new("int[10]")
b = ffi.addressof(a, 5)
b[2] = -123
assert a[7] == -123
def test_handle():
ffi = _cffi1_backend.FFI()
x = [2, 4, 6]
xp = ffi.new_handle(x)
assert ffi.typeof(xp) == ffi.typeof("void *")
assert ffi.from_handle(xp) is x
yp = ffi.new_handle([6, 4, 2])
assert ffi.from_handle(yp) == [6, 4, 2]
def test_ffi_cast():
ffi = _cffi1_backend.FFI()
assert ffi.cast("int(*)(int)", 0) == ffi.NULL
ffi.callback("int(int)") # side-effect of registering this string
py.test.raises(ffi.error, ffi.cast, "int(int)", 0)
def test_ffi_invalid_type():
ffi = _cffi1_backend.FFI()
e = py.test.raises(ffi.error, ffi.cast, "", 0)
assert str(e.value) == ("identifier expected\n"
"\n"
"^")
e = py.test.raises(ffi.error, ffi.cast, "struct struct", 0)
assert str(e.value) == ("struct or union name expected\n"
"struct struct\n"
" ^")
e = py.test.raises(ffi.error, ffi.cast, "struct never_heard_of_s", 0)
assert str(e.value) == ("undefined struct/union name\n"
"struct never_heard_of_s\n"
" ^")
e = py.test.raises(ffi.error, ffi.cast, "\t\n\x01\x1f~\x7f\x80\xff", 0)
marks = "?" if sys.version_info < (3,) else "??"
assert str(e.value) == ("identifier expected\n"
" ??~?%s%s\n"
" ^" % (marks, marks))
e = py.test.raises(ffi.error, ffi.cast, "X" * 600, 0)
assert str(e.value) == ("undefined type name")
def test_ffi_buffer():
ffi = _cffi1_backend.FFI()
a = ffi.new("signed char[]", [5, 6, 7])
assert ffi.buffer(a)[:] == b'\x05\x06\x07'
assert ffi.buffer(cdata=a, size=2)[:] == b'\x05\x06'
def test_ffi_from_buffer():
import array
ffi = _cffi1_backend.FFI()
a = array.array('H', [10000, 20000, 30000])
c = ffi.from_buffer(a)
assert ffi.typeof(c) is ffi.typeof("char[]")
ffi.cast("unsigned short *", c)[1] += 500
assert list(a) == [10000, 20500, 30000]
def test_memmove():
ffi = _cffi1_backend.FFI()
p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678])
ffi.memmove(p, p + 1, 4)
assert list(p) == [-2345, -3456, -3456, -4567, -5678]
p[2] = 999
ffi.memmove(p + 2, p, 6)
assert list(p) == [-2345, -3456, -2345, -3456, 999]
ffi.memmove(p + 4, ffi.new("char[]", b"\x71\x72"), 2)
if sys.byteorder == 'little':
assert list(p) == [-2345, -3456, -2345, -3456, 0x7271]
else:
assert list(p) == [-2345, -3456, -2345, -3456, 0x7172]
def test_memmove_buffer():
import array
ffi = _cffi1_backend.FFI()
a = array.array('H', [10000, 20000, 30000])
p = ffi.new("short[]", 5)
ffi.memmove(p, a, 6)
assert list(p) == [10000, 20000, 30000, 0, 0]
ffi.memmove(p + 1, a, 6)
assert list(p) == [10000, 10000, 20000, 30000, 0]
b = array.array('h', [-1000, -2000, -3000])
ffi.memmove(b, a, 4)
assert b.tolist() == [10000, 20000, -3000]
assert a.tolist() == [10000, 20000, 30000]
p[0] = 999
p[1] = 998
p[2] = 997
p[3] = 996
p[4] = 995
ffi.memmove(b, p, 2)
assert b.tolist() == [999, 20000, -3000]
ffi.memmove(b, p + 2, 4)
assert b.tolist() == [997, 996, -3000]
p[2] = -p[2]
p[3] = -p[3]
ffi.memmove(b, p + 2, 6)
assert b.tolist() == [-997, -996, 995]
def test_memmove_readonly_readwrite():
ffi = _cffi1_backend.FFI()
p = ffi.new("signed char[]", 5)
ffi.memmove(p, b"abcde", 3)
assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0]
ffi.memmove(p, bytearray(b"ABCDE"), 2)
assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0]
py.test.raises((TypeError, BufferError), ffi.memmove, b"abcde", p, 3)
ba = bytearray(b"xxxxx")
ffi.memmove(dest=ba, src=p, n=3)
assert ba == bytearray(b"ABcxx")
def test_ffi_types():
CData = _cffi1_backend.FFI.CData
CType = _cffi1_backend.FFI.CType
ffi = _cffi1_backend.FFI()
assert isinstance(ffi.cast("int", 42), CData)
assert isinstance(ffi.typeof("int"), CType)
def test_ffi_getwinerror():
if sys.platform != "win32":
py.test.skip("for windows")
ffi = _cffi1_backend.FFI()
n = (1 << 29) + 42
code, message = ffi.getwinerror(code=n)
assert code == n
def test_ffi_new_allocator_1():
ffi = _cffi1_backend.FFI()
alloc1 = ffi.new_allocator()
alloc2 = ffi.new_allocator(should_clear_after_alloc=False)
for retry in range(100):
p1 = alloc1("int[10]")
p2 = alloc2("int[10]")
combination = 0
for i in range(10):
assert p1[i] == 0
combination |= p2[i]
p1[i] = -42
p2[i] = -43
if combination != 0:
break
del p1, p2
import gc; gc.collect()
else:
raise AssertionError("cannot seem to get an int[10] not "
"completely cleared")
def test_ffi_new_allocator_2():
ffi = _cffi1_backend.FFI()
seen = []
def myalloc(size):
seen.append(size)
return ffi.new("char[]", b"X" * size)
def myfree(raw):
seen.append(raw)
alloc1 = ffi.new_allocator(myalloc, myfree)
alloc2 = ffi.new_allocator(alloc=myalloc, free=myfree,
should_clear_after_alloc=False)
p1 = alloc1("int[10]")
p2 = alloc2("int[]", 10)
assert seen == [40, 40]
assert ffi.typeof(p1) == ffi.typeof("int[10]")
assert ffi.sizeof(p1) == 40
assert ffi.typeof(p2) == ffi.typeof("int[]")
assert ffi.sizeof(p2) == 40
assert p1[5] == 0
assert p2[6] == ord('X') * 0x01010101
raw1 = ffi.cast("char *", p1)
raw2 = ffi.cast("char *", p2)
del p1, p2
retries = 0
while len(seen) != 4:
retries += 1
assert retries <= 5
import gc; gc.collect()
assert seen == [40, 40, raw1, raw2]
assert repr(seen[2]) == "<cdata 'char[]' owning 41 bytes>"
assert repr(seen[3]) == "<cdata 'char[]' owning 41 bytes>"
def test_ffi_new_allocator_3():
ffi = _cffi1_backend.FFI()
seen = []
def myalloc(size):
seen.append(size)
return ffi.new("char[]", b"X" * size)
alloc1 = ffi.new_allocator(myalloc) # no 'free'
p1 = alloc1("int[10]")
assert seen == [40]
assert ffi.typeof(p1) == ffi.typeof("int[10]")
assert ffi.sizeof(p1) == 40
assert p1[5] == 0
def test_ffi_new_allocator_4():
ffi = _cffi1_backend.FFI()
py.test.raises(TypeError, ffi.new_allocator, free=lambda x: None)
#
def myalloc2(size):
raise LookupError
alloc2 = ffi.new_allocator(myalloc2)
py.test.raises(LookupError, alloc2, "int[5]")
#
def myalloc3(size):
return 42
alloc3 = ffi.new_allocator(myalloc3)
e = py.test.raises(TypeError, alloc3, "int[5]")
assert str(e.value) == "alloc() must return a cdata object (got int)"
#
def myalloc4(size):
return ffi.cast("int", 42)
alloc4 = ffi.new_allocator(myalloc4)
e = py.test.raises(TypeError, alloc4, "int[5]")
assert str(e.value) == "alloc() must return a cdata pointer, not 'int'"
#
def myalloc5(size):
return ffi.NULL
alloc5 = ffi.new_allocator(myalloc5)
py.test.raises(MemoryError, alloc5, "int[5]")
|
|
"""Interconvert between json and other (cif, mol, smi, etc.) files."""
from collections import Counter
try:
from fractions import gcd # < Python 3.9
except ImportError:
from math import gcd # >= Python 3.9
import logging
from functools import reduce
try:
from openbabel import openbabel as ob
from openbabel import pybel
HAS_OB = True
except ImportError:
logging.warning("Open Babel >3.0 not found. Format conversion disabled.")
HAS_OB = False
import imolecule.json_formatter as json
def convert(data, in_format, out_format, name=None, pretty=False):
"""Convert between two inputted chemical formats.
Args:
data: A string representing the chemical file to be converted. If the
`in_format` is "json", this can also be a Python object
in_format: The format of the `data` string. Can be "json" or any format
recognized by Open Babel
out_format: The format to convert to. Can be "json" or any format
recognized by Open Babel
name: (Optional) If `out_format` is "json", will save the specified
value in a "name" property
pretty: (Optional) If True and `out_format` is "json", will pretty-
print the output for human readability
Returns:
A string representing the inputted `data` in the specified `out_format`
"""
# Decide on a json formatter depending on desired prettiness
dumps = json.dumps if pretty else json.compress
# Shortcut for avoiding pybel dependency
if not HAS_OB and in_format == 'json' and out_format == 'json':
return dumps(json.loads(data) if isinstance(data, str) else data)
elif not HAS_OB:
raise ImportError("Chemical file format conversion requires pybel.")
# Bring up with open babel dev: mmcif seems to be a better parser than cif
if in_format == 'cif':
in_format = 'mmcif'
# These use the open babel library to interconvert, with additions for json
if in_format == 'json':
mol = json_to_pybel(json.loads(data) if isinstance(data, str)
else data)
elif in_format == 'pybel':
mol = data
else:
mol = pybel.readstring(in_format, data)
# Infer structure in cases where the input format has no specification
if not mol.OBMol.HasNonZeroCoords():
mol.make3D()
# Make P1 if that's a thing, recalculating bonds in process
if in_format == 'mmcif' and hasattr(mol, 'unitcell'):
mol.unitcell.FillUnitCell(mol.OBMol)
mol.OBMol.ConnectTheDots()
mol.OBMol.PerceiveBondOrders()
mol.OBMol.Center()
if out_format == 'pybel':
return mol
elif out_format == 'object':
return pybel_to_json(mol, name)
elif out_format == 'json':
return dumps(pybel_to_json(mol, name))
else:
return mol.write(out_format)
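# Minimal usage sketch (file name and formats are hypothetical; any non-json
# conversion requires Open Babel / pybel to be installed):
#   with open('caffeine.mol') as f:
#       json_str = convert(f.read(), 'mol', 'json', name='caffeine', pretty=True)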
def json_to_pybel(data, infer_bonds=False):
"""Convert python data structure to pybel.Molecule.
This will infer bond data if not specified.
Args:
data: The loaded json data of a molecule, as a Python object
infer_bonds (Optional): If no bonds specified in input, infer them
Returns:
An instance of `pybel.Molecule`
"""
obmol = ob.OBMol()
obmol.BeginModify()
for atom in data['atoms']:
obatom = obmol.NewAtom()
obatom.SetAtomicNum(ob.GetAtomicNum(str(atom['element'])))
obatom.SetVector(*atom['location'])
if 'label' in atom:
pd = ob.OBPairData()
pd.SetAttribute('_atom_site_label')
pd.SetValue(atom['label'])
obatom.CloneData(pd)
# If there is no bond data, try to infer them
if 'bonds' not in data or not data['bonds']:
if infer_bonds:
obmol.ConnectTheDots()
obmol.PerceiveBondOrders()
# Otherwise, use the bonds in the data set
else:
for bond in data['bonds']:
if 'atoms' not in bond:
continue
obmol.AddBond(bond['atoms'][0] + 1, bond['atoms'][1] + 1,
bond['order'])
# Check for unit cell data
if 'unitcell' in data:
uc = ob.OBUnitCell()
uc.SetData(*(ob.vector3(*v) for v in data['unitcell']))
uc.SetSpaceGroup('P1')
obmol.CloneData(uc)
obmol.EndModify()
mol = pybel.Molecule(obmol)
# Add partial charges
if all('charge' in atom for atom in data['atoms']):
mol.OBMol.SetPartialChargesPerceived()
for atom, pyatom in zip(data['atoms'], mol.atoms):
pyatom.OBAtom.SetPartialCharge(atom['charge'])
return mol
def pybel_to_json(molecule, name=None):
"""Convert a pybel molecule to json.
Args:
molecule: An instance of `pybel.Molecule`
name: (Optional) If specified, will save a "name" property
Returns:
A Python dictionary containing atom and bond data
"""
# Save atom element type and 3D location.
atoms = [{'element': ob.GetSymbol(atom.atomicnum),
'location': list(atom.coords)}
for atom in molecule.atoms]
# Recover auxiliary data, if exists
for json_atom, pybel_atom in zip(atoms, molecule.atoms):
if pybel_atom.partialcharge != 0:
json_atom['charge'] = pybel_atom.partialcharge
if pybel_atom.OBAtom.HasData('_atom_site_label'):
obatom = pybel_atom.OBAtom
json_atom['label'] = obatom.GetData('_atom_site_label').GetValue()
if pybel_atom.OBAtom.HasData('color'):
obatom = pybel_atom.OBAtom
json_atom['color'] = obatom.GetData('color').GetValue()
# Save number of bonds and indices of endpoint atoms
bonds = [{'atoms': [b.GetBeginAtom().GetIndex(),
b.GetEndAtom().GetIndex()],
'order': b.GetBondOrder()}
for b in ob.OBMolBondIter(molecule.OBMol)]
output = {'atoms': atoms, 'bonds': bonds, 'units': {}}
# If there's unit cell data, save it to the json output
if hasattr(molecule, 'unitcell'):
uc = molecule.unitcell
output['unitcell'] = [[v.GetX(), v.GetY(), v.GetZ()]
for v in uc.GetCellVectors()]
density = (sum(atom.atomicmass for atom in molecule.atoms)
/ (uc.GetCellVolume() * 0.6022))
output['density'] = density
output['units']['density'] = 'kg / L'
# Save the formula to json. Use Hill notation, just to have a standard.
element_count = Counter(ob.GetSymbol(a.atomicnum) for a in molecule)
hill_count = []
for element in ['C', 'H']:
if element in element_count:
hill_count += [(element, element_count[element])]
del element_count[element]
hill_count += sorted(element_count.items())
# If it's a crystal, then reduce the Hill formula
div = (reduce(gcd, (c[1] for c in hill_count))
if hasattr(molecule, 'unitcell') else 1)
output['formula'] = ''.join(n if c / div == 1 else f'{n}{c // div}'
for n, c in hill_count)
output['molecular_weight'] = molecule.molwt / div
output['units']['molecular_weight'] = 'g / mol'
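# Worked example of the Hill ordering above (hypothetical molecule): for
# ethanol the element counts are {'C': 2, 'H': 6, 'O': 1}, so hill_count is
# [('C', 2), ('H', 6), ('O', 1)] and, with div == 1 for a non-crystal, the
# formula string becomes 'C2H6O' (counts of 1 omit the number).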
# If the input has been given a name, add that
if name:
output['name'] = name
return output
if __name__ == '__main__':
# Lazy converter to test this out
import sys
in_data, in_format, out_format = sys.argv[1:]
try:
with open(in_data) as in_file:
data = in_file.read()
except IOError:
data = in_data
print(convert(data, in_format, out_format, pretty=True))
|
|
# -*- coding: utf-8 -*-
"""
Pyweed utility functions.
:copyright:
Mazama Science, IRIS
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function)
# Basic packages
import os
import logging
import re
from pyproj import Geod
from obspy import UTCDateTime
from obspy.taup.tau import TauPyModel
from urllib.parse import urlencode
LOGGER = logging.getLogger(__name__)
GEOD = Geod(ellps='WGS84')
TAUP = TauPyModel()
# Rough meters/degree calculation
M_PER_DEG = (GEOD.inv(0, 0, 0, 1)[2] + GEOD.inv(0, 0, 1, 0)[2]) / 2
class OutputFormat(object):
"""
Simple output format definition.
"""
def __init__(self, value, label=None, extension=None):
#: This is the name used by ObsPy, which we treat as the "real" value
self.value = value
#: This is the label used in the UI, it defaults to value
self.label = label or value
#: This is the file extension, it defaults to lowercased value
self.extension = extension or value.lower()
# List of the output formats we support
OUTPUT_FORMATS = [
OutputFormat('MSEED', 'MiniSEED'),
OutputFormat('SAC'),
OutputFormat('SACXY', 'SAC ASCII', 'sac.txt'),
OutputFormat('SLIST', 'ASCII (1 column)', 'ascii1.txt'),
OutputFormat('TSPAIR', 'ASCII (2 column)', 'ascii2.txt'),
]
# Map of a format to the file extension to use
OUTPUT_FORMAT_EXTENSIONS = dict(((f.value, f.extension) for f in OUTPUT_FORMATS))
class Phase(object):
"""
Simple phase definition
"""
def __init__(self, name, label):
self.name = name
self.label = label
# List of phases we can use for time windows
EVENT_TIME_PHASE = 'Event'
PHASES = [
Phase('P', 'P wave arrival'),
Phase('S', 'S wave arrival'),
Phase(EVENT_TIME_PHASE, 'Event time')
]
# Actual phase values retrieved from TauP, this should give us a good P and S value for any input (I hope!)
TAUP_PHASES = ['P', 'PKIKP', 'Pdiff', 'S', 'SKIKS', 'SKS', 'p', 's']
def manage_cache(download_dir, cache_size):
"""
Maintain a cache directory at a certain size (MB) by removing the oldest files.
"""
try:
# Compile statistics on all files in the output directory and subdirectories
stats = []
total_size = 0
for root, dirs, files in os.walk(download_dir):
for file in files:
path = os.path.join(root, file)
stat_list = os.stat(path)
# path, size, atime
new_stat_list = [path, stat_list.st_size, stat_list.st_atime]
total_size = total_size + stat_list.st_size
# don't want hidden files like .htaccess so don't add stuff that starts with .
if not file.startswith('.'):
stats.append(new_stat_list)
# Sort file stats by last access time
stats = sorted(stats, key=lambda file: file[2])
# Delete old files until we get under cache_size (configured in megabytes)
deletion_count = 0
while total_size > cache_size * 1000000:
# front of stats list is the file with the smallest (=oldest) access time
last_accessed_file = stats[0]
# index 1 is where size is
total_size = total_size - last_accessed_file[1]
# index 0 is where path is
os.remove(last_accessed_file[0])
# remove the file from the stats list
stats.pop(0)
deletion_count = deletion_count + 1
LOGGER.debug('Removed %d files to keep %s below %.0f megabytes' % (deletion_count, download_dir, cache_size))
except Exception as e:
LOGGER.error(str(e))
def iter_channels(inventory, dedupe=True):
"""
Iterate over every channel in an inventory.
For each channel, yields (network, station, channel)
If dedupe=True, repeated channels are filtered out -- this can occur if the inventory includes
multiple epochs for a given channel. Only the first channel will be included in this case.
"""
last_sncl = None
if inventory:
for network in inventory.networks:
for station in network.stations:
for channel in station.channels:
if dedupe:
sncl = get_sncl(network, station, channel)
if sncl == last_sncl:
continue
last_sncl = sncl
yield (network, station, channel)
def get_sncl(network, station, channel):
"""
Generate the SNCL for the given network/station/channel
"""
return '.'.join((network.code, station.code, channel.location_code, channel.code))
def get_event_id(event):
"""
Get a unique ID for a given event
Event IDs are given differently by different data centers.
Examples compiled by [email protected]:
IRIS
<event publicID="smi:service.iris.edu/fdsnws/event/1/query?eventid=3337497">
NCEDC
<event publicID="quakeml:nc.anss.org/Event/NC/71377596"
catalog:datasource="nc" catalog:dataid="nc71377596"
catalog:eventsource="nc" catalog:eventid="71377596">
SCEDC
<event publicID="quakeml:service.scedc.caltech.edu/fdsnws/event/1/query?eventid=37300872"
catalog:datasource="ci" catalog:dataid="ci37300872"
catalog:eventsource="ci" catalog:eventid="37300872">
USGS
<event catalog:datasource="us" catalog:eventsource="us"
catalog:eventid="c000lvb5"
publicID="quakeml:earthquake.usgs.gov/fdsnws/event/1/query?eventid=usc000lvb5&format=quakeml">
ETHZ
<event publicID="smi:ch.ethz.sed/sc3a/2017eemfch">
INGV
<event publicID="smi:webservices.ingv.it/fdsnws/event/1/query?eventId=863301">
ISC
<event publicID="smi:ISC/evid=600516598">
"""
# Look for "eventid=" as a URL query parameter
m = re.search(r'eventid=([^\&]+)', event.resource_id.id, re.IGNORECASE)
if m:
return m.group(1)
# Otherwise, return the trailing segment of alphanumerics
return re.sub(r'^.*?(\w+)\W*$', r'\1', event.resource_id.id)
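# Illustrative behaviour, using the publicIDs quoted in the docstring above:
#   IRIS  'smi:service.iris.edu/fdsnws/event/1/query?eventid=3337497'  -> '3337497'
#   USGS  '...query?eventid=usc000lvb5&format=quakeml'                 -> 'usc000lvb5'
#   ISC   'smi:ISC/evid=600516598' (no 'eventid=' parameter)           -> '600516598'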
def get_event_name(event):
time_str = get_event_time_str(event)
mag_str = get_event_mag_str(event)
description = get_event_description(event)
return "%s | %s | %s" % (time_str, mag_str, description)
def format_time_str(time):
return time.isoformat(sep=' ').split('.')[0]
def get_event_time_str(event):
origin = get_preferred_origin(event)
return format_time_str(origin.time)
def get_event_mag_str(event):
mag = get_preferred_magnitude(event)
return "%s%s" % (mag.mag, mag.magnitude_type)
def get_event_description(event):
return str(event.event_descriptions[0].text).title()
def get_preferred_origin(event):
"""
Get the preferred origin for the event, or None if not defined
"""
origin = event.preferred_origin()
if not origin:
LOGGER.error("No preferred origin found for event %s", event.resource_id)
if len(event.origins):
origin = event.origins[0]
return origin
def get_preferred_magnitude(event):
"""
Get the preferred magnitude for the event, or None if not defined
"""
magnitude = event.preferred_magnitude()
if not magnitude:
LOGGER.error("No preferred magnitude found for event %s", event.resource_id)
if len(event.magnitudes):
magnitude = event.magnitudes[0]
return magnitude
class TimeWindow(object):
"""
Represents a time window for data based on phase arrivals at a particular location
"""
start_offset = 0
end_offset = 0
start_phase = None
end_phase = None
def __init__(self, start_offset=0, end_offset=0, start_phase=PHASES[0].name, end_phase=PHASES[0].name):
self.update(start_offset, end_offset, start_phase, end_phase)
def update(self, start_offset, end_offset, start_phase, end_phase):
"""
Set all values. Phases can be specified by name or label.
"""
self.start_offset = start_offset
self.end_offset = end_offset
self.start_phase = start_phase
self.end_phase = end_phase
def calculate_window(self, event_time, arrivals):
"""
Given an event time and a dictionary of arrival times (see Distances below)
calculate the full time window
"""
start_offset = arrivals.get(self.start_phase, 0) - self.start_offset
end_offset = arrivals.get(self.end_phase, 0) + self.end_offset
return (
# Start time
UTCDateTime(event_time + start_offset),
# End time
UTCDateTime(event_time + end_offset),
)
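# Worked example (hypothetical numbers): with arrivals {'P': 120.0, 'S': 310.0},
# a TimeWindow(start_offset=60, end_offset=600, start_phase='P', end_phase='S')
# yields (event_time + 60, event_time + 910), i.e. from 60 s before the P
# arrival to 600 s after the S arrival.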
def __eq__(self, other):
"""
Compare two TimeWindows
"""
if not isinstance(other, TimeWindow):
return False
return (other.start_offset == self.start_offset and
other.end_offset == self.end_offset and
other.start_phase == self.start_phase and
other.end_phase == self.end_phase)
def get_distance(lat1, lon1, lat2, lon2):
"""
Get the distance between two points in degrees
"""
# NOTE that GEOD takes longitude first!
_az, _baz, meters = GEOD.inv(lon1, lat1, lon2, lat2)
return meters / M_PER_DEG
def get_arrivals(distance, event_depth):
"""
Calculate phase arrival times
:param distance: distance in degrees
:param event_depth: event depth in km
"""
arrivals = TAUP.get_travel_times(
event_depth,
distance,
TAUP_PHASES
)
# From the travel time and origin, calculate the actual first arrival time for each basic phase type
first_arrivals = {}
for arrival in arrivals:
# The basic phase name is the uppercase first letter of the full phase name
# We assume this matches a Phase.name defined in PHASES
phase_name = arrival.name[0].upper()
if phase_name not in first_arrivals:
first_arrivals[phase_name] = arrival.time
return first_arrivals
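# For example (hypothetical TauP output): arrivals named 'P', 'Pdiff', 'SKS'
# and 's' collapse to {'P': <time of the first P-type arrival returned>,
# 'S': <time of the first S-type arrival returned>}.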
def get_bounding_circle(lat, lon, radius, num_points=36):
"""
Returns groups of lat/lon pairs representing a circle on the map
"""
radius_meters = radius * M_PER_DEG
# NOTE that GEOD takes longitude first!
trans = GEOD.fwd(
[lon] * num_points,
[lat] * num_points,
list(((i * 360) / num_points) for i in range(num_points)),
[radius_meters] * num_points
)
points = list(zip(trans[1], trans[0]))
# We need to complete the circle by adding the first point again as the last point
points.append(points[0])
return points
def get_service_url(client, service, parameters):
"""
Figure out the URL for the given service call. This isn't publicly available from the ObsPy client,
so we need to use internal APIs; those messy details are encapsulated here.
"""
try:
return client._create_url_from_parameters(service, {}, parameters)
except Exception:
return "%s %s %s" % (
client.base_url, service, urlencode(parameters)
)
class CancelledException(Exception):
"""
An exception to return in an event notification indicating that an operation was cancelled.
See `StationsHandler` for an example.
"""
def __str__(self, *args, **kwargs):
s = super(CancelledException, self).__str__(*args, **kwargs)
if s == '':
return 'Cancelled'
else:
return s
class DataRequest(object):
"""
Wrapper object for a data request, which may or may not be more than a single web service query.
"""
# The client to use
client = None
# List of option dictionaries, one for each sub-request required
sub_requests = None
def __init__(self, client, *requests):
self.client = client
self.sub_requests = requests
def process_result(self, result):
"""
Subclasses can define behavior here to do post-processing on the resulting data
"""
return result
|
|
import collections.abc
import threading
import immutables
__all__ = ('ContextVar', 'Context', 'Token', 'copy_context')
_NO_DEFAULT = object()
# !!!! Rigetti edit !!!!
# This backport inspects objects' __module__ attribute to prevent subclassing outside of
# this module. The backport assumes it will be installed as `contextvars` and not as
# `pyquil.external.contextvars`. So all comparisons to the module name "contextvars"
# have been replaced
THIS_MODULE = "pyquil.external.contextvars"
class ContextMeta(type(collections.abc.Mapping)):
# contextvars.Context is not subclassable.
def __new__(mcls, names, bases, dct):
cls = super().__new__(mcls, names, bases, dct)
if cls.__module__ != THIS_MODULE or cls.__name__ != 'Context':
raise TypeError("type 'Context' is not an acceptable base type")
return cls
class Context(collections.abc.Mapping, metaclass=ContextMeta):
def __init__(self):
self._data = immutables.Map()
self._prev_context = None
def run(self, callable, *args, **kwargs):
if self._prev_context is not None:
raise RuntimeError(
'cannot enter context: {} is already entered'.format(self))
self._prev_context = _get_context()
try:
_set_context(self)
return callable(*args, **kwargs)
finally:
_set_context(self._prev_context)
self._prev_context = None
def copy(self):
new = Context()
new._data = self._data
return new
def __getitem__(self, var):
if not isinstance(var, ContextVar):
raise TypeError(
"a ContextVar key was expected, got {!r}".format(var))
return self._data[var]
def __contains__(self, var):
if not isinstance(var, ContextVar):
raise TypeError(
"a ContextVar key was expected, got {!r}".format(var))
return var in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
class ContextVarMeta(type):
# contextvars.ContextVar is not subclassable.
def __new__(mcls, names, bases, dct):
cls = super().__new__(mcls, names, bases, dct)
if cls.__module__ != THIS_MODULE or cls.__name__ != 'ContextVar':
raise TypeError("type 'ContextVar' is not an acceptable base type")
return cls
def __getitem__(cls, name):
return
class ContextVar(metaclass=ContextVarMeta):
def __init__(self, name, *, default=_NO_DEFAULT):
if not isinstance(name, str):
raise TypeError("context variable name must be a str")
self._name = name
self._default = default
@property
def name(self):
return self._name
def get(self, default=_NO_DEFAULT):
ctx = _get_context()
try:
return ctx[self]
except KeyError:
pass
if default is not _NO_DEFAULT:
return default
if self._default is not _NO_DEFAULT:
return self._default
raise LookupError
def set(self, value):
ctx = _get_context()
data = ctx._data
try:
old_value = data[self]
except KeyError:
old_value = Token.MISSING
updated_data = data.set(self, value)
ctx._data = updated_data
return Token(ctx, self, old_value)
def reset(self, token):
if token._used:
raise RuntimeError("Token has already been used once")
if token._var is not self:
raise ValueError(
"Token was created by a different ContextVar")
if token._context is not _get_context():
raise ValueError(
"Token was created in a different Context")
ctx = token._context
if token._old_value is Token.MISSING:
ctx._data = ctx._data.delete(token._var)
else:
ctx._data = ctx._data.set(token._var, token._old_value)
token._used = True
def __repr__(self):
r = '<ContextVar name={!r}'.format(self.name)
if self._default is not _NO_DEFAULT:
r += ' default={!r}'.format(self._default)
return r + ' at {:0x}>'.format(id(self))
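# Usage sketch (not part of the backport itself; it simply mirrors the stdlib
# contextvars API implemented above):
#
#   request_id = ContextVar('request_id', default=None)
#   token = request_id.set('abc123')   # Token remembers the previous value
#   assert request_id.get() == 'abc123'
#   request_id.reset(token)            # restore the previous state
#   assert request_id.get() is None    # falls back to the default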
class TokenMeta(type):
# contextvars.Token is not subclassable.
def __new__(mcls, names, bases, dct):
cls = super().__new__(mcls, names, bases, dct)
if cls.__module__ != THIS_MODULE or cls.__name__ != 'Token':
raise TypeError("type 'Token' is not an acceptable base type")
return cls
class Token(metaclass=TokenMeta):
MISSING = object()
def __init__(self, context, var, old_value):
self._context = context
self._var = var
self._old_value = old_value
self._used = False
@property
def var(self):
return self._var
@property
def old_value(self):
return self._old_value
def __repr__(self):
r = '<Token '
if self._used:
r += ' used'
r += ' var={!r} at {:0x}>'.format(self._var, id(self))
return r
def copy_context():
return _get_context().copy()
def _get_context():
ctx = getattr(_state, 'context', None)
if ctx is None:
ctx = Context()
_state.context = ctx
return ctx
def _set_context(ctx):
_state.context = ctx
_state = threading.local()
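# A minimal usage sketch of the backport defined above; the 'request_id'
# variable and its sample values are illustrative only, not part of the module.
if __name__ == '__main__':
    request_id = ContextVar('request_id', default='<unset>')
    assert request_id.get() == '<unset>'          # falls back to the default

    tok = request_id.set('abc-123')               # returns a Token
    assert request_id.get() == 'abc-123'

    # run() executes a callable inside a copied Context without disturbing
    # the calling thread's current context.
    snapshot = copy_context()
    assert snapshot.run(request_id.get) == 'abc-123'

    request_id.reset(tok)                         # restore the previous state
    assert request_id.get() == '<unset>'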
|
|
# Copyright (c) 2014 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from blazarclient import command
from blazarclient import exception
class ListNetworks(command.ListCommand):
"""Print a list of networks."""
resource = 'network'
log = logging.getLogger(__name__ + '.ListNetworks')
list_columns = ['id', 'network_type', 'physical_network', 'segment_id']
def get_parser(self, prog_name):
parser = super(ListNetworks, self).get_parser(prog_name)
parser.add_argument(
'--sort-by', metavar="<network_column>",
help='column name used to sort result',
default='id'
)
return parser
class ShowNetwork(command.ShowCommand):
"""Show network details."""
resource = 'network'
json_indent = 4
log = logging.getLogger(__name__ + '.ShowNetwork')
class CreateNetwork(command.CreateCommand):
"""Create a network."""
resource = 'network'
json_indent = 4
log = logging.getLogger(__name__ + '.CreateNetwork')
def get_parser(self, prog_name):
parser = super(CreateNetwork, self).get_parser(prog_name)
parser.add_argument(
'--network-type',
help='Type of physical mechanism associated with the network '
'segment. For example: flat, geneve, gre, local, vlan, '
'vxlan.'
)
parser.add_argument(
'--physical-network',
default=None,
help='Name of the physical network in which the network segment '
'is available, required for VLAN networks'
)
parser.add_argument(
'--segment',
dest='segment_id',
help='VLAN ID for VLAN networks or Tunnel ID for GENEVE/GRE/VXLAN '
'networks'
)
parser.add_argument(
'--extra', metavar='<key>=<value>',
action='append',
dest='extra_capabilities',
default=[],
help='Extra capabilities key/value pairs to add for the network'
)
return parser
def args2body(self, parsed_args):
params = {}
if parsed_args.network_type:
params['network_type'] = parsed_args.network_type
else:
raise exception.IncorrectNetwork("--network-type is required")
if parsed_args.physical_network:
if params.get('network_type') == 'vlan':
params['physical_network'] = parsed_args.physical_network
else:
err_msg = "--physical-network is only valid for VLAN segments"
raise exception.IncorrectNetwork(err_msg)
else:
if params.get('network_type') == 'vlan':
err_msg = "--physical-network is required for VLAN segments"
raise exception.IncorrectNetwork(err_msg)
else:
params['physical_network'] = None
if parsed_args.segment_id:
params['segment_id'] = parsed_args.segment_id
else:
raise exception.IncorrectNetwork("--segment is required")
extras = {}
if parsed_args.extra_capabilities:
for capa in parsed_args.extra_capabilities:
key, _sep, value = capa.partition('=')
# NOTE(sbauza): multiple copies of the same capability will
                # result in only the last value being stored
extras[key] = value
params.update(extras)
return params
class UpdateNetwork(command.UpdateCommand):
"""Update attributes of a network."""
resource = 'network'
json_indent = 4
log = logging.getLogger(__name__ + '.UpdateNetwork')
def get_parser(self, prog_name):
parser = super(UpdateNetwork, self).get_parser(prog_name)
parser.add_argument(
'--extra', metavar='<key>=<value>',
action='append',
dest='extra_capabilities',
default=[],
help='Extra capabilities key/value pairs to update for the network'
)
return parser
def args2body(self, parsed_args):
params = {}
extras = {}
if parsed_args.extra_capabilities:
for capa in parsed_args.extra_capabilities:
key, _sep, value = capa.partition('=')
# NOTE(sbauza): multiple copies of the same capability will
                # result in only the last value being stored
extras[key] = value
params['values'] = extras
return params
class DeleteNetwork(command.DeleteCommand):
"""Delete a network."""
resource = 'network'
log = logging.getLogger(__name__ + '.DeleteNetwork')
class ShowNetworkAllocation(command.ShowAllocationCommand):
"""Show network allocation details."""
resource = 'network'
allow_names = False
json_indent = 4
log = logging.getLogger(__name__ + '.ShowNetworkAllocation')
class ListNetworkAllocations(command.ListAllocationCommand):
"""List network allocations."""
resource = 'network'
allow_names = False
log = logging.getLogger(__name__ + '.ListNetworkAllocations')
list_columns = ['resource_id', 'reservations']
def get_parser(self, prog_name):
parser = super(ListNetworkAllocations, self).get_parser(prog_name)
parser.add_argument(
'--sort-by', metavar="<network_column>",
help='column name used to sort result',
default='resource_id'
)
return parser
class ShowNetworkCapability(command.ShowCapabilityCommand):
"""Show network capability."""
resource = 'network'
json_indent = 4
log = logging.getLogger(__name__ + '.ShowNetworkCapability')
class ListNetworkCapabilities(command.ListCommand):
"""List network capabilities."""
resource = 'network'
log = logging.getLogger(__name__ + '.ListNetworkCapabilities')
list_columns = ['property', 'private', 'capability_values']
def args2body(self, parsed_args):
params = {'detail': parsed_args.detail}
if parsed_args.sort_by:
if parsed_args.sort_by in self.list_columns:
params['sort_by'] = parsed_args.sort_by
else:
msg = 'Invalid sort option %s' % parsed_args.sort_by
raise exception.BlazarClientException(msg)
return params
def retrieve_list(self, parsed_args):
"""Retrieve a list of resources from Blazar server."""
blazar_client = self.get_client()
body = self.args2body(parsed_args)
resource_manager = getattr(blazar_client, self.resource)
data = resource_manager.list_capabilities(**body)
return data
def get_parser(self, prog_name):
parser = super(ListNetworkCapabilities, self).get_parser(prog_name)
parser.add_argument(
'--detail',
action='store_true',
help='Return capabilities with values and attributes.',
default=False
)
parser.add_argument(
'--sort-by', metavar="<extra_capability_column>",
help='column name used to sort result',
default='property'
)
return parser
class UpdateNetworkCapability(command.UpdateCapabilityCommand):
"""Update attributes of a network capability."""
resource = 'network'
json_indent = 4
log = logging.getLogger(__name__ + '.UpdateNetworkCapability')
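# Standalone sketch of how the repeated '--extra <key>=<value>' options above
# are folded into a request body. It mirrors the loop in
# CreateNetwork.args2body / UpdateNetwork.args2body without calling the
# command classes; the sample values below are illustrative only.
def _parse_extra_capabilities(pairs):
    """Turn ['key=value', ...] into a dict; the last value wins per key."""
    extras = {}
    for capa in pairs:
        key, _sep, value = capa.partition('=')
        extras[key] = value
    return extras


if __name__ == '__main__':
    body = {'network_type': 'vlan',
            'physical_network': 'physnet1',
            'segment_id': '200'}
    body.update(_parse_extra_capabilities(['usage=tenant', 'usage=shared']))
    assert body['usage'] == 'shared'   # repeated keys keep only the last value
    print(body)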
|
|
import unittest
import mock
import numpy
import six
import chainer
from chainer import _backprop_utils
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
def make_array(start, shape, dtype):
size = numpy.product(shape, dtype='i')
a = numpy.arange(start, start + size)
a = a.reshape(shape)
a = a.astype(dtype, copy=False)
return a
class FuncWithBackward(chainer.FunctionNode):
def backward(self, target_input_indexes, grad_outputs):
return self._mock_backward(target_input_indexes, grad_outputs)
class FuncWithBackwardAccumulate(chainer.FunctionNode):
def backward_accumulate(self, target_input_indexes, grad_outputs,
grad_inputs):
"""Computes gradients w.r.t.\\ specified inputs and accumulates them.
This method provides a way to fuse the backward computation and the
gradient accumulations in the case that the multiple functions are
applied to the same variable.
Users have to override either of this method or :meth:`backward`.
It is often simpler to implement :meth:`backward` and is recommended
if you do not need to provide efficient gradient accumulation.
Args:
target_input_indexes (tuple of int): Indices of the input variables
w.r.t. which the gradients are required. It is guaranteed that
this tuple contains at least one element.
grad_outputs (tuple of Variable): Gradients w.r.t. the output
variables. If the gradient w.r.t. an output variable is not
given, the corresponding element is ``None``.
grad_inputs (tuple of Variable): Gradients w.r.t. the input
variables specified by ``target_input_indexes``. These values
are computed by other computation paths. If there is no
gradient value existing for the variable, the corresponding
element is ``None``. See also the note below.
Returns:
Tuple of variables that represent the gradients w.r.t. specified
input variables. Unlike :meth:`backward`, the length of the tuple
            **must** be the same as that of ``target_input_indexes``.
.. note::
When the same variable is passed to the multiple input arguments of
a function, only the first position of ``grad_inputs`` corresponding
to these input arguments may contain the gradient variable
corresponding to that input variable, and other entries are set to
``None``. This is an implementation-detail convention to avoid the
complication of correctly accumulating gradients in such a case.
This behavior might be changed in a future version.
"""
assert isinstance(target_input_indexes, tuple)
assert isinstance(grad_outputs, tuple)
assert isinstance(grad_inputs, tuple)
# The default implementation uses backward(). You can override this
# method without using backward().
gxs = self._mock_backward(target_input_indexes, grad_outputs)
len_gxs = len(gxs)
if len_gxs == len(self.inputs):
gxs = tuple([gxs[i] for i in target_input_indexes])
elif len_gxs != len(target_input_indexes):
raise ValueError(
'number of gradients returned by %s (%s) is incorrect.'
% (self._impl_name, self.label))
return tuple([gx if g_input is None else
g_input if gx is None else
gx + g_input
for gx, g_input in six.moves.zip(gxs, grad_inputs)])
@testing.parameterize(*testing.product({
'y_shape': [(4,), (0,), (2, 3), ()],
'x_shape': [(3,), (0,), (4, 1), ()],
'override': ['backward', 'backward_accumulate'],
}))
class TestFunctionNode(unittest.TestCase):
def _get_method(self, prefix, gpu):
suffix = 'gpu' if gpu else 'cpu'
return getattr(self.f, prefix + '_' + suffix)
def setUp(self):
y_shape = self.y_shape
x_shape = self.x_shape
y1 = make_array(1, y_shape, numpy.float32)
y2 = make_array(2, y_shape, numpy.float32)
gx1 = chainer.Variable(
make_array(1, x_shape, numpy.float32))
gx2 = None
gy1 = make_array(1, y_shape, numpy.float32)
gy2 = make_array(1, y_shape, numpy.float32)
f = {
'backward': FuncWithBackward,
'backward_accumulate': FuncWithBackwardAccumulate,
}[self.override]()
f._mock_backward = mock.MagicMock(return_value=(gx1, gx2))
f.check_type_forward = mock.MagicMock()
f.forward_cpu = mock.MagicMock(return_value=(y1, y2))
f.forward_gpu = mock.MagicMock()
self.f = f
self.x1 = make_array(0, x_shape, numpy.float32)
self.x2 = make_array(0, x_shape, numpy.int32)
self.y1 = y1
self.y2 = y2
self.gx1 = gx1
self.gx2 = gx2
self.gx1_orig = chainer.Variable(
make_array(3, x_shape, numpy.float32))
self.gx2_orig = chainer.Variable(
make_array(2, x_shape, numpy.float32))
self.gx1_accum = gx1 + self.gx1_orig
self.gy1 = gy1
self.gy2 = gy2
def tearDown(self):
# Set None to delete cuda array
self.f = None
self.y1 = None
self.y2 = None
self.gx1 = None
def setup_gpu(self):
self.x1 = cuda.to_gpu(self.x1)
self.x2 = cuda.to_gpu(self.x2)
self.y1 = cuda.to_gpu(self.y1)
self.y2 = cuda.to_gpu(self.y2)
self.gx1.to_gpu()
self.gx1_orig.to_gpu()
self.gx2_orig.to_gpu()
self.gx1_accum.to_gpu()
self.gy1 = cuda.to_gpu(self.gy1)
self.gy2 = cuda.to_gpu(self.gy2)
self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
self.f._mock_backward = mock.MagicMock(
return_value=(self.gx1, self.gx2))
def check_backprop_step(self, gxs):
flag_none = gxs[0] is None
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
self.f.inputs = (x1.node, x2.node)
gxrefs = [[gx] if gx is not None else [] for gx in gxs]
grad_outputs = (self.gy1, self.gy2)
grad_inputs = dict(zip(self.f.inputs, gxrefs))
_backprop_utils.backprop_step(
self.f, (0, 1), grad_outputs, grad_inputs, True)
if not chainer.configuration.config.lazy_grad_sum:
# assert eager grad sum
for gxref in gxrefs:
self.assertLessEqual(len(gxref), 1)
gx1 = _backprop_utils._reduce(gxrefs[0])
gx2 = _backprop_utils._reduce(gxrefs[1])
if flag_none:
numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
cuda.to_cpu(self.gx1.data))
self.assertIsNone(gx2)
else:
numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
cuda.to_cpu(self.gx1_accum.data))
numpy.testing.assert_array_equal(cuda.to_cpu(gx2.data),
cuda.to_cpu(self.gx2_orig.data))
def test_backprop_step_none_cpu(self):
self.check_backprop_step((None, None))
@attr.gpu
def test_backprop_step_none_gpu(self):
self.setup_gpu()
self.check_backprop_step((None, None))
def test_backprop_step_cpu(self):
self.check_backprop_step((self.gx1_orig, self.gx2_orig))
@attr.gpu
def test_backprop_step_gpu(self):
self.setup_gpu()
self.check_backprop_step((self.gx1_orig, self.gx2_orig))
testing.run_module(__name__, __file__)
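# Standalone illustration of the gradient-merging rule used in
# FuncWithBackwardAccumulate.backward_accumulate above: a freshly computed
# gradient and a previously accumulated one are summed, while a missing side
# is passed through unchanged. Plain floats stand in for chainer.Variable.
def _accumulate(gxs, grad_inputs):
    return tuple(gx if g_input is None else
                 g_input if gx is None else
                 gx + g_input
                 for gx, g_input in zip(gxs, grad_inputs))


assert _accumulate((1.0, None), (3.0, 2.0)) == (4.0, 2.0)
assert _accumulate((1.0, None), (None, None)) == (1.0, None)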
|
|
"""
Support for Yamaha Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.yamaha/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA, SUPPORT_PAUSE, SUPPORT_STOP,
SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK, SUPPORT_PLAY,
MEDIA_TYPE_MUSIC,
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_NAME, CONF_HOST, STATE_OFF, STATE_ON,
STATE_PLAYING, STATE_IDLE)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['rxv==0.4.0']
_LOGGER = logging.getLogger(__name__)
SUPPORT_YAMAHA = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
CONF_SOURCE_NAMES = 'source_names'
CONF_SOURCE_IGNORE = 'source_ignore'
CONF_ZONE_IGNORE = 'zone_ignore'
DEFAULT_NAME = 'Yamaha Receiver'
KNOWN = 'yamaha_known_receivers'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_SOURCE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_ZONE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SOURCE_NAMES, default={}): {cv.string: cv.string},
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Yamaha platform."""
import rxv
# keep track of configured receivers so that we don't end up
# discovering a receiver dynamically that we have static config
# for.
if hass.data.get(KNOWN, None) is None:
hass.data[KNOWN] = set()
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
source_ignore = config.get(CONF_SOURCE_IGNORE)
source_names = config.get(CONF_SOURCE_NAMES)
zone_ignore = config.get(CONF_ZONE_IGNORE)
if discovery_info is not None:
name = discovery_info[0]
model = discovery_info[1]
ctrl_url = discovery_info[2]
desc_url = discovery_info[3]
if ctrl_url in hass.data[KNOWN]:
_LOGGER.info("%s already manually configured", ctrl_url)
return
receivers = rxv.RXV(
ctrl_url,
model_name=model,
friendly_name=name,
unit_desc_url=desc_url).zone_controllers()
_LOGGER.info("Receivers: %s", receivers)
# when we are dynamically discovered config is empty
zone_ignore = []
elif host is None:
receivers = []
for recv in rxv.find():
receivers.extend(recv.zone_controllers())
else:
ctrl_url = "http://{}:80/YamahaRemoteControl/ctrl".format(host)
receivers = rxv.RXV(ctrl_url, name).zone_controllers()
for receiver in receivers:
if receiver.zone not in zone_ignore:
hass.data[KNOWN].add(receiver.ctrl_url)
add_devices([
YamahaDevice(name, receiver, source_ignore, source_names)])
class YamahaDevice(MediaPlayerDevice):
"""Representation of a Yamaha device."""
def __init__(self, name, receiver, source_ignore, source_names):
"""Initialize the Yamaha Receiver."""
self._receiver = receiver
self._muted = False
self._volume = 0
self._pwstate = STATE_OFF
self._current_source = None
self._source_list = None
self._source_ignore = source_ignore or []
self._source_names = source_names or {}
self._reverse_mapping = None
self._playback_support = None
self._is_playback_supported = False
self._play_status = None
self.update()
self._name = name
self._zone = receiver.zone
def update(self):
"""Get the latest details from the device."""
self._play_status = self._receiver.play_status()
if self._receiver.on:
if self._play_status is None:
self._pwstate = STATE_ON
elif self._play_status.playing:
self._pwstate = STATE_PLAYING
else:
self._pwstate = STATE_IDLE
else:
self._pwstate = STATE_OFF
self._muted = self._receiver.mute
self._volume = (self._receiver.volume / 100) + 1
if self.source_list is None:
self.build_source_list()
current_source = self._receiver.input
self._current_source = self._source_names.get(
current_source, current_source)
self._playback_support = self._receiver.get_playback_support()
self._is_playback_supported = self._receiver.is_playback_supported(
self._current_source)
def build_source_list(self):
"""Build the source list."""
self._reverse_mapping = {alias: source for source, alias in
self._source_names.items()}
self._source_list = sorted(
self._source_names.get(source, source) for source in
self._receiver.inputs()
if source not in self._source_ignore)
@property
def name(self):
"""Return the name of the device."""
name = self._name
if self._zone != "Main_Zone":
# Zone will be one of Main_Zone, Zone_2, Zone_3
name += " " + self._zone.replace('_', ' ')
return name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported_features = SUPPORT_YAMAHA
supports = self._playback_support
mapping = {'play': (SUPPORT_PLAY | SUPPORT_PLAY_MEDIA),
'pause': SUPPORT_PAUSE,
'stop': SUPPORT_STOP,
'skip_f': SUPPORT_NEXT_TRACK,
'skip_r': SUPPORT_PREVIOUS_TRACK}
for attr, feature in mapping.items():
if getattr(supports, attr, False):
supported_features |= feature
return supported_features
def turn_off(self):
"""Turn off media player."""
self._receiver.on = False
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
receiver_vol = 100 - (volume * 100)
negative_receiver_vol = -receiver_vol
self._receiver.volume = negative_receiver_vol
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._receiver.mute = mute
def turn_on(self):
"""Turn the media player on."""
self._receiver.on = True
self._volume = (self._receiver.volume / 100) + 1
    def media_play(self):
        """Send play command."""
self._call_playback_function(self._receiver.play, "play")
def media_pause(self):
"""Send pause command."""
self._call_playback_function(self._receiver.pause, "pause")
def media_stop(self):
"""Send stop command."""
self._call_playback_function(self._receiver.stop, "stop")
def media_previous_track(self):
"""Send previous track command."""
self._call_playback_function(self._receiver.previous, "previous track")
def media_next_track(self):
"""Send next track command."""
self._call_playback_function(self._receiver.next, "next track")
def _call_playback_function(self, function, function_text):
import rxv
try:
function()
except rxv.exceptions.ResponseException:
_LOGGER.warning(
'Failed to execute %s on %s', function_text, self._name)
def select_source(self, source):
"""Select input source."""
self._receiver.input = self._reverse_mapping.get(source, source)
def play_media(self, media_type, media_id, **kwargs):
"""Play media from an ID.
        This exposes a pass-through for various input sources in the
        Yamaha to directly play certain kinds of media. media_type is
        treated as the input type that we are setting, and media_id is
        specific to it.
"""
if media_type == "NET RADIO":
self._receiver.net_radio(media_id)
@property
def media_artist(self):
"""Artist of current playing media."""
if self._play_status is not None:
return self._play_status.artist
@property
def media_album_name(self):
"""Album of current playing media."""
if self._play_status is not None:
return self._play_status.album
@property
def media_content_type(self):
"""Content type of current playing media."""
# Loose assumption that if playback is supported, we are playing music
if self._is_playback_supported:
return MEDIA_TYPE_MUSIC
return None
@property
    def media_title(self):
        """Title of current playing media."""
if self._play_status is not None:
song = self._play_status.song
station = self._play_status.station
            # If both song and station are available, return both; otherwise
            # just the one we have.
if song and station:
return '{}: {}'.format(station, song)
else:
return song or station
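# Standalone sketch of the volume mapping used above: update() converts the
# receiver's volume attribute (a negative value on most models) to the 0..1
# scale that Home Assistant expects, and set_volume_level() inverts it. The
# helper names and the -30.0 sample value are illustrative only.
def _receiver_to_ha(receiver_volume):
    # mirrors: self._volume = (self._receiver.volume / 100) + 1
    return (receiver_volume / 100.0) + 1


def _ha_to_receiver(volume_level):
    # mirrors: receiver_vol = 100 - (volume * 100); receiver.volume = -receiver_vol
    return -(100 - (volume_level * 100.0))


if __name__ == '__main__':
    assert abs(_receiver_to_ha(-30.0) - 0.7) < 1e-9
    assert abs(_ha_to_receiver(0.7) - (-30.0)) < 1e-9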
|
|
#!/usr/bin/env python
# coding=utf-8
## @package biopredyn
## Copyright: [2012-2019] Cosmo Tech, All Rights Reserved
## License: BSD 3-Clause
import copy
import libsbml
import libsedml
import libsbmlsim
import algorithm, result, statistics
import numpy as np
from cobra.io.sbml import create_cobra_model_from_sbml_doc
from COPASI import *
import libfbc
## Base representation of the execution of an algorithm, independent from the
## model or data set it has to be run with.
class Simulation:
## @var algorithm
# KiSAO identifier of the algorithm to execute.
## @var id
# A unique identifier for this object.
## @var name
# Name of this object.
## @var type
# Type of simulation.
## Constructor; either 'simulation' or 'idf' and 's_type' must be passed as
## keyword arguments.
# @param self The object pointer.
# @param simulation A libsedml.SedSimulation object; optional (default: None).
# @param idf A unique identifier; optional (default: None).
# @param name A name for 'self'; optional (default: None).
# @param s_type The type of simulation encoded in 'self'. Possible values for
# s_type are: 'uniformTimeCourse', 'oneStep', 'steadyState' and 'simulation'.
# Optional (default: None).
def __init__(self, simulation=None, idf=None, name=None, s_type=None):
if (simulation is None) and (idf is None or s_type is None):
raise RuntimeError("Either 'simulation' or 'idf' and 's_type' must be " +
"passed as keyword arguments.")
else:
if simulation is not None:
self.set_algorithm(algorithm.Algorithm(simulation.getAlgorithm()))
self.id = simulation.getId()
self.name = simulation.getName()
self.type = simulation.getElementName()
elif idf is not None and s_type is not None:
self.id = idf
self.name = name
self.type = s_type
## String representation of this. Displays it as a hierarchy.
# @param self The object pointer.
# @return A string representing this as a hierarchy.
def __str__(self):
tree = " |-" + self.type + " id=" + self.id + " name=" + self.name + "\n"
tree += " |-algorithm " + self.algorithm.get_kisao_id() + "\n"
return tree
## Getter. Returns self.algorithm.
# @param self The object pointer.
# @return self.algorithm
def get_algorithm(self):
return self.algorithm
## Getter. Returns self.id.
# @param self The object pointer.
# @return self.id
def get_id(self):
return self.id
## Setter for self.algorithm.
# @param self The object pointer.
# @param algo A biopredyn.algorithm.Algorithm object.
def set_algorithm(self, algo):
self.algorithm = algo
## Setter for self.id.
# @param self The object pointer.
# @param id New value for self.id.
def set_id(self, id):
self.id = id
## Getter. Returns self.name.
# @param self The object pointer.
def get_name(self):
return self.name
## Setter for self.name.
# @param self The object pointer.
# @param name New value for self.name.
def set_name(self, name):
self.name = name
## Getter. Returns self.type.
# @param self The object pointer.
# @return self.type
def get_type(self):
return self.type
## Simulation-derived class for one step simulations.
class OneStep(Simulation):
## @var step
# Value of the time step to be considered.
## Overridden constructor; either 'simulation' or 'idf' and 'step'
## must be passed as keyword arguments.
# @param self The object pointer.
# @param simulation A libsedml.SedOneStep element; optional (default: None).
# @param idf A unique identifier; optional (default: None).
# @param name A name for 'self'; optional (default: None).
# @param step Size of the time step to integrate; optional (default: None).
def __init__(self, simulation=None, idf=None, name=None, step=None):
if simulation is None and (idf is None or step is None):
raise RuntimeError("Either 'simulation' or 'idf' and 'step' must be " +
"passed as keyword arguments.")
else:
if simulation is not None:
Simulation.__init__(self, simulation=simulation)
self.step = simulation.getStep()
else:
Simulation.__init__(self, idf=idf, name=name, s_type='oneStep')
self.step = step
## Getter. Returns self.step.
# @param self The object pointer.
# @return self.step
def get_step(self):
return self.step
## Run the simulation encoded in self on the input model using the input tool.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param tool Name of the tool to use as simulation engine (string).
# @param res A biopredyn.result.TimeSeries object.
# @return A biopredyn.result.TimeSeries object.
def run(self, model, tool, res):
# tool selection - by default copasi is chosen
if tool is None or tool == 'copasi':
self.run_as_copasi_one_step(model, res)
else:
raise NameError("Invalid tool name; only 'copasi' is available as a " +
"simulation engine.")
return res
## Run the simulation encoded in self as a Copasi model.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.TimeSeries object.
# @return A biopredyn.result.TimeSeries object.
def run_as_copasi_one_step(self, model, res):
data_model = CCopasiDataModel()
data_model.importSBMLFromString(model.get_sbml_doc().toSBML())
task = data_model.addTask(CTrajectoryTask.timeCourse)
task.setMethodType(CCopasiMethod.deterministic)
task.processStep(self.get_step())
res.import_from_copasi_time_series(task.getTimeSeries(),
model.get_species_copasi_ids())
return res
## Setter for self.step.
# @param self The object pointer.
# @param step New value for self.step.
def set_step(self, step):
self.step = step
## Returns the libsedml.SedOneStep representation of this.
# @param self The object pointer.
# @param level Level of SED-ML language to be used.
# @param version Version of SED-ML language to be used.
# @return A libsedml.SedOneStep object.
def to_sedml(self, level, version):
one = libsedml.SedOneStep(level, version)
one.setId(self.get_id())
if self.get_name() is not None:
one.setName(str(self.get_name()))
one.setStep(self.get_step())
one.setAlgorithm(self.get_algorithm().to_sedml(level, version))
return one
## Simulation-derived class for steady state simulations.
class SteadyState(Simulation):
## Overridden constructor; either 'simulation' or 'idf'
## must be passed as keyword arguments.
# @param self The object pointer.
  # @param simulation A libsedml.SedSteadyState element; optional (default: None).
# @param idf A unique identifier; optional (default: None).
# @param name A name for 'self'; optional (default: None).
def __init__(self, simulation=None, idf=None, name=None):
if simulation is None and idf is None:
raise RuntimeError("Either 'simulation' or 'idf' must be " +
"passed as keyword arguments.")
else:
if simulation is not None:
Simulation.__init__(self, simulation=simulation)
else:
Simulation.__init__(self, idf=idf, name=name, s_type='steadyState')
## Run the simulation encoded in self on the input model using the input tool.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param tool Name of the tool to use as simulation engine (string).
# @param res A biopredyn.result.Fluxes object.
# @return A biopredyn.result.Fluxes object.
def run(self, model, tool, res):
# tool selection - by default cobrapy is chosen
if tool is None or tool == 'cobrapy':
self.run_as_cobrapy_problem(model, res)
elif tool == 'libfbc':
self.run_as_libfbc_problem(model, res)
else:
      raise NameError("Invalid tool name; available names are 'cobrapy' and " +
        "'libfbc'.")
return res
## Run the simulation encoded in self as a CobraPy model.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.Fluxes object.
# @return A biopredyn.result.Fluxes object.
def run_as_cobrapy_problem(self, model, res):
if res is None:
res = result.Fluxes()
# Case where the encoded simulation is a FBA
if self.algorithm.get_kisao_id() == "KISAO:0000437":
# Run a basic FBA with cobrapy
cobra_model = create_cobra_model_from_sbml_doc(model.get_sbml_doc())
# Optional model parameters are set
obj = self.algorithm.get_parameter_by_name('objective_function')
sense = self.algorithm.get_parameter_by_name('objective_sense')
if obj is not None:
cobra_model.change_objective([obj.get_value()])
if sense is not None:
cobra_model.optimize(objective_sense=sense.get_value())
else:
cobra_model.optimize()
else:
raise NameError("Invalid KiSAO identifier for a steady state " +
"simulation; see http://bioportal.bioontology.org/ontologies/KISAO " +
"for more information about the KiSAO ontology.")
res.import_from_cobrapy_fba(cobra_model.solution)
return res
## Run the simulation encoded in self as a libFBC problem.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.Fluxes object.
# @return A biopredyn.result.Fluxes object
def run_as_libfbc_problem(self, model, res):
if res is None:
res = result.Fluxes()
# Case where the encoded simulation is a FBA
if self.algorithm.get_kisao_id() == "KISAO:0000437":
fbc_model = libfbc.FBAProblem()
fbc_model.initFromSBMLString(model.get_sbml_doc().toSBML())
fbc_model.solveProblem()
else:
raise NameError("Invalid KiSAO identifier for a steady state " +
"simulation; see http://bioportal.bioontology.org/ontologies/KISAO " +
"for more information about the KiSAO ontology.")
res.import_from_libfbc_fba(fbc_model.getSolution())
return res
## Returns the libsedml.SedSteadyState representation of this.
# @param self The object pointer.
# @param level Level of SED-ML language to be used.
# @param version Version of SED-ML language to be used.
# @return A libsedml.SedSteadyState object.
def to_sedml(self, level, version):
st = libsedml.SedSteadyState(level, version)
st.setId(self.get_id())
if self.get_name() is not None:
st.setName(str(self.get_name()))
st.setAlgorithm(self.get_algorithm().to_sedml(level, version))
return st
## Simulation-derived class for uniform time course simulations.
class UniformTimeCourse(Simulation):
## @var initial_time
# Time point where the simulation begins.
## @var number_of_points
# Number of time points to consider between output_start_time and
# output_end_time.
## @var output_end_time
# Time point where both the simulation and the result collection end.
## @var output_start_time
# Time point where the result collection starts; not necessarily the same as
# initial_time.
## Overridden constructor; either 'simulation' or 'idf', 'start', 'end',
## 'out_st' and 'pts' must be passed as keyword arguments.
# @param self The object pointer.
# @param simulation A libsedml.SedUniformTimeCourse element; optional
# (default: None).
# @param idf A unique identifier; optional (default: None).
# @param name A name for 'self'; optional (default: None).
# @param start Time point where the simulation begins; optional (default:
# None).
# @param end Time point where both the simulation and the result collection
# end; optional (default: None).
# @param out_st Time point where the result collection starts; optional
# (default: None).
# @param pts Number of time points between 'out_st' and 'end'; optional
# (default: None).
def __init__(self, simulation=None, idf=None, name=None, start=None, end=None,
out_st=None, pts=None):
if simulation is None and (idf is None or start is None or end is None or
out_st is None or pts is None):
raise RuntimeError("Either 'simulation' or 'idf', 'start', 'end', " +
"'out_st' and 'pts' must be passed as keyword arguments.")
else:
if simulation is not None:
Simulation.__init__(self, simulation=simulation)
self.initial_time = simulation.getInitialTime()
self.number_of_points = simulation.getNumberOfPoints()
self.output_end_time = simulation.getOutputEndTime()
self.output_start_time = simulation.getOutputStartTime()
else:
Simulation.__init__(self, idf=idf, name=name,
s_type='uniformTimeCourse')
self.initial_time = start
self.number_of_points = pts
self.output_end_time = end
self.output_start_time = out_st
## Overridden string representation of this. Displays it as a hierarchy.
# @param self The object pointer.
# @return A string representing this as a hierarchy.
def __str__(self):
tree = " |-" + self.type + " id=" + self.id + " name=" + self.name
    tree += " initialTime=" + str(self.initial_time)
    tree += " numberOfPoints=" + str(self.number_of_points)
    tree += " outputEndTime=" + str(self.output_end_time)
    tree += " outputStartTime=" + str(self.output_start_time) + "\n"
tree += " |-algorithm " + self.algorithm.get_kisao_id() + "\n"
return tree
## Getter. Returns self.initial_time.
# @param self The object pointer.
# @return self.initial_time
def get_initial_time(self):
return self.initial_time
## Getter. Returns self.number_of_points.
# @param self The object pointer.
# @return self.number_of_points
def get_number_of_points(self):
return self.number_of_points
## Getter. Returns self.output_end_time.
# @param self The object pointer.
# @return self.output_end_time
def get_output_end_time(self):
return self.output_end_time
## Getter. Returns self.output_start_time.
# @param self The object pointer.
# @return self.output_start_time
def get_output_start_time(self):
return self.output_start_time
## Run the simulation encoded in self on the input model using the input tool,
## and returns its output as a biopredyn.result.TimeSeries object.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param tool Name of the tool to use as simulation engine (string).
# @param res A biopredyn.result.TimeSeries object.
# @return A biopredyn.result.TimeSeries object.
def run(self, model, tool, res):
# tool selection - by default libsbmlsim is chosen
if tool is None or tool == 'libsbmlsim':
self.run_as_libsbmlsim_time_course(model, res)
elif tool == 'copasi':
self.run_as_copasi_time_course(model, res)
else:
raise NameError("Invalid tool name; available names are 'copasi' and 'libsbmlsim'.")
return res
## Run this as a COPASI time course and import its result.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.TimeSeries object where simulation results
# will be written.
# @param unknowns A list of N identifiers corresponding to the IDs of unknown
# parameters in model. If not None, the simulation will be run with the
# values listed in fitted_values for the unknown parameters. Default: None.
# @param fitted_values A list of N values corresponding to the N unknowns.
# @return A biopredyn.result.TimeSeries object.
def run_as_copasi_time_course(
self, model, res, unknowns=None, fitted_values=None):
if res is None:
res = result.TimeSeries()
steps = self.get_number_of_points()
start = self.get_initial_time()
o_start = self.get_output_start_time()
end = self.get_output_end_time()
step = (end - o_start) / steps
duration = end - start
mod = model.get_sbml_doc()
# Importing model to COPASI
data_model = CCopasiDataModel()
data_model.importSBMLFromString(mod.toSBML())
cop_model = data_model.getModel()
# unknown parameter assignment
if unknowns is not None:
for u in range(len(unknowns)):
unknown = unknowns[u]
for r in range(cop_model.getReactions().size()):
reaction = cop_model.getReaction(r)
for p in range(reaction.getParameters().size()):
param = reaction.getParameters().getParameter(p)
if param.getObjectName() == unknown:
if reaction.isLocalParameter(p): # local case
reaction.setParameterValue(unknown, fitted_values[u])
else: # global case
cop_model.getModelValues().getByName(unknown).setInitialValue(
fitted_values[u])
task = data_model.addTask(CTrajectoryTask.timeCourse)
pbm = task.getProblem()
# Set the parameters
pbm.setOutputStartTime(o_start)
pbm.setStepSize(step)
pbm.setDuration(duration)
pbm.setTimeSeriesRequested(True)
# TODO: acquire KiSAO description of the algorithm
task.setMethodType(CCopasiMethod.deterministic)
# Execution - initial values are used
task.processWithOutputFlags(True, CCopasiTask.ONLY_TIME_SERIES)
# Time series extraction
res.import_from_copasi_time_series(task.getTimeSeries(),
model.get_species_copasi_ids())
return res
## Run this as a libSBMLSim time course and import its result.
# @param self The object pointer.
# @param model A biopredyn.model.Model object.
# @param res A biopredyn.result.TimeSeries object where simulation results
# will be written.
# @return A biopredyn.result.TimeSeries object.
# TODO: add option for setting parameter values before running
def run_as_libsbmlsim_time_course(self, model, res):
if res is None:
res = result.TimeSeries()
steps = self.get_number_of_points()
start = self.get_output_start_time()
end = self.get_output_end_time()
step = (end - start) / steps
mod = model.get_sbml_doc()
# TODO: acquire KiSAO description of the algorithm
r = libsbmlsim.simulateSBMLFromString(
mod.toSBML(),
end,
step,
1,
0,
libsbmlsim.MTHD_RUNGE_KUTTA,
0)
res.import_from_libsbmlsim(r, start)
return res
## Use the parameter of the simulation to estimate the input model parameters
## with respect to the input data file. Uses COPASI as simulation engine.
# @param self The object pointer.
# @param mod A biopredyn.model.Model object.
# @param cal_data Path to a column-aligned CSV file containing the
# calibration data.
# @param val_data Path to a column-aligned CSV file containing the
# validation data.
  # @param observables A list of identifiers corresponding to the IDs of the
# observables to consider (both in model and data file).
  # @param unknowns A list of identifiers corresponding to the IDs of the
# parameters to be estimated in the input model.
# @param min_unknown_values A list of numerical values; lower bound of the
# parameter value ranges.
# @param max_unknown_values A list of numerical values; upper bound of the
# parameter value ranges.
# @param algorithm A CCopasiMethod::SubType object describing the algorithm
# to be used.
# @param rm A biopredyn.resources.ResourceManager object.
  # @return A biopredyn.statistics.Statistics object.
def run_as_parameter_estimation(self, mod, cal_data, val_data, observables,
unknowns, min_unknown_values, max_unknown_values, algorithm, rm):
data_model = CCopasiDataModel()
data_model.importSBMLFromString(mod.get_sbml_doc().toSBML())
# importing data
data = result.TimeSeries()
metabolites = data.import_from_csv_file(cal_data, rm)
steps = len(data.get_time_steps())
# task definition
fit_task = data_model.addTask(CFitTask.parameterFitting)
fit_problem = fit_task.getProblem()
# experiment definition
experiment_set = fit_problem.getParameter("Experiment Set")
experiment = CExperiment(data_model)
experiment.setFileName(cal_data)
experiment.setSeparator(",")
experiment.setFirstRow(1) # offset due to header
experiment.setLastRow(steps + 1)
experiment.setHeaderRow(1)
experiment.setExperimentType(CCopasiTask.timeCourse)
experiment.setNumColumns(len(metabolites))
object_map = experiment.getObjectMap()
object_map.setNumCols(len(metabolites))
model = data_model.getModel()
# assigning roles and names with respect to the content of the data file
index = 0
for name in metabolites:
      if "time" in name.lower():
# case where the current 'metabolite' is time
object_map.setRole(index, CExperiment.time)
time_reference = model.getObject(CCopasiObjectName("Reference=Time"))
object_map.setObjectCN(index, time_reference.getCN().getString())
elif name in observables:
# case where the current metabolite is an observable
for m in range(model.getMetabolites().size()):
meta = model.getMetabolites().get(m)
if (meta.getSBMLId() == name):
metab_object = meta.getObject(
CCopasiObjectName("Reference=Concentration"))
object_map.setRole(index, CExperiment.dependent)
object_map.setObjectCN(index, metab_object.getCN().getString())
index += 1
experiment_set.addExperiment(experiment)
experiment = experiment_set.getExperiment(0)
# definition of the fitted object - i.e. the parameters listed in unknowns
opt_item_group = fit_problem.getParameter("OptimizationItemList")
for u in range(len(unknowns)):
unknown = unknowns[u]
for r in range(model.getReactions().size()):
reaction = model.getReaction(r)
for p in range(reaction.getParameters().size()):
param = reaction.getParameters().getParameter(p)
if param.getObjectName() == unknown:
if reaction.isLocalParameter(p): # case of a local parameter
fit_item = CFitItem(data_model)
fit_item.setObjectCN(
param.getObject(CCopasiObjectName("Reference=Value")).getCN())
fit_item.setStartValue(param.getValue())
fit_item.setLowerBound(
CCopasiObjectName(str(min_unknown_values[u])))
fit_item.setUpperBound(
CCopasiObjectName(str(max_unknown_values[u])))
opt_item_group.addParameter(fit_item)
else: # case of a global parameter
parameter = model.getModelValues().getByName(unknown)
exists = False
for fit in range(opt_item_group.size()):
if opt_item_group.getParameter(fit).getCN() == parameter.getCN():
exists = True # parameter already exists as a CFitItem
break
if not exists:
fit_item = CFitItem(data_model)
fit_item.setObjectCN(parameter.getObject(CCopasiObjectName(
"Reference=InitialValue")).getCN())
fit_item.setStartValue(param.getValue())
fit_item.setLowerBound(
CCopasiObjectName(str(min_unknown_values[u])))
fit_item.setUpperBound(
CCopasiObjectName(str(max_unknown_values[u])))
opt_item_group.addParameter(fit_item)
fit_task.setMethodType(algorithm)
fit_task.processWithOutputFlags(True, CCopasiTask.ONLY_TIME_SERIES)
# extracting values of the fitted parameters
fitted_param = []
for p in range(opt_item_group.size()):
opt_item = opt_item_group.getParameter(p)
fitted_param.append(opt_item.getLocalValue())
# extracting Fisher Information Matrix from fit_problem
fisher = fit_problem.getFisher()
f_mat = []
for row in range(fisher.numRows()):
r = []
for col in range(fisher.numCols()):
r.append(fisher.get(row, col))
f_mat.append(r)
f_mat = np.mat(f_mat)
stats = statistics.Statistics(
val_data, data, copy.deepcopy(self), mod, fit_problem.getSolutionValue(),
observables, unknowns, fitted_param, f_mat, rm)
return stats
## Setter. Assign a new value to self.initial_time.
# @param self The object pointer.
# @param initial_time New value for self.initial_time.
def set_initial_time(self, initial_time):
self.initial_time = initial_time
## Setter. Assign a new value to self.number_of_points.
# @param self The object pointer.
# @param number_of_points New value of self.number_of_points.
def set_number_of_points(self, number_of_points):
self.number_of_points = number_of_points
## Setter. Assign a new value to self.output_end_time.
# @param self The object pointer.
# @param output_end_time New value of self.output_end_time.
def set_output_end_time(self, output_end_time):
self.output_end_time = output_end_time
## Setter. Assign a new value to self.output_start_time.
# @param self The object pointer.
# @param output_start_time New value for self.output_start_time.
def set_output_start_time(self, output_start_time):
self.output_start_time = output_start_time
## Returns the libsedml.SedUniformTimeCourse representation of this.
# @param self The object pointer.
# @param level Level of SED-ML language to be used.
# @param version Version of SED-ML language to be used.
# @return A libsedml.SedUniformTimeCourse object.
def to_sedml(self, level, version):
sim = libsedml.SedUniformTimeCourse(level, version)
sim.setId(self.get_id())
if self.get_name() is not None:
sim.setName(str(self.get_name()))
sim.setInitialTime(self.get_initial_time())
sim.setOutputStartTime(self.get_output_start_time())
sim.setOutputEndTime(self.get_output_end_time())
sim.setNumberOfPoints(self.get_number_of_points())
sim.setAlgorithm(self.get_algorithm().to_sedml(level, version))
return sim
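## Standalone sketch of how run_as_copasi_time_course derives its COPASI
## settings from the SED-ML timing attributes above; no COPASI or libsedml
## objects are involved and the sample numbers are illustrative only.
def _copasi_time_settings(initial_time, output_start_time, output_end_time,
    number_of_points):
  # mirrors: step = (end - o_start) / steps and duration = end - start
  step = (output_end_time - output_start_time) / float(number_of_points)
  duration = output_end_time - initial_time
  return step, duration

if __name__ == '__main__':
  # a 0..10 time course collecting 100 points, with output starting at t=2
  step, duration = _copasi_time_settings(0.0, 2.0, 10.0, 100)
  assert abs(step - 0.08) < 1e-12
  assert duration == 10.0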
|
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
class classinstancemethod(object):
"""
Acts like a class method when called from a class, like an
instance method when called by an instance. The method should
take two arguments, 'self' and 'cls'; one of these will be None
depending on how the method was called.
"""
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, obj, type=None):
return _methodwrapper(self.func, obj=obj, type=type)
class _methodwrapper(object):
def __init__(self, func, obj, type):
self.func = func
self.obj = obj
self.type = type
def __call__(self, *args, **kw):
assert not kw.has_key('self') and not kw.has_key('cls'), (
"You cannot use 'self' or 'cls' arguments to a "
'classinstancemethod')
return self.func(*((self.obj, self.type) + args), **kw)
def __repr__(self):
if self.obj is None:
return ('<bound class method %s.%s>'
% (self.type.__name__, self.func.func_name))
else:
return ('<bound method %s.%s of %r>'
% (self.type.__name__, self.func.func_name, self.obj))
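# Minimal usage sketch for classinstancemethod (the _Greeter class below is
# illustrative only, not part of Paste): the wrapped method receives both
# 'self' and 'cls', with 'self' set to None when called on the class.
class _Greeter(object):
    default_greeting = 'hello'

    def __init__(self, greeting):
        self.greeting = greeting

    def greet(self, cls, name):
        greeting = self.greeting if self is not None else cls.default_greeting
        return '%s, %s' % (greeting, name)
    greet = classinstancemethod(greet)


if __name__ == '__main__':
    assert _Greeter.greet('world') == 'hello, world'
    assert _Greeter('hi').greet('world') == 'hi, world'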
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
A file monitor and server restarter.
Use this like:
.. code-block:: Python
import reloader
reloader.install()
Then make sure your server is run from a shell script like::
err=3
while test "$err" -eq 3 ; do
python server.py
err="$?"
done
or is run from this .bat file (if you use Windows)::
@echo off
:repeat
python server.py
if %errorlevel% == 3 goto repeat
or run a monitoring process in Python (``paster serve --reload`` does
this).
Use the ``watch_file(filename)`` function to cause a reload/restart for
other non-Python files (e.g., configuration files). If you have
a dynamic set of files that grows over time you can use something like::
def watch_config_files():
return CONFIG_FILE_CACHE.keys()
paste.reloader.add_file_callback(watch_config_files)
Then every time the reloader polls files it will call
``watch_config_files`` and check all the filenames it returns.
"""
import os
import sys
import time
import threading
import traceback
from paste.util.classinstance import classinstancemethod
def install(poll_interval=1):
"""
Install the reloading monitor.
On some platforms server threads may not terminate when the main
thread does, causing ports to remain open/locked. The
    ``raise_keyboard_interrupt`` option creates an unignorable signal
    which causes the whole application to shut down (rudely).
"""
mon = Monitor(poll_interval=poll_interval)
t = threading.Thread(target=mon.periodic_reload)
t.setDaemon(True)
t.start()
class Monitor(object):
instances = []
global_extra_files = []
global_file_callbacks = []
def __init__(self, poll_interval):
self.module_mtimes = {}
self.keep_running = True
self.poll_interval = poll_interval
self.extra_files = list(self.global_extra_files)
self.instances.append(self)
self.file_callbacks = list(self.global_file_callbacks)
def periodic_reload(self):
while True:
if not self.check_reload():
# use os._exit() here and not sys.exit() since within a
# thread sys.exit() just closes the given thread and
# won't kill the process; note os._exit does not call
# any atexit callbacks, nor does it do finally blocks,
                # flush open files, etc. In other words, it is rude.
os._exit(3)
break
time.sleep(self.poll_interval)
def check_reload(self):
filenames = list(self.extra_files)
for file_callback in self.file_callbacks:
try:
filenames.extend(file_callback())
except:
print >> sys.stderr, 'Error calling paste.reloader callback %r:' % file_callback
traceback.print_exc()
for module in sys.modules.values():
try:
filename = module.__file__
except (AttributeError, ImportError), exc:
continue
if filename is not None:
filenames.append(filename)
for filename in filenames:
try:
stat = os.stat(filename)
if stat:
mtime = stat.st_mtime
else:
mtime = 0
except (OSError, IOError):
continue
if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
elif filename.endswith('$py.class') and \
os.path.exists(filename[:-9] + '.py'):
mtime = max(os.stat(filename[:-9] + '.py').st_mtime, mtime)
if not self.module_mtimes.has_key(filename):
self.module_mtimes[filename] = mtime
elif self.module_mtimes[filename] < mtime:
from datetime import datetime
print >> sys.stderr, (
'\n%s changed\n%s\nreloading...' % (datetime.now(), filename)
)
return False
return True
def watch_file(self, cls, filename):
"""Watch the named file for changes"""
filename = os.path.abspath(filename)
if self is None:
for instance in cls.instances:
instance.watch_file(filename)
cls.global_extra_files.append(filename)
else:
self.extra_files.append(filename)
watch_file = classinstancemethod(watch_file)
def add_file_callback(self, cls, callback):
"""Add a callback -- a function that takes no parameters -- that will
return a list of filenames to watch for changes."""
if self is None:
for instance in cls.instances:
instance.add_file_callback(callback)
cls.global_file_callbacks.append(callback)
else:
self.file_callbacks.append(callback)
add_file_callback = classinstancemethod(add_file_callback)
if sys.platform.startswith('java'):
try:
from _systemrestart import SystemRestart
except ImportError:
pass
else:
class JythonMonitor(Monitor):
"""
Monitor that utilizes Jython's special
``_systemrestart.SystemRestart`` exception.
When raised from the main thread it causes Jython to reload
the interpreter in the existing Java process (avoiding
startup time).
Note that this functionality of Jython is experimental and
may change in the future.
"""
def periodic_reload(self):
while True:
if not self.check_reload():
raise SystemRestart()
time.sleep(self.poll_interval)
watch_file = Monitor.watch_file
add_file_callback = Monitor.add_file_callback
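# Standalone sketch of the core test performed by Monitor.check_reload above:
# remember each file's modification time and report a change once it grows.
# The '_seen' cache and the demo below are illustrative only.
def _file_changed(filename, _seen={}):
    try:
        mtime = os.stat(filename).st_mtime
    except (OSError, IOError):
        return False
    if filename not in _seen:
        _seen[filename] = mtime
        return False
    return _seen[filename] < mtime


if __name__ == '__main__':
    # two immediate checks of this module's own source report no change
    assert _file_changed(__file__) is False
    assert _file_changed(__file__) is False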
|
|
from fabric.api import hide, run, settings, sudo
from fabric.context_managers import shell_env
from fabric.contrib.files import exists
def _run_cmd(func, cmd, verbose):
"""
Utility function to run commands respecting `use_sudo` and `verbose`.
"""
with shell_env(DEBIAN_FRONTEND='noninteractive'):
if verbose:
return func(cmd)
with settings(hide('everything')):
return func(cmd)
def install(packages, assume_yes=True, no_install_recommends=False,
install_suggests=False, use_sudo=True, verbose=True):
"""
Install packages on the remote host via Apt.
Args:
packages (list or str): The packages to install.
        no_install_recommends (bool): Apt will not consider recommended packages
            as dependencies for installing. (Default: `False`)
install_suggests (bool): Apt will consider suggested packages as a
dependency for installing. (Default: `False`)
assume_yes (bool): If `True`, Apt will assume "yes" as answer to all
prompts and run non-interactively. (Default: `True`)
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `True`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
if not isinstance(packages, str):
packages = ' '.join(packages)
if assume_yes:
yes = '--yes'
else:
yes = ''
if no_install_recommends:
recommends = '--no-install-recommends'
else:
recommends = ''
if install_suggests:
suggests = '--install-suggests'
else:
suggests = ''
func = use_sudo and sudo or run
cmd = 'apt-get install {0} {1} {2} {3}'.format(yes,
recommends,
suggests,
packages)
return _run_cmd(func, cmd, verbose)
def update(use_sudo=True, verbose=True):
"""
Update Apt's package index files on the remote host.
Args:
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `True`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
func = use_sudo and sudo or run
cmd = 'apt-get update'
return _run_cmd(func, cmd, verbose)
def upgrade(assume_yes=True, use_sudo=True, verbose=True):
"""
Install the newest versions of all packages on the remote host.
Args:
assume_yes (bool): If `True`, Apt will assume "yes" as answer to all
prompts and run non-interactively. (Default: `True`)
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `True`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
if assume_yes:
yes = '--yes'
else:
yes = ''
func = use_sudo and sudo or run
cmd = 'apt-get upgrade {0}'.format(yes)
return _run_cmd(func, cmd, verbose)
def dist_upgrade(assume_yes=True, use_sudo=True, verbose=True):
"""
Same as `upgrade`, but Apt will attempt to intelligently handle changing
dependencies, installing new dependencies as needed.
Args:
assume_yes (bool): If `True`, Apt will assume "yes" as answer to all
prompts and run non-interactively. (Default: `True`)
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `True`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
if assume_yes:
yes = '--yes'
else:
yes = ''
func = use_sudo and sudo or run
cmd = 'apt-get dist-upgrade {0}'.format(yes)
return _run_cmd(func, cmd, verbose)
def remove(packages, purge=False, assume_yes=True, use_sudo=True,
verbose=True):
"""
Remove a package or list of packages from the remote host.
Args:
        packages (list or str): The packages to remove.
purge (bool): If `True` any configuration files are deleted too.
(Default: `False`)
assume_yes (bool): If `True`, Apt will assume "yes" as answer to all
prompts and run non-interactively. (Default: `True`)
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `True`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
if not isinstance(packages, str):
packages = ' '.join(packages)
if assume_yes:
yes = '--yes'
else:
yes = ''
if purge:
purge = '--purge'
else:
purge = ''
func = use_sudo and sudo or run
cmd = 'apt-get remove {0} {1} {2}'.format(yes, purge, packages)
return _run_cmd(func, cmd, verbose)
def clean(use_sudo=True, verbose=True):
"""
Clears out retrieved package files.
Args:
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `True`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
func = use_sudo and sudo or run
cmd = 'apt-get clean'
return _run_cmd(func, cmd, verbose)
def autoclean(use_sudo=True, verbose=True):
"""
Like `clean`, but only removes package files that can no longer
be downloaded.
Args:
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `True`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
func = use_sudo and sudo or run
cmd = 'apt-get autoclean'
return _run_cmd(func, cmd, verbose)
def autoremove(assume_yes=True, use_sudo=True, verbose=True):
    """
    Remove packages that were automatically installed to satisfy dependencies
    and are no longer needed.
    Args:
assume_yes (bool): If `True`, Apt will assume "yes" as answer to all
prompts and run non-interactively. (Default: `True`)
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `True`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
if assume_yes:
yes = '--yes'
else:
yes = ''
func = use_sudo and sudo or run
cmd = 'apt-get autoremove {0}'.format(yes)
return _run_cmd(func, cmd, verbose)
def source(package, download_only=False, use_sudo=False, verbose=True):
"""
Download a given source package.
Args:
package (str): The source package to download.
download_only (bool): If `True`, the source package will not be
unpacked. (Default: `False`)
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `False`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
if download_only:
download = '--download-only'
else:
download = ''
func = use_sudo and sudo or run
cmd = 'apt-get source {0} {1}'.format(download, package)
return _run_cmd(func, cmd, verbose)
def build_dep(package, assume_yes=True, use_sudo=True, verbose=True):
"""
Install the build dependencies for a given source package.
Args:
package (str): The package whose build dependencies will be installed.
assume_yes (bool): If `True`, Apt will assume "yes" as answer to all
prompts and run non-interactively. (Default: `True`)
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `True`)
verbose (bool): If `False`, hide all output. (Default: `True`)
"""
if assume_yes:
yes = '--yes'
else:
yes = ''
func = use_sudo and sudo or run
cmd = 'apt-get build-dep {0} {1}'.format(yes, package)
return _run_cmd(func, cmd, verbose)
def reboot_required(use_sudo=False, verbose=False):
"""
    Check if a reboot is required after installing updates.
Returns `True` if a reboot is required, `False` if not.
Args:
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
(Default: `False`)
verbose (bool): If `False`, hide all output. (Default: `False`)
"""
return exists('/var/run/reboot-required',
use_sudo=use_sudo,
verbose=verbose)
def installed(package, use_sudo=True):
"""
Check if a package is installed on the system.
Returns `True` if installed, `False` if it is not.
Args:
package (str): The package to check if installed.
use_sudo (bool): If `True`, will use `sudo` instead of `run`.
            (Default: `True`)
"""
func = use_sudo and sudo or run
cmd = "dpkg -s {0}".format(package)
with settings(warn_only=True):
installed = _run_cmd(func, cmd, verbose=False)
if installed.find("install ok installed") > -1:
return True
return False
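# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical Fabric task built only from the helpers defined above; it
# assumes the usual Fabric context (`run`, `sudo`, `settings`, `exists` and
# `_run_cmd`) that the rest of this module relies on. The package name is
# just an example.
def example_cleanup_task():
    """Purge an old package, prune unused dependencies and tidy the apt cache."""
    if installed('nginx'):
        remove('nginx', purge=True)
    autoremove()
    autoclean()
    if reboot_required():
        print('A reboot is required to finish applying updates.')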
|
|
from colormath import color_objects, color_conversions
from spectra.grapefruit import Color as GC
convert_color = color_conversions.convert_color
COLOR_SPACES = {
"lab": color_objects.LabColor,
"rgb": color_objects.sRGBColor,
"lch": color_objects.LCHabColor,
"xyz": color_objects.XYZColor,
"hsl": color_objects.HSLColor,
"hsv": color_objects.HSVColor,
"cmy": color_objects.CMYColor,
"cmyk": color_objects.CMYKColor
}
class Color(object):
"""
Represents a color in a given color space.
"""
def __init__(self, space, *values):
"""
:param str space: Name of the color space.
"""
self.values = values
self.space = space
self.color_object = COLOR_SPACES[space](*values)
_rgb = self.color_object if space == "rgb" else self.to("rgb").color_object
self.rgb = _rgb.get_value_tuple()
self.clamped_rgb = (_rgb.clamped_rgb_r, _rgb.clamped_rgb_g, _rgb.clamped_rgb_b)
        self.rgb_clamped = self.clamped_rgb
@classmethod
def from_html(cls, html_string):
"""
Create sRGB color from a web-color name or hexcode.
:param str html_string: Web-color name or hexcode.
:rtype: Color
:returns: A spectra.Color in the sRGB color space.
"""
rgb = GC.NewFromHtml(html_string).rgb
return cls("rgb", *rgb)
def to(self, space):
"""
Convert color to a different color space.
:param str space: Name of the color space.
:rtype: Color
:returns: A new spectra.Color in the given color space.
"""
if space == self.space: return self
new_color = convert_color(self.color_object, COLOR_SPACES[space])
return self.__class__(space, *new_color.get_value_tuple())
@property
def hexcode(self):
"""
Get this color's corresponding RGB hex.
:rtype: str
        :returns: A hex color string of the form "#rrggbb".
"""
return COLOR_SPACES["rgb"](*self.clamped_rgb).get_rgb_hex()
def blend(self, other, ratio=0.5):
"""
Blend this color with another color in the same color space.
By default, blends the colors half-and-half (ratio: 0.5).
:param Color other: The color to blend.
:param float ratio: How much to blend (0 -> 1).
:rtype: Color
:returns: A new spectra.Color
"""
keep = 1.0 - ratio
if not self.space == other.space:
raise Exception("Colors must belong to the same color space.")
values = tuple(((u * keep) + (v * ratio)
for u, v in zip(self.values, other.values)))
return self.__class__(self.space, *values)
def brighten(self, amount=10):
"""
Brighten this color by `amount` luminance.
Converts this color to the LCH color space, and then
increases the `L` parameter by `amount`.
:param float amount: Amount to increase the luminance.
:rtype: Color
:returns: A new spectra.Color
"""
lch = self.to("lch")
l, c, h = lch.values
new_lch = self.__class__("lch", l + amount, c, h)
return new_lch.to(self.space)
def darken(self, amount=10):
"""
Darken this color by `amount` luminance.
Converts this color to the LCH color space, and then
decreases the `L` parameter by `amount`.
:param float amount: Amount to decrease the luminance.
:rtype: Color
:returns: A new spectra.Color
"""
return self.brighten(amount=-amount)
def saturate(self, amount=10):
"""
Saturate this color by `amount` chroma.
Converts this color to the LCH color space, and then
increases the `C` parameter by `amount`.
:param float amount: Amount to increase the chroma.
:rtype: Color
:returns: A new spectra.Color
"""
lch = self.to("lch")
l, c, h = lch.values
new_lch = self.__class__("lch", l, c + amount, h)
return new_lch.to(self.space)
def desaturate(self, amount=10):
"""
Desaturate this color by `amount` chroma.
Converts this color to the LCH color space, and then
decreases the `C` parameter by `amount`.
:param float amount: Amount to decrease the chroma.
:rtype: Color
:returns: A new spectra.Color
"""
return self.saturate(amount=-amount)
class Scale(object):
"""
Represents a color scale.
"""
def __init__(self, colors, domain=None):
"""
:param list colors: List of two or more spectra.Colors, or web-color/hexcode strings.
:param domain: List of two or more numbers.
:type domain: list or None
"""
_colors = [ c if isinstance(c, Color) else Color.from_html(c)
for c in colors ]
self.colors = _colors
# Set domain
n = len(_colors)
self._domain = domain or [ float(x) / (n - 1) for x in range(n) ]
# Check whether domain is correct length.
if len(self._domain) != n:
raise ValueError("len(domain) must equal len(colors)")
def __call__(self, number):
"""
Return the color corresponding to the given `number`.
:param float number: The number to color-ify.
:rtype: Color
:returns: A spectra.Color
"""
if number < self._domain[0] or number > self._domain[-1]:
msg = "Number ({0}) not in domain ({1} -> {2})."
raise ValueError(msg.format(number, self._domain[0], self._domain[-1]))
segments = zip(self._domain[:-1], self._domain[1:])
for i, seg in enumerate(segments):
x0, x1 = seg
if number >= x0 and number <= x1:
num_range = x1 - x0
prop = float(number - x0) / num_range
return self.colors[i].blend(self.colors[i+1], prop)
def domain(self, domain):
"""
Create a new scale with the given domain.
:param list domain: A list of floats.
:rtype: Scale
:returns: A new color.Scale object.
"""
return self.__class__(self.colors, domain)
def get_domain(self):
"""
List this scale's domain.
:rtype: list
:returns: A list of numbers.
"""
return self._domain
def colorspace(self, space):
"""
Create a new scale in the given color space.
:param str space: The new color space.
:rtype: Scale
:returns: A new color.Scale object.
"""
new_colors = [ c.to(space) for c in self.colors ]
return self.__class__(new_colors, self._domain)
def range(self, count):
"""
Create a list of colors evenly spaced along this scale's domain.
:param int count: The number of colors to return.
:rtype: list
:returns: A list of spectra.Color objects.
"""
if count <= 1:
raise ValueError("Range size must be greater than 1.")
dom = self._domain
distance = dom[-1] - dom[0]
props = [ self(dom[0] + distance * float(x)/(count-1))
for x in range(count) ]
return props
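# --- Minimal usage sketch (not part of the original module) ---
# Demonstrates the Color and Scale APIs defined above; the color names and
# domain values below are arbitrary examples.
if __name__ == "__main__":
    red = Color.from_html("red")
    blue = Color.from_html("#0000ff")
    purple = red.blend(blue, ratio=0.5)            # both colors are sRGB here
    print(purple.hexcode)                          # blended hexcode
    heat = Scale(["yellow", "red"]).domain([0, 100])
    print(heat(25).hexcode)                        # color for a single value
    print([c.hexcode for c in heat.range(5)])      # 5 evenly spaced swatches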
|
|
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest, functional
from botocore.exceptions import ClientError
import json, time
class TestSqsAction(BaseTest):
@functional
def test_sqs_delete(self):
session_factory = self.replay_flight_data(
'test_sqs_delete')
client = session_factory().client('sqs')
client.create_queue(QueueName='test-sqs')
queue_url = client.get_queue_url(QueueName='test-sqs')['QueueUrl']
p = self.load_policy({
'name': 'sqs-delete',
'resource': 'sqs',
'filters': [{'QueueUrl': queue_url}],
'actions': [
{'type': 'delete'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertRaises(
ClientError,
client.purge_queue, QueueUrl=queue_url)
@functional
def test_sqs_set_encryption(self):
session_factory = self.replay_flight_data(
'test_sqs_set_encryption')
client_sqs = session_factory().client('sqs')
client_sqs.create_queue(QueueName='sqs-test')
queue_url = client_sqs.get_queue_url(QueueName='sqs-test')['QueueUrl']
self.addCleanup(client_sqs.delete_queue, QueueUrl=queue_url)
client_kms = session_factory().client('kms')
key_id = client_kms.create_key(Description='West SQS encryption key')['KeyMetadata']['KeyId']
client_kms.create_alias(
AliasName='alias/new-key-test-sqs',
TargetKeyId=key_id)
self.addCleanup(client_kms.disable_key, KeyId=key_id)
p = self.load_policy({
'name': 'sqs-delete',
'resource': 'sqs',
'filters': [{'QueueUrl': queue_url}],
'actions': [
{'type': 'set-encryption',
'key': 'new-key-test-sqs'}]},
session_factory=session_factory)
resources = p.run()
check_master_key = client_sqs.get_queue_attributes(
QueueUrl=queue_url,
AttributeNames=['All'])['Attributes']['KmsMasterKeyId']
self.assertEqual(check_master_key, 'c4816d44-73c3-4eed-a7cc-d52a74fa3294')
@functional
def test_sqs_remove_matched(self):
session_factory = self.replay_flight_data('test_sqs_remove_matched')
client = session_factory().client('sqs')
name = 'test-sqs-remove-matched-1'
queue_url = client.create_queue(QueueName=name)['QueueUrl']
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
client.set_queue_attributes(
QueueUrl=queue_url,
Attributes={'Policy':json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::123456789012:root"
},
"Action": [
"sqs:Subscribe"
]
},
{
"Sid": "Public",
"Effect": "Allow",
"Principal": "*",
"Action": [
"sqs:GetqueueAttributes"
]
}
]
})}
)
p = self.load_policy({
'name': 'sqs-rm-matched',
'resource': 'sqs',
'filters': [
{'QueueUrl': queue_url},
{'type': 'cross-account',
'whitelist': ["123456789012"]}
],
'actions': [
{'type': 'remove-statements',
'statement_ids': 'matched'}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual([r['QueueUrl'] for r in resources], [queue_url])
data = json.loads(client.get_queue_attributes(QueueUrl=resources[0]['QueueUrl'], AttributeNames=['Policy'])['Attributes']['Policy'])
self.assertEqual(
[s['Sid'] for s in data.get('Statement', ())],
['SpecificAllow'])
@functional
def test_sqs_remove_named(self):
session_factory = self.replay_flight_data('test_sqs_remove_named')
client = session_factory().client('sqs')
name = 'test-sqs-remove-named'
queue_url = client.create_queue(QueueName=name)['QueueUrl']
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
client.set_queue_attributes(
QueueUrl=queue_url,
Attributes={'Policy':json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::644160558196:root"
},
"Action": ["sqs:Subscribe"]
},
{
"Sid": "RemoveMe",
"Effect": "Allow",
"Principal": "*",
"Action": ["sqs:GetqueueAttributes"]
}
]
})}
)
p = self.load_policy({
'name': 'sqs-rm-named',
'resource': 'sqs',
'filters': [{'QueueUrl': queue_url}],
'actions': [
{'type': 'remove-statements',
'statement_ids': ['RemoveMe']}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
data = json.loads(client.get_queue_attributes(QueueUrl=resources[0]['QueueUrl'], AttributeNames=['Policy'])['Attributes']['Policy'])
self.assertTrue('RemoveMe' not in [s['Sid'] for s in data.get('Statement', ())])
@functional
def test_sqs_mark_for_op(self):
session_factory = self.replay_flight_data('test_sqs_mark_for_op')
client = session_factory().client('sqs')
name = 'test-sqs'
queue_url = client.create_queue(QueueName=name)['QueueUrl']
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
p = self.load_policy({
'name': 'sqs-mark-for-op',
'resource': 'sqs',
'filters': [{'QueueUrl': queue_url}],
'actions': [
{'type': 'mark-for-op',
'tag': 'tag-for-op',
'op': 'delete',
'days': 1}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
tags_after_run = client.list_queue_tags(
QueueUrl=queue_url).get('Tags', {})
self.assertTrue("tag-for-op" in tags_after_run)
@functional
def test_sqs_tag(self):
session_factory = self.replay_flight_data('test_sqs_tags')
client = session_factory().client('sqs')
name = 'test-sqs'
queue_url = client.create_queue(QueueName=name)['QueueUrl']
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
p = self.load_policy({
'name': 'sqs-mark-for-op',
'resource': 'sqs',
'filters': [{'QueueUrl': queue_url}],
'actions': [
{'type': 'tag',
'key': 'tag-this-queue',
'value': 'This queue has been tagged'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
tags_after_run = client.list_queue_tags(
QueueUrl=queue_url).get('Tags', {})
self.assertTrue("tag-this-queue" in tags_after_run)
@functional
def test_sqs_remove_tag(self):
session_factory = self.replay_flight_data('test_sqs_remove_tag')
client = session_factory().client('sqs')
name = 'test-sqs'
queue_url = client.create_queue(QueueName=name)['QueueUrl']
client.tag_queue(
QueueUrl=queue_url,
Tags={
'remove-this-tag': 'tag to be removed'
})
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
p = self.load_policy({
'name': 'sqs-mark-for-op',
'resource': 'sqs',
'filters': [{'QueueUrl': queue_url}],
'actions': [
{'type': 'remove-tag',
'tags': ['remove-this-tag']}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
tags_after_run = client.list_queue_tags(
QueueUrl=queue_url).get('Tags', {})
self.assertTrue("remove-this-tag" not in tags_after_run)
@functional
def test_sqs_marked_for_op(self):
session_factory = self.replay_flight_data('test_sqs_marked_for_op')
client = session_factory().client('sqs')
name = 'test-sqs'
queue_url = client.create_queue(QueueName=name)['QueueUrl']
client.tag_queue(
QueueUrl=queue_url,
Tags={
'tag-for-op': 'Resource does not meet policy: delete@2017/11/01'
})
self.addCleanup(client.delete_queue, QueueUrl=queue_url)
p = self.load_policy({
'name': 'sqs-marked-for-op',
'resource': 'sqs',
'filters': [
{'type': 'marked-for-op', 'tag': 'tag-for-op',
'op': 'delete'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
|
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# helper.py
"""
Helper and utility functions.
"""
from __future__ import division
import numpy as np
def parse_smi(files):
"""
Return parsed files as list of dicts.
    .. note::
        parse_smi uses the x and y coordinates of the left eye.
Parameters
----------
files : sequence of str
file names. For every subject one tab separated file.
Returns
-------
subjects : sequence of dicts
        every dict has a value for "data", "msg" and "file_name".
        * A value in "data" consists of the x, y and time coordinates.
        * A value in "msg" consists of the time coordinate and a message string.
        * "file_name" contains the file name.
Raises
------
IOError : wrong format
        If the format of the tab separated file does not fit the assumptions,
parse_smi raises an IOError.
"""
subjects = list()
for f in files:
with open(f, "r") as data_file:
header_found = False
data = list()
msg = list()
for line in data_file:
# skip all commentary
if line[0] == "#":
continue
parts = line.split("\t")
if parts[0] == "Time":
header_found = True
print(line)
# check header
if (parts[3] != "L POR X [px]" or
parts[4] != "L POR Y [px]"):
raise IOError("Header of %s has wrong format." % f)
continue
if parts[1] == "MSG":
print(line)
msg_point = (float(parts[0]), parts[3])
msg.append(msg_point)
continue
if parts[1] == "SMP":
if not header_found:
raise IOError("No header was found before the first line of data.")
data_point = (float(parts[3]), # X left eye
float(parts[4]), # Y left eye
float(parts[0])) # time
data.append(data_point)
continue
subjects.append({"data": data, "msg": msg, "file_name": f})
return subjects
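# --- Illustrative sketch (not from the original file) ---
# A minimal example of the tab separated layout parse_smi() expects, inferred
# from the checks above: commentary lines start with '#', the header row has
# "Time" in the first column and the left-eye POR columns at positions 3 and 4,
# MSG rows carry a message string, and SMP rows carry the samples. Real SMI
# exports contain more columns; this shows only the assumed minimum.
EXAMPLE_SMI_LINES = [
    "## commentary lines are skipped",
    "Time\tType\tTrial\tL POR X [px]\tL POR Y [px]",
    "1000\tMSG\t1\t# Message: video1.avi",
    "1016\tSMP\t1\t512.3\t384.7",
]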
def parse_coord(files):
"""
Return parsed files as list of np.arrays.
Parameters
----------
files : sequence of str
file names. For every subject one tab separated file.
Returns
-------
Sequence of np.arrays. One array per subject. With (x, y, t)
"""
subjects = list()
for f in files:
raw = np.loadtxt(f, skiprows=2)
clean = raw[raw[:, 3] != 0]
unordered = clean[:, :3] # remove last column
data = unordered[:, (1, 2, 0)] # (t, x, y) -> (x, y, t)
subjects.append(data)
return subjects
def extract_video_data(subjects, video):
"""
    Extract eye samples for a video by collecting all samples between the
    message that the video starts and the next message.
.. note::
The exported smi text file must include the messages.
Parameters
----------
subjects : sequence
sequence of dicts including the data and the messages for each subject
(the format smi_parse returns)
video : string
name of the video used in the message output
Returns
-------
    Sequence of np.arrays. The first n_1 arrays correspond to the first
    subject (where n_1, n_2, ... are the numbers of repetitions per subject),
    the next n_2 arrays correspond to the second subject, and so on.
"""
gaze_data = list()
for subject in subjects:
data = np.array(subject["data"])
msgs = np.array(subject["msg"])
starts = list()
stops = list()
for i, msg in enumerate(msgs):
if msg[1].split(" ")[2].strip() == video:
starts.append(float(msg[0]))
try:
stops.append(float(msgs[i+1][0]))
except IndexError:
pass
for i, start in enumerate(starts):
try:
stop = stops[i]
                # slice interval
tmp = data[data[:,2] > start,:]
tmp = tmp[tmp[:,2] < stop,:]
# adjust time
tmp[:,2] -= start
gaze_data.append(tmp)
except IndexError:
                # slice interval
tmp = data[data[:,2] > start,:]
# adjust time
tmp[:,2] -= start
gaze_data.append(tmp)
return gaze_data
def slice_time_window(gaze_data, t=112.5, dt=225):
"""
Returns a sliced np.array.
    The slice of gaze_data is centered on time t and covers an interval of
    length dt, i.e. dt/2 in both directions.
Parameters
----------
gaze_data : np.array
data to be sliced with columns x, y, t
t : float
center time of the slice
dt : float
width of the slice. The slice will extend dt/2 in both directions of t.
Returns
-------
slice : np.array
        slice of gaze_data
"""
slice_ = gaze_data[gaze_data[:,2] >= t - dt/2]
if len(slice_) == 0:
print("WARNING: empty slice")
return slice_
slice_ = slice_[slice_[:,2] <= t + dt/2]
if len(slice_) == 0:
print("WARNING: empty slice")
return slice_
def random_uniform_sample(n, screen_res, t, dt):
norm_sample_x = np.random.uniform(0, screen_res[0], n)
norm_sample_y = np.random.uniform(0, screen_res[1], n)
norm_sample_t = np.random.uniform(t - dt/2, t + dt/2, n)
return np.array((norm_sample_x, norm_sample_y, norm_sample_t)).transpose()
def velocity(gaze_data, dt_max, euclidean=False):
"""
Calculates the velocity of the gazes in gaze_data.
Parameters
----------
gaze_data : np.array
gaze data that are used to generate the velocities
dt_max : float
        if the time difference between two measurements in gaze_data is larger
        than dt_max, the corresponding velocity is dropped
Returns
-------
velocities : np.array
v_r, v_phi, t where v_r and v_phi are the components of the velocity in
        polar coordinates and t is the mean of the times of the underlying locations
(v_x, v_y, v_t if euclidean=True)
"""
sorted_gaze_data = gaze_data[np.argsort(gaze_data[:, 2])] # sort along time
dt = sorted_gaze_data[1:, 2] - sorted_gaze_data[:-1, 2] # might produce zeros
dx = sorted_gaze_data[1:, 0] - sorted_gaze_data[:-1, 0]
dy = sorted_gaze_data[1:, 1] - sorted_gaze_data[:-1, 1]
# keep only velocities where dt is strictly greater than zero and less or
# equal to dt_max
mask = (0 < dt) & (dt <= dt_max)
dt = dt[mask]
v_x = dx[mask] / dt
v_y = dy[mask] / dt
v_t = sorted_gaze_data[:-1, 2][mask] + dt / 2
del mask
if euclidean:
return np.array((v_x, v_y, v_t)).transpose()
# else use polar coordinates
v_r = np.sqrt(v_x ** 2 + v_y ** 2)
v_phi = np.empty_like(v_r)
mask = v_r != 0
notmask = v_r == 0
v_phi[mask] = np.arctan2(v_y[mask], v_x[mask])
v_phi[notmask] = np.NaN
return np.array((v_r, v_phi, v_t)).transpose()
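# --- Minimal usage sketch (not part of the original module) ---
# Synthetic gaze samples with columns (x, y, t); the time step between samples
# is 4, so dt_max=10 keeps every pair. Polar output rows are (v_r, v_phi, v_t).
if __name__ == "__main__":
    example_gaze = np.array([[100.0, 100.0, 0.0],
                             [103.0, 104.0, 4.0],
                             [110.0, 104.0, 8.0]])
    print(velocity(example_gaze, dt_max=10))
    print(velocity(example_gaze, dt_max=10, euclidean=True))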
|
|
import sys
import unittest
from datetime import datetime
import mock
from libcloud.common.aws import AWSRequestSignerAlgorithmV4
from libcloud.common.aws import SignedAWSConnection
from libcloud.common.aws import UNSIGNED_PAYLOAD
from libcloud.test import LibcloudTestCase
class EC2MockDriver(object):
region_name = 'my_region'
class AWSRequestSignerAlgorithmV4TestCase(LibcloudTestCase):
def setUp(self):
SignedAWSConnection.driver = EC2MockDriver()
SignedAWSConnection.service_name = 'my_service'
SignedAWSConnection.version = '2013-10-15'
self.connection = SignedAWSConnection('my_key', 'my_secret')
self.signer = AWSRequestSignerAlgorithmV4(access_key='my_key',
access_secret='my_secret',
version='2013-10-15',
connection=self.connection)
SignedAWSConnection.action = '/my_action/'
SignedAWSConnection.driver = EC2MockDriver()
self.now = datetime(2015, 3, 4, hour=17, minute=34, second=52)
def test_v4_signature(self):
params = {
'Action': 'DescribeInstances',
'Version': '2013-10-15'
}
headers = {
'Host': 'ec2.eu-west-1.amazonaws.com',
'Accept-Encoding': 'gzip,deflate',
'X-AMZ-Date': '20150304T173452Z',
'User-Agent': 'libcloud/0.17.0 (Amazon EC2 (eu-central-1)) '
}
dt = self.now
sig = self.signer._get_authorization_v4_header(params=params,
headers=headers,
dt=dt,
method='GET',
path='/my_action/')
self.assertEqual(sig, 'AWS4-HMAC-SHA256 '
'Credential=my_key/20150304/my_region/my_service/aws4_request, '
'SignedHeaders=accept-encoding;host;user-agent;x-amz-date, '
'Signature=f9868f8414b3c3f856c7955019cc1691265541f5162b9b772d26044280d39bd3')
def test_v4_signature_contains_user_id(self):
sig = self.signer._get_authorization_v4_header(params={}, headers={},
dt=self.now)
self.assertIn('Credential=my_key/', sig)
def test_v4_signature_contains_credential_scope(self):
with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_credential_scope') as mock_get_creds:
mock_get_creds.return_value = 'my_credential_scope'
sig = self.signer._get_authorization_v4_header(params={}, headers={}, dt=self.now)
self.assertIn('Credential=my_key/my_credential_scope, ', sig)
def test_v4_signature_contains_signed_headers(self):
with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_signed_headers') as mock_get_headers:
mock_get_headers.return_value = 'my_signed_headers'
sig = self.signer._get_authorization_v4_header({}, {}, self.now,
method='GET',
path='/')
self.assertIn('SignedHeaders=my_signed_headers, ', sig)
def test_v4_signature_contains_signature(self):
with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_signature') as mock_get_signature:
mock_get_signature.return_value = 'my_signature'
sig = self.signer._get_authorization_v4_header({}, {}, self.now)
self.assertIn('Signature=my_signature', sig)
def test_get_signature_(self):
def _sign(key, msg, hex=False):
if hex:
return 'H|%s|%s' % (key, msg)
else:
return '%s|%s' % (key, msg)
with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_key_to_sign_with') as mock_get_key:
with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_string_to_sign') as mock_get_string:
with mock.patch('libcloud.common.aws._sign', new=_sign):
mock_get_key.return_value = 'my_signing_key'
mock_get_string.return_value = 'my_string_to_sign'
sig = self.signer._get_signature({}, {}, self.now,
method='GET', path='/', data=None)
self.assertEqual(sig, 'H|my_signing_key|my_string_to_sign')
def test_get_string_to_sign(self):
with mock.patch('hashlib.sha256') as mock_sha256:
mock_sha256.return_value.hexdigest.return_value = 'chksum_of_canonical_request'
to_sign = self.signer._get_string_to_sign({}, {}, self.now,
method='GET', path='/', data=None)
self.assertEqual(to_sign,
'AWS4-HMAC-SHA256\n'
'20150304T173452Z\n'
'20150304/my_region/my_service/aws4_request\n'
'chksum_of_canonical_request')
def test_get_key_to_sign_with(self):
def _sign(key, msg, hex=False):
return '%s|%s' % (key, msg)
with mock.patch('libcloud.common.aws._sign', new=_sign):
key = self.signer._get_key_to_sign_with(self.now)
self.assertEqual(key, 'AWS4my_secret|20150304|my_region|my_service|aws4_request')
def test_get_signed_headers_contains_all_headers_lowercased(self):
headers = {'Content-Type': 'text/plain', 'Host': 'my_host', 'X-Special-Header': ''}
signed_headers = self.signer._get_signed_headers(headers)
self.assertIn('content-type', signed_headers)
self.assertIn('host', signed_headers)
self.assertIn('x-special-header', signed_headers)
def test_get_signed_headers_concats_headers_sorted_lexically(self):
headers = {'Host': 'my_host', 'X-Special-Header': '', '1St-Header': '2', 'Content-Type': 'text/plain'}
signed_headers = self.signer._get_signed_headers(headers)
self.assertEqual(signed_headers, '1st-header;content-type;host;x-special-header')
def test_get_credential_scope(self):
scope = self.signer._get_credential_scope(self.now)
self.assertEqual(scope, '20150304/my_region/my_service/aws4_request')
def test_get_canonical_headers_joins_all_headers(self):
headers = {
'accept-encoding': 'gzip,deflate',
'host': 'my_host',
}
self.assertEqual(self.signer._get_canonical_headers(headers),
'accept-encoding:gzip,deflate\n'
'host:my_host\n')
def test_get_canonical_headers_sorts_headers_lexically(self):
headers = {
'accept-encoding': 'gzip,deflate',
'host': 'my_host',
'1st-header': '2',
'x-amz-date': '20150304T173452Z',
'user-agent': 'my-ua'
}
self.assertEqual(self.signer._get_canonical_headers(headers),
'1st-header:2\n'
'accept-encoding:gzip,deflate\n'
'host:my_host\n'
'user-agent:my-ua\n'
'x-amz-date:20150304T173452Z\n')
def test_get_canonical_headers_lowercases_headers_names(self):
headers = {
'Accept-Encoding': 'GZIP,DEFLATE',
'User-Agent': 'My-UA'
}
self.assertEqual(self.signer._get_canonical_headers(headers),
'accept-encoding:GZIP,DEFLATE\n'
'user-agent:My-UA\n')
def test_get_canonical_headers_trims_header_values(self):
# TODO: according to AWS spec (and RFC 2616 Section 4.2.) excess whitespace
# from inside non-quoted strings should be stripped. Now we only strip the
# start and end of the string. See
# http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
headers = {
'accept-encoding': ' gzip,deflate',
'user-agent': 'libcloud/0.17.0 '
}
self.assertEqual(self.signer._get_canonical_headers(headers),
'accept-encoding:gzip,deflate\n'
'user-agent:libcloud/0.17.0\n')
def test_get_request_params_joins_params_sorted_lexically(self):
self.assertEqual(self.signer._get_request_params({
'Action': 'DescribeInstances',
'Filter.1.Name': 'state',
'Version': '2013-10-15'
}),
'Action=DescribeInstances&Filter.1.Name=state&Version=2013-10-15')
def test_get_canonical_headers_allow_numeric_header_value(self):
headers = {
'Accept-Encoding': 'gzip,deflate',
'Content-Length': 314
}
self.assertEqual(self.signer._get_canonical_headers(headers),
'accept-encoding:gzip,deflate\n'
'content-length:314\n')
def test_get_request_params_allows_integers_as_value(self):
self.assertEqual(self.signer._get_request_params({'Action': 'DescribeInstances', 'Port': 22}),
'Action=DescribeInstances&Port=22')
def test_get_request_params_urlquotes_params_keys(self):
self.assertEqual(self.signer._get_request_params({'Action+Reaction': 'DescribeInstances'}),
'Action%2BReaction=DescribeInstances')
def test_get_request_params_urlquotes_params_values(self):
self.assertEqual(self.signer._get_request_params({
'Action': 'DescribeInstances&Addresses',
'Port-Range': '2000 3000'
}),
'Action=DescribeInstances%26Addresses&Port-Range=2000%203000')
def test_get_request_params_urlquotes_params_values_allows_safe_chars_in_value(self):
# http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
self.assertEqual('Action=a~b.c_d-e',
self.signer._get_request_params({'Action': 'a~b.c_d-e'}))
def test_get_payload_hash_returns_digest_of_empty_string_for_GET_requests(self):
SignedAWSConnection.method = 'GET'
self.assertEqual(self.signer._get_payload_hash(method='GET'),
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
def test_get_payload_hash_with_data_for_PUT_requests(self):
SignedAWSConnection.method = 'PUT'
self.assertEqual(self.signer._get_payload_hash(method='PUT', data='DUMMY'),
'ceec12762e66397b56dad64fd270bb3d694c78fb9cd665354383c0626dbab013')
def test_get_payload_hash_with_empty_data_for_POST_requests(self):
SignedAWSConnection.method = 'POST'
self.assertEqual(self.signer._get_payload_hash(method='POST'),
UNSIGNED_PAYLOAD)
def test_get_canonical_request(self):
req = self.signer._get_canonical_request(
{'Action': 'DescribeInstances', 'Version': '2013-10-15'},
{'Accept-Encoding': 'gzip,deflate', 'User-Agent': 'My-UA'},
method='GET',
path='/my_action/',
data=None
)
self.assertEqual(req, 'GET\n'
'/my_action/\n'
'Action=DescribeInstances&Version=2013-10-15\n'
'accept-encoding:gzip,deflate\n'
'user-agent:My-UA\n'
'\n'
'accept-encoding;user-agent\n'
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
def test_post_canonical_request(self):
req = self.signer._get_canonical_request(
{'Action': 'DescribeInstances', 'Version': '2013-10-15'},
{'Accept-Encoding': 'gzip,deflate', 'User-Agent': 'My-UA'},
method='POST',
path='/my_action/',
data='{}'
)
self.assertEqual(req, 'POST\n'
'/my_action/\n'
'Action=DescribeInstances&Version=2013-10-15\n'
'accept-encoding:gzip,deflate\n'
'user-agent:My-UA\n'
'\n'
'accept-encoding;user-agent\n'
'44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a')
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CustomField.empty_selection_list'
db.add_column('helpdesk_customfield', 'empty_selection_list', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'CustomField.empty_selection_list'
db.delete_column('helpdesk_customfield', 'empty_selection_list')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'helpdesk.attachment': {
'Meta': {'ordering': "['filename']", 'object_name': 'Attachment'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'followup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.FollowUp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'helpdesk.customfield': {
'Meta': {'object_name': 'CustomField'},
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'decimal_places': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'empty_selection_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'help_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': "'30'"}),
'list_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'max_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'staff_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'helpdesk.emailtemplate': {
'Meta': {'ordering': "['template_name', 'locale']", 'object_name': 'EmailTemplate'},
'heading': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'plain_text': ('django.db.models.fields.TextField', [], {}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'helpdesk.escalationexclusion': {
'Meta': {'object_name': 'EscalationExclusion'},
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'queues': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['helpdesk.Queue']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.followup': {
'Meta': {'ordering': "['date']", 'object_name': 'FollowUp'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 2, 14, 54, 29, 596233)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Ticket']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.ignoreemail': {
'Meta': {'object_name': 'IgnoreEmail'},
'date': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'email_address': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keep_in_mailbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'queues': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['helpdesk.Queue']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.kbcategory': {
'Meta': {'ordering': "['title']", 'object_name': 'KBCategory'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'helpdesk.kbitem': {
'Meta': {'ordering': "['title']", 'object_name': 'KBItem'},
'answer': ('django.db.models.fields.TextField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.KBCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {}),
'recommendations': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'helpdesk.presetreply': {
'Meta': {'ordering': "['name']", 'object_name': 'PreSetReply'},
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'queues': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['helpdesk.Queue']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.queue': {
'Meta': {'ordering': "('title',)", 'object_name': 'Queue'},
'allow_email_submission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_public_submission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'email_box_host': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'email_box_imap_folder': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'email_box_interval': ('django.db.models.fields.IntegerField', [], {'default': "'5'", 'null': 'True', 'blank': 'True'}),
'email_box_last_check': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email_box_pass': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'email_box_port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'email_box_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_box_type': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'email_box_user': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'escalate_days': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'new_ticket_cc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_ticket_cc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'helpdesk.savedsearch': {
'Meta': {'object_name': 'SavedSearch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'query': ('django.db.models.fields.TextField', [], {}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'helpdesk.ticket': {
'Meta': {'object_name': 'Ticket'},
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_to'", 'null': 'True', 'to': "orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_escalation': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'on_hold': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '3', 'blank': '3'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Queue']"}),
'resolution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'submitter_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'helpdesk.ticketcc': {
'Meta': {'object_name': 'TicketCC'},
'can_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Ticket']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'helpdesk.ticketchange': {
'Meta': {'object_name': 'TicketChange'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'followup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.FollowUp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'old_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'helpdesk.ticketcustomfieldvalue': {
'Meta': {'unique_together': "(('ticket', 'field'),)", 'object_name': 'TicketCustomFieldValue'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.CustomField']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Ticket']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'helpdesk.ticketdependency': {
'Meta': {'unique_together': "(('ticket', 'depends_on'),)", 'object_name': 'TicketDependency'},
'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'depends_on'", 'to': "orm['helpdesk.Ticket']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ticket': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticketdependency'", 'to': "orm['helpdesk.Ticket']"})
},
'helpdesk.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'settings_pickled': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['helpdesk']
|
|
import os.path
from theano import Apply, config, Op
from theano.compile import optdb
from theano.gof import LocalOptGroup
from theano.tensor.basic import as_tensor_variable
from theano.tensor.opt import in2out
from .basic_ops import as_gpuarray_variable, infer_context_name
from .opt_util import inplace_allocempty
try:
import pygpu
from pygpu import blas
except ImportError as e:
# To make sure theano is importable
pass
class BlasOp(Op):
def c_headers(self):
return ['<blas_api.h>', '<numpy_compat.h>', '<gpuarray_helper.h>']
def c_header_dirs(self):
return [pygpu.get_include(), os.path.dirname(__file__)]
def c_init_code(self):
return ['import_pygpu__blas();']
class GpuGemv(BlasOp):
"""
Gemv on the GPU.
"""
__props__ = ('inplace',)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
def make_node(self, y, alpha, A, x, beta):
ctx_name = infer_context_name(y, A, x)
A = as_gpuarray_variable(A, ctx_name)
x = as_gpuarray_variable(x, ctx_name)
y = as_gpuarray_variable(y, ctx_name)
alpha = as_tensor_variable(alpha)
beta = as_tensor_variable(beta)
assert alpha.ndim == 0
assert beta.ndim == 0
assert A.ndim == 2
assert x.ndim == 1
assert y.ndim == 1
assert A.dtype == x.dtype == y.dtype
return Apply(self, [y, alpha, A, x, beta], [y.type()])
def perform(self, node, inputs, out_storage):
y, alpha, A, x, beta = inputs
inplace = self.inplace
if inplace and y.strides[0] < 0:
inplace = False
out_storage[0][0] = blas.gemv(alpha, A, x, beta, y,
overwrite_y=inplace)
def c_code(self, node, name, inp, out, sub):
vars = dict(out=out[0], y=inp[0], alpha=inp[1], A=inp[2], x=inp[3],
beta=inp[4], fail=sub['fail'], name=name)
if self.inplace:
code = """
if (%(y)s->ga.strides[0] <= 0) {
%(out)s = theano_try_copy(%(out)s, %(y)s);
if (%(out)s == NULL) {
%(fail)s
}
} else {
Py_XDECREF(%(out)s);
%(out)s = %(y)s;
Py_INCREF(%(out)s);
}
""" % vars
else:
code = """
%(out)s = theano_try_copy(%(out)s, %(y)s);
if (%(out)s == NULL) {
%(fail)s
}
""" % vars
code += """
if (pygpu_blas_rgemv(cb_no_trans,
((dtype_%(alpha)s *)PyArray_DATA(%(alpha)s))[0],
%(A)s, %(x)s,
((dtype_%(beta)s *)PyArray_DATA(%(beta)s))[0],
%(out)s, 0) == -1) {
%(fail)s
}
""" % vars
if config.gpuarray.sync:
code += """
GpuArray_sync(&%(out)s->ga);
""" % vars
return code
def c_code_cache_version(self):
return (4,)
gpugemv_no_inplace = GpuGemv(inplace=False)
gpugemv_inplace = GpuGemv(inplace=True)
class GpuGemm(BlasOp):
"""
Gemm on the GPU.
"""
__props__ = ('inplace',)
_f16_ok = True
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
def make_node(self, C, alpha, A, B, beta):
ctx_name = infer_context_name(C, A, B)
A = as_gpuarray_variable(A, ctx_name)
B = as_gpuarray_variable(B, ctx_name)
C = as_gpuarray_variable(C, ctx_name)
alpha = as_tensor_variable(alpha)
beta = as_tensor_variable(beta)
assert alpha.ndim == 0
assert beta.ndim == 0
assert A.ndim == 2
assert B.ndim == 2
assert C.ndim == 2
assert A.dtype == B.dtype == C.dtype
return Apply(self, [C, alpha, A, B, beta], [C.type()])
def perform(self, node, inputs, outputs):
C, alpha, A, B, beta = inputs
inplace = self.inplace
if inplace and not C.flags.forc:
inplace = False
outputs[0][0] = blas.gemm(alpha, A, B, beta, C,
overwrite_c=inplace)
def c_code(self, node, name, inp, out, sub):
vars = dict(out=out[0], C=inp[0], alpha=inp[1], A=inp[2], B=inp[3],
beta=inp[4], fail=sub['fail'], name=name)
if self.inplace:
code = """
if (!GpuArray_ISONESEGMENT(&%(C)s->ga)) {
%(out)s = theano_try_copy(%(out)s, %(C)s);
if (%(out)s == NULL) {
%(fail)s
}
} else {
Py_XDECREF(%(out)s);
%(out)s = %(C)s;
Py_INCREF(%(out)s);
}
""" % vars
else:
code = """
%(out)s = theano_try_copy(%(out)s, %(C)s);
if (%(out)s == NULL) {
%(fail)s
}
""" % vars
code += """
if (pygpu_blas_rgemm(cb_no_trans, cb_no_trans,
((dtype_%(alpha)s *)PyArray_DATA(%(alpha)s))[0],
%(A)s, %(B)s,
((dtype_%(beta)s *)PyArray_DATA(%(beta)s))[0],
%(out)s, 0) == -1) {
%(fail)s
}
""" % vars
if config.gpuarray.sync:
code += """
GpuArray_sync(&%(out)s->ga);
""" % vars
return code
def c_code_cache_version(self):
return (5,)
gpugemm_no_inplace = GpuGemm(inplace=False)
gpugemm_inplace = GpuGemm(inplace=True)
class GpuGer(BlasOp):
"""
Ger on the GPU.
"""
__props__ = ('inplace',)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
def make_node(self, A, alpha, x, y):
ctx_name = infer_context_name(A, x, y)
A = as_gpuarray_variable(A, ctx_name)
x = as_gpuarray_variable(x, ctx_name)
y = as_gpuarray_variable(y, ctx_name)
alpha = as_tensor_variable(alpha)
assert alpha.ndim == 0
assert A.ndim == 2
assert x.ndim == 1
assert y.ndim == 1
assert A.dtype == x.dtype == y.dtype
return Apply(self, [A, alpha, x, y], [A.type()])
def perform(self, node, inp, out):
A, alpha, x, y = inp
inplace = self.inplace
if inplace and not A.flags.forc:
inplace = False
out[0][0] = blas.ger(alpha, x, y, A,
overwrite_a=inplace)
def c_code(self, node, name, inp, out, sub):
vars = dict(out=out[0], A=inp[0], alpha=inp[1], x=inp[2], y=inp[3],
fail=sub['fail'], name=name)
if self.inplace:
code = """
if (!GpuArray_ISONESEGMENT(&%(A)s->ga)) {
%(out)s = theano_try_copy(%(out)s, %(A)s);
if (%(out)s == NULL) {
%(fail)s
}
} else {
Py_XDECREF(%(out)s);
%(out)s = %(A)s;
Py_INCREF(%(out)s);
}
""" % vars
else:
code = """
%(out)s = theano_try_copy(%(out)s, %(A)s);
if (%(out)s == NULL) {
%(fail)s
}
""" % vars
code += """
if (pygpu_blas_rger(((dtype_%(alpha)s *)PyArray_DATA(%(alpha)s))[0],
%(x)s, %(y)s, %(out)s, 0) == -1) {
%(fail)s
}
""" % vars
if config.gpuarray.sync:
code += """
GpuArray_sync(&%(out)s->ga);
""" % vars
return code
def c_code_cache_version(self):
return (3,)
gpuger_no_inplace = GpuGer(inplace=False)
gpuger_inplace = GpuGer(inplace=True)
class GpuDot22(BlasOp):
"""
Dot22 on the GPU.
"""
__props__ = ()
def make_node(self, x, y):
ctx_name = infer_context_name(x, y)
x = as_gpuarray_variable(x, ctx_name)
y = as_gpuarray_variable(y, ctx_name)
assert x.ndim == 2
assert y.ndim == 2
assert x.dtype == y.dtype
otype = x.type.clone(
broadcastable=(x.type.broadcastable[0], y.type.broadcastable[1]))
return Apply(self, [x, y], [otype()])
def perform(self, node, inputs, outputs):
x, y = inputs
out = pygpu.empty((x.shape[0], y.shape[1]), dtype=x.dtype,
context=x.context)
outputs[0][0] = blas.gemm(1., x, y, 0., out,
overwrite_c=True)
def c_code(self, node, name, inputs, outputs, sub):
dtype = node.inputs[0].dtype
typecode = pygpu.gpuarray.dtype_to_typecode(dtype)
vars = dict(A=inputs[0], B=inputs[1], dtype=dtype, out=outputs[0],
typecode=typecode,
fail=sub['fail'], name=name)
code = """
double one = 1.;
double zero = 0.;
size_t dims[] = {0, 0};
dims[0] = PyGpuArray_DIMS(%(A)s)[0];
dims[1] = PyGpuArray_DIMS(%(B)s)[1];
if (theano_prep_output(&%(out)s, 2, dims, %(typecode)s, GA_C_ORDER,
%(A)s->context)) {
%(fail)s
}
if (pygpu_blas_rgemm(cb_no_trans, cb_no_trans,
one,
%(A)s, %(B)s,
zero,
%(out)s, 0) == -1) {
%(fail)s
}
""" % vars
if config.gpuarray.sync:
code += """
GpuArray_sync(&%(out)s->ga);
""" % vars
return code
def c_code_cache_version(self):
return (4,)
gpu_dot22 = GpuDot22()
@inplace_allocempty(GpuGemv, 0)
def local_inplace_gpuagemv(node, inputs):
return [gpugemv_inplace(*inputs)]
@inplace_allocempty(GpuGemm, 0)
def local_inplace_gpuagemm(node, inputs):
return [gpugemm_inplace(*inputs)]
@inplace_allocempty(GpuGer, 0)
def local_inplace_gpuager(node, inputs):
return [gpuger_inplace(*inputs)]
gpuablas_opt_inplace = in2out(LocalOptGroup(local_inplace_gpuagemv,
local_inplace_gpuagemm,
local_inplace_gpuager),
name='gpuablas_opt_inplace')
optdb.register('InplaceGpuaBlasOpt',
gpuablas_opt_inplace,
70.0, 'fast_run', 'inplace', 'gpuarray')
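# --- Reference sketch (not part of the original module) ---
# The dense linear-algebra updates the Ops above compute, written with NumPy
# for clarity; the real Ops perform them on the GPU through pygpu.blas.
def _numpy_blas_reference(alpha, A, B, C, x, y, beta):
    import numpy as np
    gemm = alpha * A.dot(B) + beta * C      # GpuGemm (GpuDot22: alpha=1, beta=0)
    gemv = alpha * A.dot(x) + beta * y      # GpuGemv
    ger = alpha * np.outer(x, y) + A        # GpuGer
    return gemm, gemv, ger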
|
|
import logging
import sys
import os
import socket
import re
import xmlrpclib
import httplib
from time import sleep
from urlparse import urlparse
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.config_schema import one_or_more
from flexget.utils.bittorrent import Torrent, is_torrent_file
log = logging.getLogger('rtorrent')
class TimeoutHTTPConnection(httplib.HTTPConnection):
def __init__(self, host, timeout=30):
httplib.HTTPConnection.__init__(self, host, timeout=timeout)
class HTTPTransport(xmlrpclib.Transport):
def __init__(self, timeout=30, *args, **kwargs):
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
# return an existing connection if possible. This allows HTTP/1.1 keep-alive.
if self._connection and host == self._connection[0]:
return self._connection[1]
# create a HTTP connection object from a host descriptor
chost, self._extra_headers, x509 = self.get_host_info(host)
self._connection = host, TimeoutHTTPConnection(chost, timeout=self.timeout)
return self._connection[1]
class HTTPServerProxy(xmlrpclib.ServerProxy):
"""
Supports http with timeout
"""
def __init__(self, uri, timeout=30, *args, **kwargs):
kwargs['transport'] = HTTPTransport(timeout=timeout, use_datetime=kwargs.get('use_datetime', 0))
xmlrpclib.ServerProxy.__init__(self, uri, *args, **kwargs)
class SCGITransport(xmlrpclib.Transport):
""" Used to override the default xmlrpclib transport to support SCGI """
def __init__(self, timeout=30, *args, **kwargs):
self.verbose = 0
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.timeout = timeout
def request(self, host, handler, request_body, verbose=False):
return self.single_request(host, handler, request_body, verbose)
def single_request(self, host, handler, request_body, verbose=0):
# Add SCGI headers to the request.
headers = [('CONTENT_LENGTH', str(len(request_body))), ('SCGI', '1')]
header = '\x00'.join(['%s\x00%s' % (key, value) for key, value in headers]) + '\x00'
header = '%d:%s' % (len(header), header)
request_body = '%s,%s' % (header, request_body)
sock = None
try:
if host:
parsed_host = urlparse(host)
host = parsed_host.hostname
port = parsed_host.port
addr_info = socket.getaddrinfo(host, int(port), socket.AF_INET, socket.SOCK_STREAM)
sock = socket.socket(*addr_info[0][:3])
sock.settimeout(self.timeout)
sock.connect(addr_info[0][4])
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(handler)
self.verbose = verbose
if sys.version_info[0] > 2:
sock.send(bytes(request_body, 'utf-8'))
else:
sock.send(request_body)
return self.parse_response(sock.makefile())
finally:
if sock:
sock.close()
def parse_response(self, response):
p, u = self.getparser()
response_body = ''
while True:
data = response.read(1024)
if not data:
break
response_body += data
if self.verbose:
            log.info('body: %s', repr(response_body))
# Remove SCGI headers from the response.
response_header, response_body = re.split(r'\n\s*?\n', response_body, maxsplit=1)
p.feed(response_body)
p.close()
return u.close()
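# --- Illustrative sketch (not part of the original plugin) ---
# The SCGI request framing built in SCGITransport.single_request above:
# null-separated headers prefixed with the header length (netstring style),
# then a comma and the XML-RPC body.
def _example_scgi_frame(request_body):
    headers = [('CONTENT_LENGTH', str(len(request_body))), ('SCGI', '1')]
    header = '\x00'.join('%s\x00%s' % (k, v) for k, v in headers) + '\x00'
    return '%d:%s,%s' % (len(header), header, request_body)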
class SCGIServerProxy(xmlrpclib.ServerProxy):
""" Enable connection to SCGI proxy """
def __init__(self, uri, transport=None, encoding=None,
verbose=False, allow_none=False, use_datetime=False, timeout=30):
parsed_uri = urlparse(uri)
self.timeout = timeout
self.uri = uri
self.__host = uri
self.__handler = parsed_uri.path
if not self.__handler:
self.__handler = '/'
if not transport:
transport = SCGITransport(use_datetime=use_datetime, timeout=self.timeout)
self.__transport = transport
self.__encoding = encoding
self.__verbose = verbose
self.__allow_none = allow_none
def __close(self):
self.__transport.close()
def __request(self, method_name, params):
# call a method on the remote server
request = xmlrpclib.dumps(params, method_name, encoding=self.__encoding, allow_none=self.__allow_none)
response = self.__transport.request(self.uri, self.__handler, request, verbose=self.__verbose)
if len(response) == 1:
response = response[0]
return response
def __repr__(self):
return '<SCGIServerProxy for %s%s>' % (self.__host, self.__handler)
def __getattr__(self, name):
# magic method dispatcher
return xmlrpclib._Method(self.__request, name)
        # note: to call a remote object with a non-standard name, use
        # result = getattr(server, "strange-python-name")(args)
def __call__(self, attr):
"""
A workaround to get special attributes on the ServerProxy
without interfering with the magic __getattr__
"""
if attr == 'close':
return self.__close
elif attr == 'transport':
return self.__transport
raise AttributeError('Attribute %r not found' % (attr,))
class RTorrent(object):
""" rTorrent API client """
default_fields = [
'hash',
'name',
'up_total', 'down_total', 'down_rate',
'is_open', 'is_active',
'custom1', 'custom2', 'custom3', 'custom4', 'custom5',
'state', 'complete',
'bytes_done', 'down.rate', 'left_bytes',
'ratio',
'base_path',
]
required_fields = [
'hash',
'name',
'base_path'
]
def __init__(self, uri, username=None, password=None, timeout=30):
"""
New connection to rTorrent
:param uri: RTorrent URL. Supports both http(s) and scgi
:param username: Username for basic auth over http(s)
:param password: Password for basic auth over http(s)
"""
self.uri = uri
self.username = username
self.password = password
self._version = None
parsed_uri = urlparse(uri)
# Reformat uri with username and password for HTTP(s) Auth
if self.username and self.password:
if parsed_uri.scheme not in ['http', 'https']:
raise IOError('Username and password only supported on http(s)')
data = {
'scheme': parsed_uri.scheme,
'hostname': parsed_uri.hostname,
'port': parsed_uri.port,
'path': parsed_uri.path,
'query': parsed_uri.query,
'username': self.username,
'password': self.password,
}
self.uri = '%(scheme)s://%(username)s:%(password)s@%(hostname)s%(path)s%(query)s' % data
# Determine the proxy server
if parsed_uri.scheme in ['http', 'https']:
sp = HTTPServerProxy
elif parsed_uri.scheme == 'scgi':
sp = SCGIServerProxy
else:
raise IOError('Unsupported scheme %s for uri %s' % (parsed_uri.scheme, self.uri))
self._server = sp(self.uri, timeout=timeout)
def _clean_fields(self, fields, reverse=False):
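# Newer rTorrent XML-RPC method names are dotted (d.up.total, d.down.rate)
# while this client exposes underscore field names (up_total, down_rate).
# This helper rewrites between the two forms and, on the way in, prepends
# any missing required fields.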
if not fields:
fields = self.default_fields
if reverse:
for field in ['up.total', 'down.total', 'down.rate']:
if field in fields:
fields[fields.index(field)] = field.replace('.', '_')
return fields
for required_field in self.required_fields:
if required_field not in fields:
fields.insert(0, required_field)
for field in ['up_total', 'down_total', 'down_rate']:
if field in fields:
fields[fields.index(field)] = field.replace('_', '.')
return fields
@property
def version(self):
return [int(v) for v in self._server.system.client_version().split('.')]
def load(self, raw_torrent, fields={}, start=False, mkdir=True):
# First param is empty 'target'
params = ['', xmlrpclib.Binary(raw_torrent)]
# Additional fields to set
for key, val in fields.items():
# Values must be escaped if within params
params.append('d.%s.set=%s' % (key, re.escape(str(val))))
if mkdir and 'directory' in fields:
result = self._server.execute.throw('', 'mkdir', '-p', fields['directory'])
if result != 0:
raise xmlrpclib.Error('Failed creating directory %s' % fields['directory'])
# by default rtorrent won't allow calls over 512kb in size.
xmlrpc_size = len(xmlrpclib.dumps(tuple(params), 'raw_start')) + 71680 # Add 70kb for buffer
if xmlrpc_size > 524288:
prev_size = self._server.network.xmlrpc.size_limit()
self._server.network.xmlrpc.size_limit.set('', xmlrpc_size)
# Call load method and return the response
if start:
result = self._server.load.raw_start(*params)
else:
result = self._server.load.raw(*params)
if xmlrpc_size > 524288:
self._server.network.xmlrpc.size_limit.set('', prev_size)
return result
def torrent(self, info_hash, fields=default_fields):
""" Get the details of a torrent """
fields = self._clean_fields(fields)
multi_call = xmlrpclib.MultiCall(self._server)
for field in fields:
method_name = 'd.%s' % field
getattr(multi_call, method_name)(info_hash)
resp = multi_call()
# TODO: Maybe we should return a named tuple or a Torrent class?
return dict(zip(self._clean_fields(fields, reverse=True), [val for val in resp]))
def torrents(self, view='main', fields=default_fields):
fields = self._clean_fields(fields)
params = ['d.%s=' % field for field in fields]
params.insert(0, view)
resp = self._server.d.multicall(params)
# Response is formatted as a list of lists, with just the values
return [dict(zip(self._clean_fields(fields, reverse=True), val)) for val in resp]
def update(self, info_hash, fields):
multi_call = xmlrpclib.MultiCall(self._server)
for key, val in fields.items():
method_name = 'd.%s.set' % key
getattr(multi_call, method_name)(info_hash, str(val))
return multi_call()[0]
def delete(self, info_hash):
return self._server.d.erase(info_hash)
def stop(self, info_hash):
self._server.d.stop(info_hash)
return self._server.d.close(info_hash)
def start(self, info_hash):
return self._server.d.start(info_hash)
def move(self, info_hash, dst_path):
self.stop(info_hash)
torrent = self.torrent(info_hash, fields=['base_path'])
try:
log.verbose('Creating destination directory `%s`' % dst_path)
self._server.execute.throw('', 'mkdir', '-p', dst_path)
except xmlrpclib.Error:
raise xmlrpclib.Error("unable to create folder %s" % dst_path)
self._server.execute.throw('', 'mv', '-u', torrent['base_path'], dst_path)
self._server.d.set_directory(info_hash, dst_path)
self.start(info_hash)
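# A minimal usage sketch for the client above (the endpoint is hypothetical;
# substitute your own scgi:// or http(s):// URI):
#
#     client = RTorrent('scgi://127.0.0.1:5000')
#     for t in client.torrents(view='main', fields=['name', 'ratio']):
#         print(t['name'], t['ratio'])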
class RTorrentPluginBase(object):
priority_map = {
'high': 3,
'medium': 2,
'low': 1,
'off': 0,
}
def _build_options(self, config, entry, entry_first=True):
options = {}
for opt_key in ('path', 'message', 'priority',
'custom1', 'custom2', 'custom3', 'custom4', 'custom5'):
# Values do not merge config with task
# Task takes priority then config is used
entry_value = entry.get(opt_key)
config_value = config.get(opt_key)
if entry_first:
if entry_value:
options[opt_key] = entry.render(entry_value)
elif config_value:
options[opt_key] = entry.render(config_value)
else:
if config_value:
options[opt_key] = entry.render(config_value)
elif entry_value:
options[opt_key] = entry.render(entry_value)
# Convert priority from string to int
priority = options.get('priority')
if priority and priority in self.priority_map:
options['priority'] = self.priority_map[priority]
# Map Flexget path to directory in rTorrent
if options.get('path'):
options['directory'] = options['path']
del options['path']
if 'directory' in options:
options['directory'] = pathscrub(options['directory'])
return options
def on_task_start(self, task, config):
try:
client = RTorrent(config['uri'], username=config.get('username'),
password=config.get('password'), timeout=config.get('timeout'))
if client.version < [0, 9, 4]:
log.error('rtorrent version >=0.9.4 required, found {0}'.format('.'.join(map(str, client.version))))
task.abort('rtorrent version >=0.9.4 required, found {0}'.format('.'.join(map(str, client.version))))
except (IOError, xmlrpclib.Error) as e:
raise plugin.PluginError("Couldn't connect to rTorrent: %s" % str(e))
class RTorrentOutputPlugin(RTorrentPluginBase):
schema = {
'type': 'object',
'properties': {
# connection info
'uri': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'start': {'type': 'boolean', 'default': True},
'mkdir': {'type': 'boolean', 'default': True},
'action': {'type': 'string', 'enum': ['update', 'delete', 'add'], 'default': 'add'},
'timeout': {'type': 'integer', 'default': 30},
# properties to set on rtorrent download object
'message': {'type': 'string'},
'priority': {'type': 'string'},
'path': {'type': 'string'},
'custom1': {'type': 'string'},
'custom2': {'type': 'string'},
'custom3': {'type': 'string'},
'custom4': {'type': 'string'},
'custom5': {'type': 'string'},
},
'required': ['uri'],
'additionalProperties': False,
}
def _verify_load(self, client, info_hash):
last_error = None
for i in range(0, 5):
try:
return client.torrent(info_hash, fields=['hash'])
except (IOError, xmlrpclib.Error) as e:
last_error = e
sleep(0.5)
# re-raise the last error if the torrent never showed up
raise last_error
def on_task_download(self, task, config):
# If the download plugin is not enabled, we need to call it to get
# our temp .torrent files
if config['action'] == 'add' and 'download' not in task.config:
download = plugin.get_plugin_by_name('download')
download.instance.get_temp_files(task, handle_magnets=True, fail_html=True)
def on_task_output(self, task, config):
client = RTorrent(config['uri'], username=config.get('username'),
password=config.get('password'), timeout=config.get('timeout'))
for entry in task.accepted:
if task.options.test:
log.info('Would add %s to rTorrent' % entry['url'])
continue
if config['action'] == 'add':
try:
options = self._build_options(config, entry)
except RenderError as e:
entry.fail("failed to render properties %s" % str(e))
continue
self.add_entry(client, entry, options, start=config['start'], mkdir=config['mkdir'])
info_hash = entry.get('torrent_info_hash')
if not info_hash:
entry.fail('Failed to %s as no info_hash found' % config['action'])
continue
if config['action'] == 'delete':
self.delete_entry(client, entry)
if config['action'] == 'update':
self.update_entry(client, entry, config)
def delete_entry(self, client, entry):
try:
client.delete(entry['torrent_info_hash'])
log.verbose('Deleted %s (%s) in rtorrent ' % (entry['title'], entry['torrent_info_hash']))
except (IOError, xmlrpclib.Error) as e:
entry.fail('Failed to delete: %s' % str(e))
return
def update_entry(self, client, entry, config):
info_hash = entry['torrent_info_hash']
# First check if it already exists
try:
existing = client.torrent(info_hash, fields=['base_path'])
except IOError as e:
entry.fail("Error updating torrent %s" % str(e))
return
except xmlrpclib.Error as e:
existing = False
# Build options but make config values override entry values
try:
options = self._build_options(config, entry, entry_first=False)
except RenderError as e:
entry.fail("failed to render properties %s" % str(e))
return
if existing and 'directory' in options:
# Check if changing to another directory which requires a move
if options['directory'] != existing['base_path']\
and options['directory'] != os.path.dirname(existing['base_path']):
try:
log.verbose("Path is changing, moving files from '%s' to '%s'"
% (existing['base_path'], options['directory']))
client.move(info_hash, options['directory'])
except (IOError, xmlrpclib.Error) as e:
entry.fail('Failed moving torrent: %s' % str(e))
return
# Remove directory from update otherwise rTorrent will append the title to the directory path
if 'directory' in options:
del options['directory']
try:
client.update(info_hash, options)
log.verbose('Updated %s (%s) in rtorrent ' % (entry['title'], info_hash))
except (IOError, xmlrpclib.Error) as e:
entry.fail('Failed to update: %s' % str(e))
return
def add_entry(self, client, entry, options, start=True, mkdir=False):
if 'torrent_info_hash' not in entry:
entry.fail('missing torrent_info_hash')
return
if entry['url'].startswith('magnet:'):
torrent_raw = 'd10:magnet-uri%d:%se' % (len(entry['url']), entry['url'])
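# rTorrent treats a bencoded dict containing only a "magnet-uri" key as a
# stand-in for a real .torrent file, so magnet entries are wrapped this way.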
else:
# Check that file is downloaded
if 'file' not in entry:
entry.fail('file missing?')
return
# Verify the temp file exists
if not os.path.exists(entry['file']):
entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
return
# Verify valid torrent file
if not is_torrent_file(entry['file']):
entry.fail("Downloaded temp file '%s' is not a torrent file" % entry['file'])
return
try:
with open(entry['file'], 'rb') as f:
torrent_raw = f.read()
except IOError as e:
entry.fail('Failed to add to rTorrent %s' % str(e))
return
try:
Torrent(torrent_raw)
except SyntaxError as e:
entry.fail('Strange, unable to decode torrent, raise a BUG: %s' % str(e))
return
# First check if it already exists
try:
if client.torrent(entry['torrent_info_hash']):
log.warning("Torrent %s already exists, won't add" % entry['title'])
return
except IOError as e:
entry.fail("Error checking if torrent already exists %s" % str(e))
except xmlrpclib.Error:
# No existing found
pass
try:
resp = client.load(torrent_raw, fields=options, start=start, mkdir=mkdir)
if resp != 0:
entry.fail('Failed to add to rTorrent invalid return value %s' % resp)
except (IOError, xmlrpclib.Error) as e:
log.exception(e)
entry.fail('Failed to add to rTorrent %s' % str(e))
return
# Verify the torrent loaded
try:
self._verify_load(client, entry['torrent_info_hash'])
log.info('%s added to rtorrent' % entry['title'])
except (IOError, xmlrpclib.Error) as e:
entry.fail('Failed to verify torrent loaded: %s' % str(e))
def on_task_exit(self, task, config):
""" Make sure all temp files are cleaned up when task exists """
# If download plugin is enabled, it will handle cleanup.
if 'download' not in task.config:
download = plugin.get_plugin_by_name('download')
download.instance.cleanup_temp_files(task)
on_task_abort = on_task_exit
class RTorrentInputPlugin(RTorrentPluginBase):
schema = {
'type': 'object',
'properties': {
'uri': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'timeout': {'type': 'integer', 'default': 30},
'view': {'type': 'string', 'default': 'main'},
'fields': one_or_more({'type': 'string', 'enum': RTorrent.default_fields}),
},
'required': ['uri'],
'additionalProperties': False
}
def on_task_input(self, task, config):
client = RTorrent(config['uri'], username=config.get('username'),
password=config.get('password'), timeout=config.get('timeout'))
fields = config.get('fields')
try:
torrents = client.torrents(config['view'], fields=fields)
except (IOError, xmlrpclib.Error) as e:
task.abort('Could not get torrents (%s): %s' % (config['view'], e))
return
entries = []
for torrent in torrents:
entry = Entry(
title=torrent['name'],
url='%s/%s' % (config['uri'], torrent['hash']),
path=torrent['base_path'],
torrent_info_hash=torrent['hash'],
)
for attr, value in torrent.iteritems():
entry[attr] = value
entries.append(entry)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(RTorrentOutputPlugin, 'rtorrent', api_ver=2)
plugin.register(RTorrentInputPlugin, 'from_rtorrent', api_ver=2)
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections import OrderedDict
from copy import deepcopy
from typing import (
Any,
Dict,
Set,
Tuple,
)
# External imports
import bs4
from jinja2 import Template
from mock import MagicMock, patch
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
# Bokeh imports
import bokeh.resources as resources
import bokeh.util.version as buv
from bokeh.core.types import ID
from bokeh.document import Document
from bokeh.embed.util import RenderRoot, standalone_docs_json
from bokeh.io import curdoc
from bokeh.plotting import figure
from bokeh.resources import CDN, CSSResources, JSResources
from bokeh.themes import Theme
# Module under test
import bokeh.embed.standalone as bes # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
"bokeh._testing.plugins.selenium",
)
def stable_id() -> ID:
return ID('ID')
@pytest.fixture
def test_plot() -> figure:
from bokeh.plotting import figure
test_plot = figure(title="'foo'")
test_plot.circle([1, 2], [2, 3])
return test_plot
PAGE = Template("""
<!DOCTYPE html>
<html lang="en">
<head>
</head>
<body>
<script>
{{js}}
</script>
{{tag}}
</body>
""")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_autoload_static:
def test_return_type(self, test_plot: figure) -> None:
r = bes.autoload_static(test_plot, CDN, "some/path")
assert len(r) == 2
def test_script_attrs(self, test_plot: figure) -> None:
_, tag = bes.autoload_static(test_plot, CDN, "some/path")
html = bs4.BeautifulSoup(tag, "html.parser")
scripts = html.find_all(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == {"src", "id"}
assert attrs["src"] == "some/path"
@pytest.mark.parametrize("version", ["1.4.0rc1", "2.0.0dev3"])
@pytest.mark.selenium
def test_js_dev_cdn(self, version: str, monkeypatch: pytest.MonkeyPatch, driver: WebDriver,
test_file_path_and_url: Tuple[str, str], test_plot: figure) -> None:
monkeypatch.setattr(buv, "__version__", version)
monkeypatch.setattr(resources, "__version__", version)
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements(By.CSS_SELECTOR, 'head script')
assert len(scripts) == 5
for script in scripts:
assert script.get_attribute("crossorigin") == None
assert script.get_attribute("integrity") == ""
@pytest.mark.selenium
def test_js_release_cdn(self, monkeypatch: pytest.MonkeyPatch, driver: WebDriver,
test_file_path_and_url: Tuple[str, str], test_plot: figure) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0")
monkeypatch.setattr(resources, "__version__", "2.0.0")
r = deepcopy(CDN)
# Skip bokeh-mathjax for older versions
r.js_components.remove("bokeh-mathjax")
js, tag = bes.autoload_static(test_plot, r, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements(By.CSS_SELECTOR, 'head script')
for x in scripts:
print(x.get_attribute("src"))
assert len(scripts) == 4
for script in scripts:
assert script.get_attribute("crossorigin") == None
assert script.get_attribute("integrity") == ""
@pytest.mark.selenium
def test_js_release_dev_cdn(self, monkeypatch: pytest.MonkeyPatch, driver: WebDriver,
test_file_path_and_url: Tuple[str, str], test_plot: figure) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0-foo")
monkeypatch.setattr(resources, "__version__", "2.0.0-foo")
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements(By.CSS_SELECTOR, 'head script')
for x in scripts:
print(x.get_attribute("src"))
assert len(scripts) == 5
for script in scripts:
assert script.get_attribute("crossorigin") == None
assert script.get_attribute("integrity") == ""
@pytest.mark.selenium
def test_js_release_server(self, monkeypatch: pytest.MonkeyPatch, driver: WebDriver,
test_file_path_and_url: Tuple[str, str], test_plot: figure) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0")
monkeypatch.setattr(resources, "__version__", "2.0.0")
js, tag = bes.autoload_static(test_plot, resources.Resources(mode="server"), "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements(By.CSS_SELECTOR, 'head script')
assert len(scripts) == 5
for script in scripts:
assert script.get_attribute("crossorigin") == None
assert script.get_attribute("integrity") == ""
class Test_components:
def test_return_type(self) -> None:
plot1 = figure()
plot1.circle([], [])
plot2 = figure()
plot2.circle([], [])
# This is a testing artefact, users don't have to do this in practice
curdoc().add_root(plot1)
curdoc().add_root(plot2)
r = bes.components(plot1)
assert len(r) == 2
_, divs0 = bes.components((plot1, plot2))
assert isinstance(divs0, tuple)
_, divs1 = bes.components([plot1, plot2])
assert isinstance(divs1, tuple)
_, divs2 = bes.components({"Plot 1": plot1, "Plot 2": plot2})
assert isinstance(divs2, dict)
assert all(isinstance(x, str) for x in divs2.keys())
# explicit test for OrderedDict (don't replace with dict)
_, divs3 = bes.components(OrderedDict([("Plot 1", plot1), ("Plot 2", plot2)]))
assert isinstance(divs3, OrderedDict)
assert all(isinstance(x, str) for x in divs3.keys())
@patch('bokeh.embed.util.make_globally_unique_id', new_callable=lambda: stable_id)
def test_plot_dict_returned_when_wrap_plot_info_is_false(self, mock_make_id: MagicMock) -> None:
doc = Document()
plot1 = figure()
plot1.circle([], [])
doc.add_root(plot1)
plot2 = figure()
plot2.circle([], [])
doc.add_root(plot2)
expected_plotdict_1 = RenderRoot(elementid=ID("ID"), id=ID("ID"))
expected_plotdict_2 = RenderRoot(elementid=ID("ID"), id=ID("ID"))
_, plotdict = bes.components(plot1, wrap_plot_info=False)
assert plotdict == expected_plotdict_1
_, plotids = bes.components((plot1, plot2), wrap_plot_info=False)
assert plotids == (expected_plotdict_1, expected_plotdict_2)
_, plotiddict = bes.components({'p1': plot1, 'p2': plot2}, wrap_plot_info=False)
assert plotiddict == {'p1': expected_plotdict_1, 'p2': expected_plotdict_2}
def test_result_attrs(self, test_plot: figure) -> None:
script, _ = bes.components(test_plot)
html = bs4.BeautifulSoup(script, "html.parser")
scripts = html.find_all(name='script')
assert len(scripts) == 1
assert scripts[0].attrs == {'type': 'text/javascript'}
@patch('bokeh.embed.util.make_globally_unique_id', new=stable_id)
def test_div_attrs(self, test_plot: figure) -> None:
_, div = bes.components(test_plot)
html = bs4.BeautifulSoup(div, "html.parser")
els = html.find_all(name='div')
assert len(els) == 1
el = els[0]
assert set(el.attrs) == {"class", "id", "data-root-id"}
assert el.attrs["class"] == ["bk-root"]
assert el.attrs["id"] == "ID"
assert el.attrs["data-root-id"] == test_plot.id
assert el.string is None
def test_script_is_utf8_encoded(self, test_plot: figure) -> None:
script, _ = bes.components(test_plot)
assert isinstance(script, str)
def test_quoting(self, test_plot: figure) -> None:
script, _ = bes.components(test_plot)
assert """ not in script
assert "'foo'" not in script
assert "'foo'" in script
def test_output_is_without_script_tag_when_wrap_script_is_false(self, test_plot: figure) -> None:
script, _ = bes.components(test_plot)
html = bs4.BeautifulSoup(script, "html.parser")
scripts = html.find_all(name='script')
assert len(scripts) == 1
# XXX: this needs to account for indentation
#script_content = scripts[0].getText()
#rawscript, div = bes.components(test_plot, wrap_script=False)
#self.maxDiff = None
#assert rawscript.strip() == script_content.strip()
class Test_file_html:
def test_return_type(self, test_plot: figure) -> None:
class fake_template:
def __init__(self, tester: Any, user_template_variables: Set[str] | None = None) -> None:
self.tester = tester
self.template_variables = {
"title",
"bokeh_js",
"bokeh_css",
"plot_script",
"doc",
"docs",
"base",
}
if user_template_variables is not None:
self.template_variables.update(user_template_variables)
def render(self, template_variables: Dict[str, Any]) -> str:
assert self.template_variables.issubset(set(template_variables.keys()))
return "template result"
r = bes.file_html(test_plot, CDN, "title")
assert isinstance(r, str)
r = bes.file_html(test_plot, CDN, "title",
template=fake_template(self)) # type: ignore[arg-type]
assert isinstance(r, str)
r = bes.file_html(test_plot, CDN, "title",
template=fake_template(self, {"test_var"}), # type: ignore[arg-type]
template_variables={"test_var": "test"})
assert isinstance(r, str)
@patch('bokeh.embed.bundle.warn')
def test_file_html_handles_js_only_resources(self, mock_warn: MagicMock, test_plot: figure) -> None:
js_resources = JSResources(mode="relative", components=["bokeh"])
template = Template("<head>{{ bokeh_js }}</head><body></body>")
output = bes.file_html(test_plot, (js_resources, None), "title", template=template)
html = "<head>%s</head><body></body>" % js_resources.render_js()
assert output == html
@patch('bokeh.embed.bundle.warn')
def test_file_html_provides_warning_if_no_css(self, mock_warn: MagicMock, test_plot: figure) -> None:
js_resources = JSResources()
bes.file_html(test_plot, (js_resources, None), "title")
mock_warn.assert_called_once_with(
'No Bokeh CSS Resources provided to template. If required you will need to provide them manually.'
)
@patch('bokeh.embed.bundle.warn')
def test_file_html_handles_css_only_resources(self, mock_warn: MagicMock, test_plot: figure) -> None:
css_resources = CSSResources(mode="relative", components=["bokeh"])
template = Template("<head>{{ bokeh_css }}</head><body></body>")
output = bes.file_html(test_plot, (None, css_resources), "title", template=template)
html = "<head>%s</head><body></body>" % css_resources.render_css()
assert output == html
@patch('bokeh.embed.bundle.warn')
def test_file_html_provides_warning_if_no_js(self, mock_warn: MagicMock, test_plot: figure) -> None:
css_resources = CSSResources()
bes.file_html(test_plot, (None, css_resources), "title")
mock_warn.assert_called_once_with(
'No Bokeh JS Resources provided to template. If required you will need to provide them manually.'
)
def test_file_html_title_is_escaped(self, test_plot: figure) -> None:
r = bes.file_html(test_plot, CDN, "&<")
assert "<title>&<</title>" in r
def test_entire_doc_is_not_used(self) -> None:
from bokeh.document import Document
from bokeh.models import Button
fig = figure()
fig.x([0], [0])
button = Button(label="Button")
d = Document()
d.add_root(fig)
d.add_root(button)
out = bes.file_html([fig], CDN)
# this is a very coarse test but it will do
assert "bokeh-widgets" not in out
JSON_ITEMS_KEYS = {"target_id", "root_id", "doc", "version"}
class Test_json_item:
def test_with_target_id(self, test_plot: figure) -> None:
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['target_id'] == "foo"
def test_without_target_id(self, test_plot: figure) -> None:
out = bes.json_item(test_plot)
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['target_id'] == None
def test_doc_json(self, test_plot: figure) -> None:
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
expected = list(standalone_docs_json([test_plot]).values())[0]
assert out['doc'] == expected
def test_doc_title(self, test_plot: figure) -> None:
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['doc']['title'] == ""
def test_root_id(self, test_plot: figure) -> None:
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['doc']['roots'][0]["id"] == out['root_id']
def test_version(self, monkeypatch: pytest.MonkeyPatch, test_plot: figure) -> None:
from bokeh import __version__
out = bes.json_item(test_plot, target=ID("foo"))
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['doc']['version'] == __version__
out = bes.json_item(test_plot)
assert set(out.keys()) == JSON_ITEMS_KEYS
assert out['doc']['version'] == __version__
@patch('bokeh.embed.standalone.OutputDocumentFor')
def test_apply_theme(self, mock_OFD: MagicMock, test_plot: figure) -> None:
# the subsequent call inside ODF will fail since the model was never
# added to a document. Ignoring that since we just want to make sure
# ODF is called with the expected theme arg.
theme = Theme(json={})
try:
bes.json_item(test_plot, theme=theme)
except ValueError:
pass
mock_OFD.assert_called_once_with([test_plot], apply_theme=theme)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test__title_from_models:
pass
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
#!/g/bork3/bin/python
# Thanks to http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/457411
from bisect import bisect_left, bisect_right
from itertools import izip
class intervalmap(object):
"""
This class maps a set of intervals to a set of values.
>>> i = intervalmap()
>>> i[0:5] = '0-5'
>>> i[8:12] = '8-12'
>>> print i[2]
0-5
>>> print i[10]
8-12
>>> print repr(i[-1])
None
>>> print repr(i[17])
None
>>> i[4:9] = '4-9'
>>> print [(j,i[j]) for j in range(6)]
[(0, '0-5'), (1, '0-5'), (2, '0-5'), (3, '0-5'), (4, '4-9'), (5, '4-9')]
>>> print list(i.items())
[((0, 4), '0-5'), ((4, 9), '4-9'), ((9, 12), '8-12')]
>>> i[:0] = 'less than 0'
>>> i[-5]
'less than 0'
>>> i[0]
'0-5'
>>> print list(i.items())
[((None, 0), 'less than 0'), ((0, 4), '0-5'), ((4, 9), '4-9'), ((9, 12), '8-12')]
>>> i[21:] = 'more than twenty'
>>> i[42]
'more than twenty'
>>> i[10.5:15.5] = '10.5-15.5'
>>> i[11.5]
'10.5-15.5'
>>> i[0.5]
'0-5'
>>> print list(i.items())
[((None, 0),... ((9, 10.5), '8-12'), ((10.5, 15.5), '10.5-15.5'), ((21, None),...
>>> i = intervalmap()
>>> i[0:2] = 1
>>> i[2:8] = 2
>>> i[4:] = 3
>>> i[5:6] = 4
>>> i
{[0, 2] => 1, [2, 4] => 2, [4, 5] => 3, [5, 6] => 4, [6, None] => 3}
"""
def __init__(self):
"""
Initializes an empty intervalmap.
"""
self._bounds = []
self._items = []
self._upperitem = None
def __setitem__(self,_slice,_value):
"""
Sets an interval mapping.
"""
assert isinstance(_slice,slice), 'The key must be a slice object'
if _slice.start is None:
start_point = -1
else:
start_point = bisect_left(self._bounds,_slice.start)
if _slice.stop is None:
end_point = -1
else:
end_point = bisect_left(self._bounds,_slice.stop)
if start_point>=0:
if start_point < len(self._bounds) and self._bounds[start_point]<_slice.start:
start_point += 1
if end_point>=0:
self._bounds[start_point:end_point] = [_slice.start,_slice.stop]
if start_point < len(self._items):
self._items[start_point:end_point] = [self._items[start_point],_value]
else:
self._items[start_point:end_point] = [self._upperitem,_value]
else:
self._bounds[start_point:] = [_slice.start]
if start_point < len(self._items):
self._items[start_point:] = [self._items[start_point],_value]
else:
self._items[start_point:] = [self._upperitem]
self._upperitem = _value
else:
if end_point>=0:
self._bounds[:end_point] = [_slice.stop]
self._items[:end_point] = [_value]
else:
self._bounds[:] = []
self._items[:] = []
self._upperitem = _value
def __getitem__(self,_point):
"""
Gets a value from the mapping.
"""
assert not isinstance(_point,slice), 'The key cannot be a slice object'
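# bisect_right makes interval starts inclusive and stops exclusive: a point
# equal to a stored bound maps to the interval that begins at that bound.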
index = bisect_right(self._bounds,_point)
if index < len(self._bounds):
return self._items[index]
else:
return self._upperitem
def items(self):
"""
Returns an iterator with each item being
((low_bound,high_bound), value). The items are returned
in order.
"""
previous_bound = None
for b,v in izip(self._bounds,self._items):
if v is not None:
yield (previous_bound,b), v
previous_bound = b
if self._upperitem is not None:
yield (previous_bound,None), self._upperitem
def values(self):
"""
Returns an iterator with each item being a stored value. The items
are returned in order.
"""
for v in self._items:
if v is not None:
yield v
if self._upperitem is not None:
yield self._upperitem
def __repr__(self):
s = []
for b,v in self.items():
if v is not None:
s.append('[%r, %r] => %r'%(
b[0],
b[1],
v
))
return '{'+', '.join(s)+'}'
if __name__ == "__main__":
# Test 1
i = intervalmap()
i[9:] = "!"
assert repr(i) == "{[9, None] => '!'}"
i[:5] = "Hello"
i[6:7] = "World"
assert repr(i) == "{[None, 5] => 'Hello', [6, 7] => 'World', [9, None] => '!'}"
i[8:10] = "(Test)"
assert repr(i) == "{[None, 5] => 'Hello', [6, 7] => 'World', [8, 10] => '(Test)', [10, None] => '!'}"
i[:3] = 'My,'
assert repr(i) == "{[None, 3] => 'My,', [3, 5] => 'Hello', [6, 7] => 'World', [8, 10] => '(Test)', [10, None] => '!'}"
i[5.5:6] = "Cruel"
assert repr(i) == "{[None, 3] => 'My,', [3, 5] => 'Hello', [5.5, 6] => 'Cruel', [6, 7] => 'World', [8, 10] => '(Test)', [10, None] => '!'}"
i[6:6.5] = "And Harsh"
assert repr(i) == "{[None, 3] => 'My,', [3, 5] => 'Hello', [5.5, 6] => 'Cruel', [6, 6.5] => 'And Harsh', [6.5, 7] => 'World', [8, 10] => '(Test)', [10, None] => '!'}"
i[5.9:6.6] = None
assert repr(i) == "{[None, 3] => 'My,', [3, 5] => 'Hello', [5.5, 5.9000000000000004] => 'Cruel', [6.5999999999999996, 7] => 'World', [8, 10] => '(Test)', [10, None] => '!'}"
assert ' '.join(i.values()) == "My, Hello Cruel World (Test) !"
print 'Test 1 OK'
# Test 2
i = intervalmap()
i[:0] = 'A'
i[2:5] = 'B'
i[8:10] = 'C'
i[12:] = 'D'
assert repr(i) == "{[None, 0] => 'A', [2, 5] => 'B', [8, 10] => 'C', [12, None] => 'D'}"
i[:] = 'K'
assert repr(i) == "{[None, None] => 'K'}"
assert i[5] == 'K'
i[0:10] = 'L'
i[6:8] = 'M'
i[20:] = 'J'
assert i[-1] == 'K'
assert i[5] == 'L'
assert i[7] == 'M'
assert i[9] == 'L'
assert i[15] == 'K'
assert i[42] == 'J'
print 'Test 2 OK'
# Test 3
try:
from datetime import datetime
except:
print 'Test 3 skipped'
else:
i = intervalmap()
i[:datetime(2005,10,24)] = 'A'
i[datetime(2005,11,11):datetime(2005,11,17)] = 'B'
i[datetime(2005,11,30):] = 'C'
assert i[datetime(2005,9,25)] == 'A'
assert i[datetime(2005,10,23)] == 'A'
assert i[datetime(2005,10,26)] == None
assert i[datetime(2005,11,9)] == None
assert i[datetime(2005,11,16)] == 'B'
assert i[datetime(2005,11,23)] == None
assert i[datetime(2005,11,29)] == None
assert i[datetime(2005,11,30)] == 'C'
assert i[datetime(2005,12,3)] == 'C'
print 'Test 3 OK'
try:
import doctest
except:
print 'Skipping the doctests'
else:
print 'And now, the doctests'
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
|
import mock
import io
import netlib.utils
from netlib.http import Headers
from mitmproxy import filt, controller, flow, options
from mitmproxy.contrib import tnetstring
from mitmproxy.exceptions import FlowReadException
from mitmproxy.models import Error
from mitmproxy.models import Flow
from mitmproxy.models import HTTPFlow
from mitmproxy.models import HTTPRequest
from mitmproxy.models import HTTPResponse
from mitmproxy.proxy import ProxyConfig
from mitmproxy.proxy.server import DummyServer
from mitmproxy.models.connections import ClientConnection
from . import tutils
def test_app_registry():
ar = flow.AppRegistry()
ar.add("foo", "domain", 80)
r = HTTPRequest.wrap(netlib.tutils.treq())
r.host = "domain"
r.port = 80
assert ar.get(r)
r.port = 81
assert not ar.get(r)
r = HTTPRequest.wrap(netlib.tutils.treq())
r.host = "domain2"
r.port = 80
assert not ar.get(r)
r.headers["host"] = "domain"
assert ar.get(r)
class TestClientPlaybackState:
def test_tick(self):
first = tutils.tflow()
s = flow.State()
fm = flow.FlowMaster(None, None, s)
fm.start_client_playback([first, tutils.tflow()], True)
c = fm.client_playback
c.testing = True
assert not c.done()
assert not s.flow_count()
assert c.count() == 2
c.tick(fm)
assert s.flow_count()
assert c.count() == 1
c.tick(fm)
assert c.count() == 1
c.clear(c.current)
c.tick(fm)
assert c.count() == 0
c.clear(c.current)
assert c.done()
fm.state.clear()
fm.tick(timeout=0)
fm.stop_client_playback()
assert not fm.client_playback
class TestServerPlaybackState:
def test_hash(self):
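# ServerPlaybackState is built with positional flags throughout these tests;
# judging by how they are varied below they correspond to (headers, flows,
# kill, nopop, ignore_params, ignore_content, ignore_payload_params,
# ignore_host).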
s = flow.ServerPlaybackState(
None,
[],
False,
False,
None,
False,
None,
False)
r = tutils.tflow()
r2 = tutils.tflow()
assert s._hash(r)
assert s._hash(r) == s._hash(r2)
r.request.headers["foo"] = "bar"
assert s._hash(r) == s._hash(r2)
r.request.path = "voing"
assert s._hash(r) != s._hash(r2)
r.request.path = "path?blank_value"
r2.request.path = "path?"
assert s._hash(r) != s._hash(r2)
def test_headers(self):
s = flow.ServerPlaybackState(
["foo"],
[],
False,
False,
None,
False,
None,
False)
r = tutils.tflow(resp=True)
r.request.headers["foo"] = "bar"
r2 = tutils.tflow(resp=True)
assert not s._hash(r) == s._hash(r2)
r2.request.headers["foo"] = "bar"
assert s._hash(r) == s._hash(r2)
r2.request.headers["oink"] = "bar"
assert s._hash(r) == s._hash(r2)
r = tutils.tflow(resp=True)
r2 = tutils.tflow(resp=True)
assert s._hash(r) == s._hash(r2)
def test_load(self):
r = tutils.tflow(resp=True)
r.request.headers["key"] = "one"
r2 = tutils.tflow(resp=True)
r2.request.headers["key"] = "two"
s = flow.ServerPlaybackState(
None, [
r, r2], False, False, None, False, None, False)
assert s.count() == 2
assert len(s.fmap.keys()) == 1
n = s.next_flow(r)
assert n.request.headers["key"] == "one"
assert s.count() == 1
n = s.next_flow(r)
assert n.request.headers["key"] == "two"
assert s.count() == 0
assert not s.next_flow(r)
def test_load_with_nopop(self):
r = tutils.tflow(resp=True)
r.request.headers["key"] = "one"
r2 = tutils.tflow(resp=True)
r2.request.headers["key"] = "two"
s = flow.ServerPlaybackState(
None, [
r, r2], False, True, None, False, None, False)
assert s.count() == 2
s.next_flow(r)
assert s.count() == 2
def test_ignore_params(self):
s = flow.ServerPlaybackState(
None, [], False, False, [
"param1", "param2"], False, None, False)
r = tutils.tflow(resp=True)
r.request.path = "/test?param1=1"
r2 = tutils.tflow(resp=True)
r2.request.path = "/test"
assert s._hash(r) == s._hash(r2)
r2.request.path = "/test?param1=2"
assert s._hash(r) == s._hash(r2)
r2.request.path = "/test?param2=1"
assert s._hash(r) == s._hash(r2)
r2.request.path = "/test?param3=2"
assert not s._hash(r) == s._hash(r2)
def test_ignore_payload_params(self):
s = flow.ServerPlaybackState(
None, [], False, False, None, False, [
"param1", "param2"], False)
r = tutils.tflow(resp=True)
r.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
r.request.content = b"paramx=x¶m1=1"
r2 = tutils.tflow(resp=True)
r2.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
r2.request.content = b"paramx=x¶m1=1"
# same parameters
assert s._hash(r) == s._hash(r2)
# ignored parameters !=
r2.request.content = b"paramx=x¶m1=2"
assert s._hash(r) == s._hash(r2)
# missing parameter
r2.request.content = b"paramx=x"
assert s._hash(r) == s._hash(r2)
# ignorable parameter added
r2.request.content = b"paramx=x¶m1=2"
assert s._hash(r) == s._hash(r2)
# not ignorable parameter changed
r2.request.content = b"paramx=y¶m1=1"
assert not s._hash(r) == s._hash(r2)
# not ignorable parameter missing
r2.request.content = b"param1=1"
assert not s._hash(r) == s._hash(r2)
def test_ignore_payload_params_other_content_type(self):
s = flow.ServerPlaybackState(
None, [], False, False, None, False, [
"param1", "param2"], False)
r = tutils.tflow(resp=True)
r.request.headers["Content-Type"] = "application/json"
r.request.content = b'{"param1":"1"}'
r2 = tutils.tflow(resp=True)
r2.request.headers["Content-Type"] = "application/json"
r2.request.content = b'{"param1":"1"}'
# same content
assert s._hash(r) == s._hash(r2)
# distinct content (note only x-www-form-urlencoded payload is analysed)
r2.request.content = b'{"param1":"2"}'
assert not s._hash(r) == s._hash(r2)
def test_ignore_payload_wins_over_params(self):
# NOTE: parameters are mutually exclusive in options
s = flow.ServerPlaybackState(
None, [], False, False, None, True, [
"param1", "param2"], False)
r = tutils.tflow(resp=True)
r.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
r.request.content = b"paramx=y"
r2 = tutils.tflow(resp=True)
r2.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
r2.request.content = b"paramx=x"
# same parameters
assert s._hash(r) == s._hash(r2)
def test_ignore_content(self):
s = flow.ServerPlaybackState(
None,
[],
False,
False,
None,
False,
None,
False)
r = tutils.tflow(resp=True)
r2 = tutils.tflow(resp=True)
r.request.content = b"foo"
r2.request.content = b"foo"
assert s._hash(r) == s._hash(r2)
r2.request.content = b"bar"
assert not s._hash(r) == s._hash(r2)
# now ignoring content
s = flow.ServerPlaybackState(
None,
[],
False,
False,
None,
True,
None,
False)
r = tutils.tflow(resp=True)
r2 = tutils.tflow(resp=True)
r.request.content = b"foo"
r2.request.content = b"foo"
assert s._hash(r) == s._hash(r2)
r2.request.content = b"bar"
assert s._hash(r) == s._hash(r2)
r2.request.content = b""
assert s._hash(r) == s._hash(r2)
r2.request.content = None
assert s._hash(r) == s._hash(r2)
def test_ignore_host(self):
s = flow.ServerPlaybackState(
None,
[],
False,
False,
None,
False,
None,
True)
r = tutils.tflow(resp=True)
r2 = tutils.tflow(resp=True)
r.request.host = "address"
r2.request.host = "address"
assert s._hash(r) == s._hash(r2)
r2.request.host = "wrong_address"
assert s._hash(r) == s._hash(r2)
class TestHTTPFlow(object):
def test_copy(self):
f = tutils.tflow(resp=True)
f.get_state()
f2 = f.copy()
a = f.get_state()
b = f2.get_state()
del a["id"]
del b["id"]
assert a == b
assert not f == f2
assert f is not f2
assert f.request.get_state() == f2.request.get_state()
assert f.request is not f2.request
assert f.request.headers == f2.request.headers
assert f.request.headers is not f2.request.headers
assert f.response.get_state() == f2.response.get_state()
assert f.response is not f2.response
f = tutils.tflow(err=True)
f2 = f.copy()
assert f is not f2
assert f.request is not f2.request
assert f.request.headers == f2.request.headers
assert f.request.headers is not f2.request.headers
assert f.error.get_state() == f2.error.get_state()
assert f.error is not f2.error
def test_match(self):
f = tutils.tflow(resp=True)
assert not f.match("~b test")
assert f.match(None)
assert not f.match("~b test")
f = tutils.tflow(err=True)
assert f.match("~e")
tutils.raises(ValueError, f.match, "~")
def test_backup(self):
f = tutils.tflow()
f.response = HTTPResponse.wrap(netlib.tutils.tresp())
f.request.content = b"foo"
assert not f.modified()
f.backup()
f.request.content = b"bar"
assert f.modified()
f.revert()
assert f.request.content == b"foo"
def test_backup_idempotence(self):
f = tutils.tflow(resp=True)
f.backup()
f.revert()
f.backup()
f.revert()
def test_getset_state(self):
f = tutils.tflow(resp=True)
state = f.get_state()
assert f.get_state() == HTTPFlow.from_state(
state).get_state()
f.response = None
f.error = Error("error")
state = f.get_state()
assert f.get_state() == HTTPFlow.from_state(
state).get_state()
f2 = f.copy()
f2.id = f.id # copy creates a different uuid
assert f.get_state() == f2.get_state()
assert not f == f2
f2.error = Error("e2")
assert not f == f2
f.set_state(f2.get_state())
assert f.get_state() == f2.get_state()
def test_kill(self):
s = flow.State()
fm = flow.FlowMaster(None, None, s)
f = tutils.tflow()
f.intercept(mock.Mock())
f.kill(fm)
for i in s.view:
assert "killed" in str(i.error)
def test_killall(self):
s = flow.State()
fm = flow.FlowMaster(None, None, s)
f = tutils.tflow()
f.intercept(fm)
s.killall(fm)
for i in s.view:
assert "killed" in str(i.error)
def test_accept_intercept(self):
f = tutils.tflow()
f.intercept(mock.Mock())
assert not f.reply.acked
f.accept_intercept(mock.Mock())
assert f.reply.acked
def test_replace_unicode(self):
f = tutils.tflow(resp=True)
f.response.content = b"\xc2foo"
f.replace(b"foo", u"bar")
def test_replace_no_content(self):
f = tutils.tflow()
f.request.content = None
assert f.replace("foo", "bar") == 0
def test_replace(self):
f = tutils.tflow(resp=True)
f.request.headers["foo"] = "foo"
f.request.content = b"afoob"
f.response.headers["foo"] = "foo"
f.response.content = b"afoob"
assert f.replace("foo", "bar") == 6
assert f.request.headers["bar"] == "bar"
assert f.request.content == b"abarb"
assert f.response.headers["bar"] == "bar"
assert f.response.content == b"abarb"
def test_replace_encoded(self):
f = tutils.tflow(resp=True)
f.request.content = b"afoob"
f.request.encode("gzip")
f.response.content = b"afoob"
f.response.encode("gzip")
f.replace("foo", "bar")
assert f.request.raw_content != b"abarb"
f.request.decode()
assert f.request.raw_content == b"abarb"
assert f.response.raw_content != b"abarb"
f.response.decode()
assert f.response.raw_content == b"abarb"
class TestTCPFlow:
def test_match(self):
f = tutils.ttcpflow()
assert not f.match("~b nonexistent")
assert f.match(None)
assert not f.match("~b nonexistent")
f = tutils.ttcpflow(err=True)
assert f.match("~e")
tutils.raises(ValueError, f.match, "~")
class TestState:
def test_backup(self):
c = flow.State()
f = tutils.tflow()
c.add_flow(f)
f.backup()
c.revert(f)
def test_flow(self):
"""
normal flow:
connect -> request -> response
"""
c = flow.State()
f = tutils.tflow()
c.add_flow(f)
assert f
assert c.flow_count() == 1
assert c.active_flow_count() == 1
newf = tutils.tflow()
assert c.add_flow(newf)
assert c.active_flow_count() == 2
f.response = HTTPResponse.wrap(netlib.tutils.tresp())
assert c.update_flow(f)
assert c.flow_count() == 2
assert c.active_flow_count() == 1
assert not c.update_flow(None)
assert c.active_flow_count() == 1
newf.response = HTTPResponse.wrap(netlib.tutils.tresp())
assert c.update_flow(newf)
assert c.active_flow_count() == 0
def test_err(self):
c = flow.State()
f = tutils.tflow()
c.add_flow(f)
f.error = Error("message")
assert c.update_flow(f)
c = flow.State()
f = tutils.tflow()
c.add_flow(f)
c.set_limit("~e")
assert not c.view
f.error = tutils.terr()
assert c.update_flow(f)
assert c.view
def test_set_limit(self):
c = flow.State()
f = tutils.tflow()
assert len(c.view) == 0
c.add_flow(f)
assert len(c.view) == 1
c.set_limit("~s")
assert c.limit_txt == "~s"
assert len(c.view) == 0
f.response = HTTPResponse.wrap(netlib.tutils.tresp())
c.update_flow(f)
assert len(c.view) == 1
c.set_limit(None)
assert len(c.view) == 1
f = tutils.tflow()
c.add_flow(f)
assert len(c.view) == 2
c.set_limit("~q")
assert len(c.view) == 1
c.set_limit("~s")
assert len(c.view) == 1
assert "Invalid" in c.set_limit("~")
def test_set_intercept(self):
c = flow.State()
assert not c.set_intercept("~q")
assert c.intercept_txt == "~q"
assert "Invalid" in c.set_intercept("~")
assert not c.set_intercept(None)
assert c.intercept_txt is None
def _add_request(self, state):
f = tutils.tflow()
state.add_flow(f)
return f
def _add_response(self, state):
f = tutils.tflow()
state.add_flow(f)
f.response = HTTPResponse.wrap(netlib.tutils.tresp())
state.update_flow(f)
def _add_error(self, state):
f = tutils.tflow(err=True)
state.add_flow(f)
def test_clear(self):
c = flow.State()
f = self._add_request(c)
f.intercepted = True
c.clear()
assert c.flow_count() == 0
def test_dump_flows(self):
c = flow.State()
self._add_request(c)
self._add_response(c)
self._add_request(c)
self._add_response(c)
self._add_request(c)
self._add_response(c)
self._add_error(c)
flows = c.view[:]
c.clear()
c.load_flows(flows)
assert isinstance(c.flows[0], Flow)
def test_accept_all(self):
c = flow.State()
self._add_request(c)
self._add_response(c)
self._add_request(c)
c.accept_all(mock.Mock())
class TestSerialize:
def _treader(self):
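# Serialize three HTTP flows with responses, three with errors, and a pair of
# TCP flows into an in-memory buffer, then return a FlowReader over it.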
sio = io.BytesIO()
w = flow.FlowWriter(sio)
for i in range(3):
f = tutils.tflow(resp=True)
w.add(f)
for i in range(3):
f = tutils.tflow(err=True)
w.add(f)
f = tutils.ttcpflow()
w.add(f)
f = tutils.ttcpflow(err=True)
w.add(f)
sio.seek(0)
return flow.FlowReader(sio)
def test_roundtrip(self):
sio = io.BytesIO()
f = tutils.tflow()
f.marked = True
f.request.content = bytes(bytearray(range(256)))
w = flow.FlowWriter(sio)
w.add(f)
sio.seek(0)
r = flow.FlowReader(sio)
l = list(r.stream())
assert len(l) == 1
f2 = l[0]
assert f2.get_state() == f.get_state()
assert f2.request == f.request
assert f2.marked
def test_load_flows(self):
r = self._treader()
s = flow.State()
fm = flow.FlowMaster(None, None, s)
fm.load_flows(r)
assert len(s.flows) == 6
def test_load_flows_reverse(self):
r = self._treader()
s = flow.State()
opts = options.Options(
mode="reverse",
upstream_server="https://use-this-domain"
)
conf = ProxyConfig(opts)
fm = flow.FlowMaster(opts, DummyServer(conf), s)
fm.load_flows(r)
assert s.flows[0].request.host == "use-this-domain"
def test_filter(self):
sio = io.BytesIO()
fl = filt.parse("~c 200")
w = flow.FilteredFlowWriter(sio, fl)
f = tutils.tflow(resp=True)
f.response.status_code = 200
w.add(f)
f = tutils.tflow(resp=True)
f.response.status_code = 201
w.add(f)
sio.seek(0)
r = flow.FlowReader(sio)
assert len(list(r.stream()))
def test_error(self):
sio = io.BytesIO()
sio.write(b"bogus")
sio.seek(0)
r = flow.FlowReader(sio)
tutils.raises(FlowReadException, list, r.stream())
f = FlowReadException("foo")
assert str(f) == "foo"
def test_versioncheck(self):
f = tutils.tflow()
d = f.get_state()
d["version"] = (0, 0)
sio = io.BytesIO()
tnetstring.dump(d, sio)
sio.seek(0)
r = flow.FlowReader(sio)
tutils.raises("version", list, r.stream())
class TestFlowMaster:
def test_replay(self):
s = flow.State()
fm = flow.FlowMaster(None, None, s)
f = tutils.tflow(resp=True)
f.request.content = None
assert "missing" in fm.replay_request(f)
f.intercepted = True
assert "intercepting" in fm.replay_request(f)
f.live = True
assert "live" in fm.replay_request(f)
def test_duplicate_flow(self):
s = flow.State()
fm = flow.FlowMaster(None, None, s)
f = tutils.tflow(resp=True)
fm.load_flow(f)
assert s.flow_count() == 1
f2 = fm.duplicate_flow(f)
assert f2.response
assert s.flow_count() == 2
assert s.index(f2) == 1
def test_create_flow(self):
s = flow.State()
fm = flow.FlowMaster(None, None, s)
assert fm.create_request("GET", "http", "example.com", 80, "/")
def test_all(self):
s = flow.State()
fm = flow.FlowMaster(None, None, s)
f = tutils.tflow(req=None)
fm.clientconnect(f.client_conn)
f.request = HTTPRequest.wrap(netlib.tutils.treq())
fm.request(f)
assert s.flow_count() == 1
f.response = HTTPResponse.wrap(netlib.tutils.tresp())
fm.response(f)
assert s.flow_count() == 1
fm.clientdisconnect(f.client_conn)
f.error = Error("msg")
f.error.reply = controller.DummyReply()
fm.error(f)
fm.shutdown()
def test_client_playback(self):
s = flow.State()
f = tutils.tflow(resp=True)
pb = [tutils.tflow(resp=True), f]
fm = flow.FlowMaster(
options.Options(),
DummyServer(ProxyConfig(options.Options())),
s
)
assert not fm.start_server_playback(
pb,
False,
[],
False,
False,
None,
False,
None,
False)
assert not fm.start_client_playback(pb, False)
fm.client_playback.testing = True
assert not fm.state.flow_count()
fm.tick(0)
assert fm.state.flow_count()
f.error = Error("error")
fm.error(f)
def test_server_playback(self):
s = flow.State()
f = tutils.tflow()
f.response = HTTPResponse.wrap(netlib.tutils.tresp(content=f.request))
pb = [f]
fm = flow.FlowMaster(options.Options(), None, s)
fm.refresh_server_playback = True
assert not fm.do_server_playback(tutils.tflow())
fm.start_server_playback(
pb,
False,
[],
False,
False,
None,
False,
None,
False)
assert fm.do_server_playback(tutils.tflow())
fm.start_server_playback(
pb,
False,
[],
True,
False,
None,
False,
None,
False)
r = tutils.tflow()
r.request.content = b"gibble"
assert not fm.do_server_playback(r)
assert fm.do_server_playback(tutils.tflow())
fm.tick(0)
assert fm.should_exit.is_set()
fm.stop_server_playback()
assert not fm.server_playback
def test_server_playback_kill(self):
s = flow.State()
f = tutils.tflow()
f.response = HTTPResponse.wrap(netlib.tutils.tresp(content=f.request))
pb = [f]
fm = flow.FlowMaster(None, None, s)
fm.refresh_server_playback = True
fm.start_server_playback(
pb,
True,
[],
False,
False,
None,
False,
None,
False)
f = tutils.tflow()
f.request.host = "nonexistent"
fm.process_new_request(f)
assert "killed" in f.error.msg
class TestRequest:
def test_simple(self):
f = tutils.tflow()
r = f.request
u = r.url
r.url = u
tutils.raises(ValueError, setattr, r, "url", "")
assert r.url == u
r2 = r.copy()
assert r.get_state() == r2.get_state()
def test_get_url(self):
r = HTTPRequest.wrap(netlib.tutils.treq())
assert r.url == "http://address:22/path"
r.scheme = "https"
assert r.url == "https://address:22/path"
r.host = "host"
r.port = 42
assert r.url == "https://host:42/path"
r.host = "address"
r.port = 22
assert r.url == "https://address:22/path"
assert r.pretty_url == "https://address:22/path"
r.headers["Host"] = "foo.com:22"
assert r.url == "https://address:22/path"
assert r.pretty_url == "https://foo.com:22/path"
def test_replace(self):
r = HTTPRequest.wrap(netlib.tutils.treq())
r.path = "path/foo"
r.headers["Foo"] = "fOo"
r.content = b"afoob"
assert r.replace("foo(?i)", "boo") == 4
assert r.path == "path/boo"
assert b"foo" not in r.content
assert r.headers["boo"] == "boo"
def test_constrain_encoding(self):
r = HTTPRequest.wrap(netlib.tutils.treq())
r.headers["accept-encoding"] = "gzip, oink"
r.constrain_encoding()
assert "oink" not in r.headers["accept-encoding"]
r.headers.set_all("accept-encoding", ["gzip", "oink"])
r.constrain_encoding()
assert "oink" not in r.headers["accept-encoding"]
def test_get_content_type(self):
resp = HTTPResponse.wrap(netlib.tutils.tresp())
resp.headers = Headers(content_type="text/plain")
assert resp.headers["content-type"] == "text/plain"
class TestResponse:
def test_simple(self):
f = tutils.tflow(resp=True)
resp = f.response
resp2 = resp.copy()
assert resp2.get_state() == resp.get_state()
def test_replace(self):
r = HTTPResponse.wrap(netlib.tutils.tresp())
r.headers["Foo"] = "fOo"
r.content = b"afoob"
assert r.replace("foo(?i)", "boo") == 3
assert b"foo" not in r.content
assert r.headers["boo"] == "boo"
def test_get_content_type(self):
resp = HTTPResponse.wrap(netlib.tutils.tresp())
resp.headers = Headers(content_type="text/plain")
assert resp.headers["content-type"] == "text/plain"
class TestError:
def test_getset_state(self):
e = Error("Error")
state = e.get_state()
assert Error.from_state(state).get_state() == e.get_state()
assert e.copy()
e2 = Error("bar")
assert not e == e2
e.set_state(e2.get_state())
assert e.get_state() == e2.get_state()
e3 = e.copy()
assert e3.get_state() == e.get_state()
def test_repr(self):
e = Error("yay")
assert repr(e)
class TestClientConnection:
def test_state(self):
c = tutils.tclient_conn()
assert ClientConnection.from_state(c.get_state()).get_state() == \
c.get_state()
c2 = tutils.tclient_conn()
c2.address.address = (c2.address.host, 4242)
assert not c == c2
c2.timestamp_start = 42
c.set_state(c2.get_state())
assert c.timestamp_start == 42
c3 = c.copy()
assert c3.get_state() == c.get_state()
assert str(c)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.api import metric_pb2 as api_metric_pb2
from google.api import monitored_resource_pb2
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3 import enums
from google.cloud.monitoring_v3.proto import common_pb2
from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
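# Record the (method, request) pair for later assertions, then pop the next
# canned response, raising it if it is an exception.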
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestMetricServiceClient(object):
def test_list_monitored_resource_descriptors(self):
# Setup Expected Response
next_page_token = ''
resource_descriptors_element = {}
resource_descriptors = [resource_descriptors_element]
expected_response = {
'next_page_token': next_page_token,
'resource_descriptors': resource_descriptors
}
expected_response = metric_service_pb2.ListMonitoredResourceDescriptorsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path('[PROJECT]')
paged_list_response = client.list_monitored_resource_descriptors(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.resource_descriptors[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_monitored_resource_descriptors_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path('[PROJECT]')
paged_list_response = client.list_monitored_resource_descriptors(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_monitored_resource_descriptor(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
type_ = 'type3575610'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name_2,
'type': type_,
'display_name': display_name,
'description': description
}
expected_response = monitored_resource_pb2.MonitoredResourceDescriptor(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.monitored_resource_descriptor_path(
'[PROJECT]', '[MONITORED_RESOURCE_DESCRIPTOR]')
response = client.get_monitored_resource_descriptor(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_monitored_resource_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.monitored_resource_descriptor_path(
'[PROJECT]', '[MONITORED_RESOURCE_DESCRIPTOR]')
with pytest.raises(CustomException):
client.get_monitored_resource_descriptor(name)
def test_list_metric_descriptors(self):
# Setup Expected Response
next_page_token = ''
metric_descriptors_element = {}
metric_descriptors = [metric_descriptors_element]
expected_response = {
'next_page_token': next_page_token,
'metric_descriptors': metric_descriptors
}
expected_response = metric_service_pb2.ListMetricDescriptorsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path('[PROJECT]')
paged_list_response = client.list_metric_descriptors(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.metric_descriptors[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListMetricDescriptorsRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_metric_descriptors_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path('[PROJECT]')
paged_list_response = client.list_metric_descriptors(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_metric_descriptor(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
type_ = 'type3575610'
unit = 'unit3594628'
description = 'description-1724546052'
display_name = 'displayName1615086568'
expected_response = {
'name': name_2,
'type': type_,
'unit': unit,
'description': description,
'display_name': display_name
}
expected_response = api_metric_pb2.MetricDescriptor(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.metric_descriptor_path('[PROJECT]',
'[METRIC_DESCRIPTOR]')
response = client.get_metric_descriptor(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.GetMetricDescriptorRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_metric_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.metric_descriptor_path('[PROJECT]',
'[METRIC_DESCRIPTOR]')
with pytest.raises(CustomException):
client.get_metric_descriptor(name)
def test_create_metric_descriptor(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
type_ = 'type3575610'
unit = 'unit3594628'
description = 'description-1724546052'
display_name = 'displayName1615086568'
expected_response = {
'name': name_2,
'type': type_,
'unit': unit,
'description': description,
'display_name': display_name
}
expected_response = api_metric_pb2.MetricDescriptor(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path('[PROJECT]')
metric_descriptor = {}
response = client.create_metric_descriptor(name, metric_descriptor)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.CreateMetricDescriptorRequest(
name=name, metric_descriptor=metric_descriptor)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_metric_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path('[PROJECT]')
metric_descriptor = {}
with pytest.raises(CustomException):
client.create_metric_descriptor(name, metric_descriptor)
def test_delete_metric_descriptor(self):
channel = ChannelStub()
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.metric_descriptor_path('[PROJECT]',
'[METRIC_DESCRIPTOR]')
client.delete_metric_descriptor(name)
assert len(channel.requests) == 1
expected_request = metric_service_pb2.DeleteMetricDescriptorRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_metric_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.metric_descriptor_path('[PROJECT]',
'[METRIC_DESCRIPTOR]')
with pytest.raises(CustomException):
client.delete_metric_descriptor(name)
def test_list_time_series(self):
# Setup Expected Response
next_page_token = ''
time_series_element = {}
time_series = [time_series_element]
expected_response = {
'next_page_token': next_page_token,
'time_series': time_series
}
expected_response = metric_service_pb2.ListTimeSeriesResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path('[PROJECT]')
filter_ = 'filter-1274492040'
interval = {}
view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
paged_list_response = client.list_time_series(name, filter_, interval,
view)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.time_series[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListTimeSeriesRequest(
name=name, filter=filter_, interval=interval, view=view)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_time_series_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path('[PROJECT]')
filter_ = 'filter-1274492040'
interval = {}
view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
paged_list_response = client.list_time_series(name, filter_, interval,
view)
with pytest.raises(CustomException):
list(paged_list_response)
def test_create_time_series(self):
channel = ChannelStub()
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path('[PROJECT]')
time_series = []
client.create_time_series(name, time_series)
assert len(channel.requests) == 1
expected_request = metric_service_pb2.CreateTimeSeriesRequest(
name=name, time_series=time_series)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_time_series_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path('[PROJECT]')
time_series = []
with pytest.raises(CustomException):
client.create_time_series(name, time_series)
|
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#from gadgets import coroutine
from itertools import islice
#FIXME: Should be in gadgets
def coroutine(func):
def start(*args,**kwargs):
cr = func(*args,**kwargs)
next(cr)
return cr
return start
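# Hedged usage sketch (not part of the original module): a consumer generator
# primed by the coroutine() decorator above, so .send() works immediately.
# The name _coroutine_example is illustrative only.
def _coroutine_example():
    @coroutine
    def accumulate():
        total = 0
        while True:
            total += (yield total)
    acc = accumulate()   # already advanced to the first yield by coroutine()
    acc.send(1)          # -> 1
    return acc.send(2)   # -> 3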
#Simple version of window, below
def context(an_iter):
an_iter = iter(an_iter)
prev_item = None
this_item = next(an_iter)
for next_item in an_iter:
yield (prev_item, this_item, next_item)
prev_item = this_item
this_item = next_item
yield (prev_item, this_item, None)
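# Hedged usage sketch (not part of the original module): context() yields
# (previous, current, next) triples, padding both ends with None.
def _context_example():
    return list(context('abc'))
    # -> [(None, 'a', 'b'), ('a', 'b', 'c'), ('b', 'c', None)]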
def window(an_iter, length):
"""Creates a len length window in an_iter and steps through the iter returning the window as a tuple each time it is called"""
an_iter=iter(an_iter)#in case it's a list
stor = tuple(islice(an_iter, length))
if len(stor) == length:
yield stor
for thing in an_iter:
stor = stor[1:] + (thing,)
yield stor
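# Hedged usage sketch (not part of the original module): a 3-wide sliding
# window over a short list; _window_example is an illustrative name.
def _window_example():
    return list(window([1, 2, 3, 4, 5], 3))
    # -> [(1, 2, 3), (2, 3, 4), (3, 4, 5)]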
def group(an_iter, length=2):
"""Like window but with no overlap. Last set is yielded even if not full."""
stor = []
for x in an_iter:
stor.append(x)
if len(stor)>=length:
yield tuple(stor)
stor = []
if len(stor):
yield tuple(stor)
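# Hedged usage sketch (not part of the original module): non-overlapping
# groups of two, with the final partial group still emitted.
def _group_example():
    return list(group([1, 2, 3, 4, 5], 2))
    # -> [(1, 2), (3, 4), (5,)]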
def dump(aniter):
for itm in aniter:
print(itm)
yield itm
def progress(aniter, tick):
count = 0
bigdig = 0
for itm in aniter:
count+=1
if count>tick:
bigdig+=1
print("%d x %d" % (bigdig, tick))
count=0
yield itm
def dictize(aniter, mode, initial=None):
"""iter must contain (key,value) pairs. mode is a string, one of: replace, keep,
tally, sum, append, or a custom function that takes two arguments.
replace: default dict behavior. New value overwrites old if key exists. This
is essentially a pass-thru.
keep: Noop if kv already exists in dict.
tally: Ignore value, count how many times each key occurs.
sum: Each key contains a sum of the (presumably summable) values that arrive
with that key.
append: each key contains a list of the items that arrived with that key.
add: each key contains a set of the items that arrived with that key.
Custom func: The first argument is the existing key value. This function
won't be called if the key doesn't exist. The second is the newly arrived value.
The return value will replace the existing value in the internal dict
initial optional argument: function that gets called the first time a key
occurs. It's parameter is the value. It's return is placed in the dict. Use
to specify a default value."""
data = {}
modes = "replace keep tally sum append add".split(' ')
funcs = [lambda e, n: n,
lambda e, n: e,
lambda e, n=None: e+1,
lambda e, n: e+n,
lambda e, n: e+[n],
lambda e, n: e.union([n])]
inits = [lambda v: v,
lambda v: v,
lambda v: 1,
lambda v: v,
lambda v: [v],
lambda v: set([v])]
if mode in modes:
modei = modes.index(mode)
func = funcs[modei]
if not initial:
initial = inits[modei]
else:
assert hasattr(mode, '__call__'), '2nd argument must be a function or\
one of: %s' % ' '.join(modes)
func = mode
if not initial:
initial = lambda x: x
for (k, v) in aniter:
if k in data:
data[k] = func(data[k], v)
else:
data[k] = initial(v)
return data
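# Hedged usage sketch (not part of the original module): the same (key, value)
# stream reduced with a few of the built-in modes described in the docstring.
def _dictize_example():
    pairs = [('a', 1), ('b', 2), ('a', 3)]
    counts = dictize(pairs, 'tally')    # {'a': 2, 'b': 1}
    sums = dictize(pairs, 'sum')        # {'a': 4, 'b': 2}
    lists = dictize(pairs, 'append')    # {'a': [1, 3], 'b': [2]}
    return counts, sums, lists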
class StateMachinery(object):
iter = None
state = None #set lazily from `initial` on the first call to __next__
initial='state_sample'
def __init__(self, aniter):
self.iter = aniter
def __iter__(self):
return self
def __next__(self):
if not self.state:
self.state = (getattr(self, self.initial),)
return self.state[0](*self.state[1:])
def state_sample(self, param=False):
#set next state
self.state=(self.state_sample, True)
return next(self.iter)
#test
class MyState(StateMachinery):
def state_one(self):
v = next(self.iter)
if v < 3:
print('here')
self.state=(self.state_two, v)
return 'one'
def state_two(self, param):
v = 0
for x in range(param):
v = next(self.iter)
self.state = (self.state_three, v)
return 'two'
def state_three(self, param):
if param==0:
self.state = (self.state_one, )
return self.state_one()
else:
self.state = (self.state_three, param - 1)
return next(self.iter)
initial = 'state_one'
def states2(iterable, test, state2, filter1=lambda x: x, include_match=True):
iterable = unyield(iterable)
if filter1 is None:
for x in iterable:
if test(x):
if include_match:
iterable.unyield(x)
subiter = state2(iterable)
for y in subiter:
yield y
else:
for x in iterable:
if test(x):
if include_match:
iterable.unyield(x)
subiter = state2(iterable)
for y in subiter:
yield y
else:
yield filter1(x)
class readahead():
gen = None
stack = None
def __init__(self, gen):
self.gen = gen
self.stack = []
def __iter__(self):
return self
def __next__(self):
if len(self.stack):
return self.stack.pop()
else:
return next(self.gen)
def readahead(self, count=1):
"""Feeds items from the source iterator into the stack. count
tells how many times to call next. You may preview or tamper with
stuff using the .stack property. StopIteration will fall through
.readahead() if the source iterator ends, so you should be
prepared to catch it if there is any doubt about where you are
in your iterator
"""
for x in range(count):
itm = next(self.gen)
self.stack.insert(0, itm)
def iter(self):
data = reversed(self.stack)
def gen():
for x in data:
yield x
while True:
self.readahead()
yield self.stack[0]
return gen()
class unyield(readahead):
def unyield(self, itm):
self.stack.append(itm)
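# Hedged usage sketch (not part of the original classes): take an item from an
# unyield wrapper and push it back so it is produced again; names illustrative.
def _readahead_example():
    src = unyield(iter([1, 2, 3]))
    first = next(src)     # 1
    src.unyield(first)    # push it back onto the stack
    return list(src)      # -> [1, 2, 3]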
def combinations2(iter1, iter2, func=lambda a,b: True):
return ((x, y) for y in iter2 for x in iter1 if func(x, y))
def chunk(iter, key=lambda x: x):
last = None
store = None
for itm in iter:
k = key(itm)
if store is None:
store=[]
last = k
elif k==last:
pass
else:
yield store
store=[]
last = k
store.append(itm)
if store:
yield store
def change_indices(iterable, key=lambda x: x):
"""Yields indices of state changes in iterable. Comparisons can be done on
different properties or aspects of the contents of iterable by defining a
different key function."""
started = False
last = None
for i, itm in enumerate(iterable):
k = key(itm)
if started:
if k != last:
yield i
last = k
else:
last = k
started = True
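# Hedged usage sketch (not part of the original module): indices where the
# value changes relative to the previous element.
def _change_indices_example():
    return list(change_indices([1, 1, 2, 2, 2, 3]))
    # -> [2, 5]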
def flatten1(aniter):
for x in aniter:
for y in x:
yield y
def denumerate(finite_iterable):
"""Like enumerate, returns (index, item) pairs, but in reverse order,
starting with the end of the list/highest index and proceeding to the
beginning/lowest"""
data = list(reversed(finite_iterable))
for i, x in zip(list(range(len(data)-1, -1, -1)), data):
yield i, x
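# Hedged usage sketch (not part of the original module): like enumerate(),
# but walking the sequence from the end toward the start.
def _denumerate_example():
    return list(denumerate(['a', 'b', 'c']))
    # -> [(2, 'c'), (1, 'b'), (0, 'a')]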
def rotations(finite_iterable):
itr = finite_iterable
for x in range(len(itr)):
yield itr[x:] + itr[:x]
def take(iter, quantity):
return [x for i, x in zip(list(range(quantity)), iter)]
|
|
import unittest2 as unittest
import pymongo
import time
import random
import threading
from oplogreplay import OplogReplayer
SOURCE_HOST = '127.0.0.1:27017'
DEST_HOST = '127.0.0.1:27018'
TESTDB = 'testdb'
# Inherit from OplogReplayer to count number of processed_op methodcalls.
class CountingOplogReplayer(OplogReplayer):
count = 0
def process_op(self, ns, raw):
OplogReplayer.process_op(self, ns, raw)
CountingOplogReplayer.count += 1
class TestOplogReplayer(unittest.TestCase):
""" TestCase for the OplogReplayer.
Each test performs the following (see setUp and tearDown for more details):
* delete test databases
* start an OplogReplayer
* perform some actions (inserts, etc.)
* wait for the OplogReplayer to finish replaying ops
* assertions
* stop the OplogReplayer
"""
@classmethod
def setUpClass(cls):
# Create connections to both test databases.
cls.source = pymongo.Connection(SOURCE_HOST)
cls.dest = pymongo.Connection(DEST_HOST)
def _start_replay(self, **kwargs):
# Stop the OplogReplayer before starting a new one.
self._stop_replay()
#if getattr(self, 'oplogreplayer', None):
# self._stop_replay()
# Init & start OplogReplayer, in a separate thread.
self.oplogreplayer = CountingOplogReplayer(
SOURCE_HOST, DEST_HOST, poll_time=0.1, **kwargs)
self.thread = threading.Thread(target=self.oplogreplayer.start)
self.thread.start()
def _stop_replay(self):
# Stop OplogReplayer & join its thread.
if getattr(self, 'oplogreplayer', None):
self.oplogreplayer.stop()
if getattr(self, 'thread', None):
self.thread.join()
# Delete oplogreplayer & thread.
self.oplogreplayer = None
self.thread = None
def setUp(self):
# Drop test databases.
self.source.drop_database(TESTDB)
self.dest.drop_database(TESTDB)
self.dest.drop_database('oplogreplay')
# Sleep a little to allow drop database operations to complete.
time.sleep(0.05)
# Remember Database objects.
self.sourcedb = self.source.testdb
self.destdb = self.dest.testdb
# Stop replay, in case it was still running from a previous test.
self._stop_replay()
# Reset global counter & start OplogReplayer.
CountingOplogReplayer.count = 0
self._start_replay()
def tearDown(self):
self._stop_replay()
def _synchronous_wait(self, target, timeout=3.0):
""" Synchronously wait for the oplogreplay to finish.
Waits until the oplog's retry_count hits target, but at most
timeout seconds.
"""
wait_until = time.time() + timeout
while time.time() < wait_until:
if CountingOplogReplayer.count == target:
return
time.sleep(0.05)
# The synchronous wait timed out - raise so the failure is reported clearly.
raise Exception('retry_count was only %s/%s after a %.2fsec wait' % \
(CountingOplogReplayer.count, target, timeout))
def assertCollectionEqual(self, coll1, coll2):
self.assertEqual(coll1.count(), coll2.count(),
msg='Collections have different count.')
for obj1 in coll1.find():
obj2 = coll2.find_one(obj1)
self.assertEqual(obj1, obj2)
def assertDatabaseEqual(self, db1, db2):
self.assertListEqual(db1.collection_names(), db2.collection_names(),
msg='Databases have different collections.')
for coll in db1.collection_names():
self.assertCollectionEqual(db1[coll], db2[coll])
def test_writes(self):
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 1})
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 2})
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 3})
self.sourcedb.testcoll.remove({'nr': 3})
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 4})
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 5})
self.sourcedb.testcoll.insert({'content': '...', 'nr': 6})
self.sourcedb.testcoll.update({'nr': 6}, {'$set': {'content': 'newContent'}})
self.sourcedb.testcoll.update({'nr': 97}, {'$set': {'content': 'newContent'}})
self.sourcedb.testcoll.update({'nr': 8}, {'$set': {'content': 'newContent'}}, upsert=True)
self.sourcedb.testcoll.remove({'nr': 99})
self.sourcedb.testcoll.remove({'nr': 3})
self.sourcedb.testcoll.remove({'nr': 4})
self.sourcedb.testcoll.insert({'content': 'new content', 'nr': 3})
self.sourcedb.testcoll.insert({'content': 'new content', 'nr': 4})
# Removes and updates that match nothing do not hit the oplog:
self._synchronous_wait(12)
# Test that the 2 test databases are identical.
self.assertDatabaseEqual(self.sourcedb, self.destdb)
def _perform_bulk_inserts(self, nr=100):
for i in xrange(nr):
obj = { 'content': '%s' % random.random(),
'nr': random.randrange(100000) }
self.sourcedb.testcoll.insert(obj)
def test_bulk_inserts(self):
self._perform_bulk_inserts(1000)
self._synchronous_wait(1000)
# Test that the 2 test databases are identical.
self.assertDatabaseEqual(self.sourcedb, self.destdb)
def test_discontinued_replay(self):
self._perform_bulk_inserts(200)
self._stop_replay()
self._perform_bulk_inserts(150)
self._start_replay()
self._perform_bulk_inserts(100)
self._synchronous_wait(450)
# Test that the 2 test databases are identical.
self.assertDatabaseEqual(self.sourcedb, self.destdb)
# Test that no operation was replayed twice.
self.assertEqual(CountingOplogReplayer.count, 450)
def test_index_operations(self):
# Create an index, then test that it was created on the destination.
index = self.sourcedb.testidx.ensure_index('idxfield')
self._synchronous_wait(1)
self.assertIn(index, self.destdb.testidx.index_information())
# Delete the index, and test that it was deleted from destination.
self.sourcedb.testidx.drop_index(index)
self._synchronous_wait(2)
self.assertNotIn(index, self.destdb.testidx.index_information())
def test_replay_indexes(self):
# Create index1 on source + dest.
index1 = self.sourcedb.testidx.ensure_index('idxfield1')
# Restart OplogReplayer, without replaying indexes.
self._start_replay(replay_indexes=False)
# Create index2 on source only.
index2 = self.sourcedb.testidx.ensure_index('idxfield2')
# Delete index1 from source only.
self.sourcedb.testidx.drop_index(index1)
self._synchronous_wait(3)
# Test indexes on source and destination.
source_indexes = self.sourcedb.testidx.index_information()
self.assertNotIn(index1, source_indexes)
self.assertIn(index2, source_indexes)
dest_indexes = self.destdb.testidx.index_information()
self.assertIn(index1, dest_indexes)
self.assertNotIn(index2, dest_indexes)
def test_start_from_ts(self):
self._stop_replay()
# Should not be replayed:
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 1})
# Get last timestamp.
obj = self.source.local.oplog.rs.find().sort('$natural', -1).limit(1)[0]
lastts = obj['ts']
# Should be replayed.
self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 1})
self._start_replay(ts=lastts)
self._synchronous_wait(1)
self.assertEqual(self.destdb.testcoll.count(), 1)
|
|
# -*- coding: utf-8 -*-
import mock
from nose.tools import * # noqa
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
from website.addons.mendeley.tests.factories import (
MendeleyAccountFactory,
MendeleyUserSettingsFactory,
ExternalAccountFactory,
)
from website.addons.mendeley.provider import MendeleyCitationsProvider
import datetime
from website.addons.mendeley import model
class MockFolder(object):
@property
def name(self):
return 'somename'
@property
def json(self):
return {'id': 'abc123', 'parent_id': 'cba321'}
class MendeleyProviderTestCase(OsfTestCase):
def setUp(self):
super(MendeleyProviderTestCase, self).setUp()
self.provider = model.Mendeley()
@mock.patch('website.addons.mendeley.model.Mendeley._get_client')
def test_handle_callback(self, mock_get_client):
# Must return provider_id and display_name
mock_client = mock.Mock()
mock_client.profiles.me = mock.Mock(id='testid', display_name='testdisplay')
mock_get_client.return_value = mock_client
res = self.provider.handle_callback('testresponse')
mock_get_client.assert_called_with('testresponse')
assert_equal(res.get('provider_id'), 'testid')
assert_equal(res.get('display_name'), 'testdisplay')
@mock.patch('website.addons.mendeley.model.Mendeley._get_client')
def test_client_not_cached(self, mock_get_client):
# The first call to .client returns a new client
mock_account = mock.Mock()
mock_account.expires_at = datetime.datetime.now()
self.provider.account = mock_account
self.provider.client
assert_true(mock_get_client.called)
@mock.patch('website.addons.mendeley.model.Mendeley._get_client')
def test_client_cached(self, mock_get_client):
# Repeated calls to .client returns the same client
self.provider._client = mock.Mock()
res = self.provider.client
assert_equal(res, self.provider._client)
assert_false(mock_get_client.called)
def test_citation_lists(self):
mock_client = mock.Mock()
mock_folders = [MockFolder()]
mock_list = mock.Mock()
mock_list.items = mock_folders
mock_client.folders.list.return_value = mock_list
self.provider._client = mock_client
mock_account = mock.Mock()
self.provider.account = mock_account
res = self.provider.citation_lists(MendeleyCitationsProvider()._extract_folder)
assert_equal(res[1]['name'], mock_folders[0].name)
assert_equal(res[1]['id'], mock_folders[0].json['id'])
class MendeleyNodeSettingsTestCase(OsfTestCase):
def setUp(self):
super(MendeleyNodeSettingsTestCase, self).setUp()
self.node = ProjectFactory()
self.node_settings = model.MendeleyNodeSettings(owner=self.node)
self.node_settings.save()
self.user = self.node.creator
self.user_settings = self.user.get_or_add_addon('mendeley')
def tearDown(self):
super(MendeleyNodeSettingsTestCase, self).tearDown()
self.user_settings.remove()
self.node_settings.remove()
self.node.remove()
self.user.remove()
@mock.patch('website.addons.mendeley.model.Mendeley')
def test_api_not_cached(self, mock_mendeley):
# The first call to .api returns a new object
api = self.node_settings.api
mock_mendeley.assert_called_once()
assert_equal(api, mock_mendeley())
@mock.patch('website.addons.mendeley.model.Mendeley')
def test_api_cached(self, mock_mendeley):
# Repeated calls to .api returns the same object
self.node_settings._api = 'testapi'
api = self.node_settings.api
assert_false(mock_mendeley.called)
assert_equal(api, 'testapi')
def test_set_auth(self):
external_account = ExternalAccountFactory()
self.user.external_accounts.append(external_account)
self.user.save()
# this should be reset after the call
self.node_settings.mendeley_list_id = 'anything'
self.node_settings.set_auth(
external_account=external_account,
user=self.user
)
# this instance is updated
assert_equal(
self.node_settings.external_account,
external_account
)
assert_equal(
self.node_settings.user_settings,
self.user_settings
)
assert_is_none(
self.node_settings.mendeley_list_id
)
# user_settings was updated
# TODO: The call to grant_oauth_access in set_auth should be mocked
assert_true(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=external_account,
)
)
def test_set_auth_wrong_user(self):
external_account = ExternalAccountFactory()
self.user.external_accounts.append(external_account)
self.user.save()
with assert_raises(PermissionsError):
self.node_settings.set_auth(
external_account=external_account,
user=UserFactory()
)
def test_clear_auth(self):
self.node_settings.external_account = ExternalAccountFactory()
self.node_settings.mendeley_list_id = 'something'
self.node_settings.user_settings = self.user_settings
self.node_settings.save()
self.node_settings.clear_auth()
assert_is_none(self.node_settings.external_account)
assert_is_none(self.node_settings.mendeley_list_id)
assert_is_none(self.node_settings.user_settings)
def test_set_target_folder(self):
folder_id = 'fake-folder-id'
folder_name = 'fake-folder-name'
external_account = ExternalAccountFactory()
self.user.external_accounts.append(external_account)
self.user.save()
self.node_settings.set_auth(
external_account=external_account,
user=self.user,
)
assert_is_none(self.node_settings.mendeley_list_id)
self.node_settings.set_target_folder(
folder_id,
folder_name,
auth=Auth(user=self.user),
)
# instance was updated
assert_equal(
self.node_settings.mendeley_list_id,
'fake-folder-id',
)
# user_settings was updated
# TODO: the call to grant_oauth_access should be mocked
assert_true(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=external_account,
metadata={'folder': 'fake-folder-id'}
)
)
log = self.node.logs[-1]
assert_equal(log.action, 'mendeley_folder_selected')
assert_equal(log.params['folder_id'], folder_id)
assert_equal(log.params['folder_name'], folder_name)
def test_has_auth_false(self):
external_account = ExternalAccountFactory()
assert_false(self.node_settings.has_auth)
# both external_account and user_settings must be set to have auth
self.node_settings.external_account = external_account
assert_false(self.node_settings.has_auth)
self.node_settings.external_account = None
self.node_settings.user_settings = self.user_settings
assert_false(self.node_settings.has_auth)
# set_auth must be called to have auth
self.node_settings.external_account = external_account
self.node_settings.user_settings = self.user_settings
assert_false(self.node_settings.has_auth)
def test_has_auth_true(self):
external_account = ExternalAccountFactory()
self.user.external_accounts.append(external_account)
self.node_settings.set_auth(external_account, self.user)
# mendeley_list_id should have no effect
self.node_settings.mendeley_list_id = None
assert_true(self.node_settings.has_auth)
# mendeley_list_id should have no effect
self.node_settings.mendeley_list_id = 'totally fake ID'
assert_true(self.node_settings.has_auth)
def test_selected_folder_name_root(self):
self.node_settings.mendeley_list_id = 'ROOT'
assert_equal(
self.node_settings.selected_folder_name,
"All Documents"
)
def test_selected_folder_name_empty(self):
self.node_settings.mendeley_list_id = None
assert_equal(
self.node_settings.selected_folder_name,
''
)
@mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
def test_selected_folder_name(self, mock_folder_metadata):
# Mock the return from api call to get the folder's name
mock_folder = mock.Mock()
mock_folder.name = 'Fake Folder'
# Add the mocked return object to the mocked api client
mock_folder_metadata.return_value = mock_folder
self.node_settings.mendeley_list_id = 'fake-list-id'
assert_equal(
self.node_settings.selected_folder_name,
'Fake Folder'
)
# TODO: Make these tests generic and move to core
@mock.patch('framework.status.push_status_message')
def test_remove_contributor_authorizer(self, mock_push_status):
external_account = ExternalAccountFactory()
self.user.external_accounts.append(external_account)
self.node_settings.set_auth(external_account, self.user)
contributor = UserFactory()
self.node.add_contributor(contributor, permissions=['read', 'write', 'admin'])
self.node.remove_contributor(self.node.creator, auth=Auth(user=contributor))
assert_false(self.node_settings.has_auth)
assert_false(self.user_settings.verify_oauth_access(self.node, external_account))
def test_remove_contributor_not_authorizer(self):
external_account = ExternalAccountFactory()
self.user.external_accounts.append(external_account)
self.node_settings.set_auth(external_account, self.user)
contributor = UserFactory()
self.node.add_contributor(contributor)
self.node.remove_contributor(contributor, auth=Auth(user=self.node.creator))
assert_true(self.node_settings.has_auth)
assert_true(self.user_settings.verify_oauth_access(self.node, external_account))
@mock.patch('framework.status.push_status_message')
def test_fork_by_authorizer(self, mock_push_status):
external_account = ExternalAccountFactory()
self.user.external_accounts.append(external_account)
self.node_settings.set_auth(external_account, self.user)
fork = self.node.fork_node(auth=Auth(user=self.node.creator))
assert_true(fork.get_addon('mendeley').has_auth)
assert_true(self.user_settings.verify_oauth_access(fork, external_account))
@mock.patch('framework.status.push_status_message')
def test_fork_not_by_authorizer(self, mock_push_status):
external_account = ExternalAccountFactory()
self.user.external_accounts.append(external_account)
self.node_settings.set_auth(external_account, self.user)
contributor = UserFactory()
self.node.add_contributor(contributor)
fork = self.node.fork_node(auth=Auth(user=contributor))
assert_false(fork.get_addon('mendeley').has_auth)
assert_false(self.user_settings.verify_oauth_access(fork, external_account))
class MendeleyUserSettingsTestCase(OsfTestCase):
def _prep_oauth_case(self):
self.node = ProjectFactory()
self.user = self.node.creator
self.external_account = ExternalAccountFactory()
self.user.external_accounts.append(self.external_account)
self.user.save()
self.user_settings = self.user.get_or_add_addon('mendeley')
def test_grant_oauth_access_no_metadata(self):
self._prep_oauth_case()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
)
self.user_settings.save()
assert_equal(
self.user_settings.oauth_grants,
{self.node._id: {self.external_account._id: {}}},
)
def test_grant_oauth_access_metadata(self):
self._prep_oauth_case()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
self.user_settings.save()
assert_equal(
self.user_settings.oauth_grants,
{
self.node._id: {
self.external_account._id: {'folder': 'fake_folder_id'}
},
}
)
def test_verify_oauth_access_no_metadata(self):
self._prep_oauth_case()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
)
self.user_settings.save()
assert_true(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account
)
)
assert_false(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=ExternalAccountFactory()
)
)
def test_verify_oauth_access_metadata(self):
self._prep_oauth_case()
self.user_settings.grant_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
self.user_settings.save()
assert_true(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'fake_folder_id'}
)
)
assert_false(
self.user_settings.verify_oauth_access(
node=self.node,
external_account=self.external_account,
metadata={'folder': 'another_folder_id'}
)
)
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stim
import pytest
def test_identity():
p = stim.PauliString(3)
assert len(p) == 3
assert p[0] == p[1] == p[2] == 0
assert p.sign == +1
def test_from_str():
p = stim.PauliString("-_XYZ_ZYX")
assert len(p) == 8
assert p[0] == 0
assert p[1] == 1
assert p[2] == 2
assert p[3] == 3
assert p[4] == 0
assert p[5] == 3
assert p[6] == 2
assert p[7] == 1
assert p.sign == -1
p = stim.PauliString("")
assert len(p) == 0
assert p.sign == +1
p = stim.PauliString("X")
assert len(p) == 1
assert p[0] == 1
assert p.sign == +1
p = stim.PauliString("+X")
assert len(p) == 1
assert p[0] == 1
assert p.sign == +1
p = stim.PauliString("iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == 1j
p = stim.PauliString("+iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == 1j
p = stim.PauliString("-iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == -1j
def test_equality():
assert not (stim.PauliString(4) == None)
assert not (stim.PauliString(4) == "other object")
assert not (stim.PauliString(4) == object())
assert stim.PauliString(4) != None
assert stim.PauliString(4) != "other object"
assert stim.PauliString(4) != object()
assert stim.PauliString(4) == stim.PauliString(4)
assert stim.PauliString(3) != stim.PauliString(4)
assert not (stim.PauliString(4) != stim.PauliString(4))
assert not (stim.PauliString(3) == stim.PauliString(4))
assert stim.PauliString("+X") == stim.PauliString("+X")
assert stim.PauliString("+X") != stim.PauliString("-X")
assert stim.PauliString("+X") != stim.PauliString("+Y")
assert stim.PauliString("+X") != stim.PauliString("-Y")
assert stim.PauliString("+X") != stim.PauliString("+iX")
assert stim.PauliString("+X") != stim.PauliString("-iX")
assert stim.PauliString("__") != stim.PauliString("_X")
assert stim.PauliString("__") != stim.PauliString("X_")
assert stim.PauliString("__") != stim.PauliString("XX")
assert stim.PauliString("__") == stim.PauliString("__")
def test_random():
p1 = stim.PauliString.random(100)
p2 = stim.PauliString.random(100)
assert p1 != p2
seen_signs = {stim.PauliString.random(1).sign for _ in range(200)}
assert seen_signs == {1, -1}
seen_signs = {stim.PauliString.random(1, allow_imaginary=True).sign for _ in range(200)}
assert seen_signs == {1, -1, 1j, -1j}
def test_str():
assert str(stim.PauliString(3)) == "+___"
assert str(stim.PauliString("XYZ")) == "+XYZ"
assert str(stim.PauliString("-XYZ")) == "-XYZ"
assert str(stim.PauliString("iXYZ")) == "+iXYZ"
assert str(stim.PauliString("-iXYZ")) == "-iXYZ"
def test_repr():
assert repr(stim.PauliString(3)) == 'stim.PauliString("+___")'
assert repr(stim.PauliString("-XYZ")) == 'stim.PauliString("-XYZ")'
vs = [
stim.PauliString(""),
stim.PauliString("ZXYZZ"),
stim.PauliString("-XYZ"),
stim.PauliString("I"),
stim.PauliString("iIXYZ"),
stim.PauliString("-iIXYZ"),
]
for v in vs:
r = repr(v)
assert eval(r, {'stim': stim}) == v
def test_commutes():
def c(a: str, b: str) -> bool:
return stim.PauliString(a).commutes(stim.PauliString(b))
assert c("", "")
assert c("X", "_")
assert c("X", "X")
assert not c("X", "Y")
assert not c("X", "Z")
assert c("XXXX", "YYYY")
assert c("XXXX", "YYYZ")
assert not c("XXXX", "XXXZ")
assert not c("XXXX", "___Z")
assert not c("XXXX", "Z___")
assert c("XXXX", "Z_Z_")
def test_product():
assert stim.PauliString("") * stim.PauliString("") == stim.PauliString("")
assert stim.PauliString("i") * stim.PauliString("i") == stim.PauliString("-")
assert stim.PauliString("i") * stim.PauliString("-i") == stim.PauliString("+")
assert stim.PauliString("-i") * stim.PauliString("-i") == stim.PauliString("-")
assert stim.PauliString("i") * stim.PauliString("-") == stim.PauliString("-i")
x = stim.PauliString("X")
y = stim.PauliString("Y")
z = stim.PauliString("Z")
assert x == +1 * x == x * +1 == +x
assert x * -1 == -x == -1 * x
assert (-x)[0] == 1
assert (-x).sign == -1
assert -(-x) == x
assert stim.PauliString(10) * stim.PauliString(11) == stim.PauliString(11)
assert x * z == stim.PauliString("-iY")
assert x * x == stim.PauliString(1)
assert x * y == stim.PauliString("iZ")
assert y * x == stim.PauliString("-iZ")
assert x * y == 1j * z
assert y * x == z * -1j
assert x.extended_product(y) == (1, 1j * z)
assert y.extended_product(x) == (1, -1j * z)
assert x.extended_product(x) == (1, stim.PauliString(1))
xx = stim.PauliString("+XX")
yy = stim.PauliString("+YY")
zz = stim.PauliString("+ZZ")
assert xx * zz == -yy
assert xx.extended_product(zz) == (1, -yy)
def test_inplace_product():
p = stim.PauliString("X")
alias = p
p *= 1j
assert alias == stim.PauliString("iX")
assert alias is p
p *= 1j
assert alias == stim.PauliString("-X")
p *= 1j
assert alias == stim.PauliString("-iX")
p *= 1j
assert alias == stim.PauliString("+X")
p *= stim.PauliString("Z")
assert alias == stim.PauliString("-iY")
p *= -1j
assert alias == stim.PauliString("-Y")
p *= -1j
assert alias == stim.PauliString("iY")
p *= -1j
assert alias == stim.PauliString("+Y")
p *= -1j
assert alias == stim.PauliString("-iY")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("+Y")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("iY")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("-Y")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("-iY")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("-Y")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("iY")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("+Y")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("-iY")
assert alias is p
def test_imaginary_phase():
p = stim.PauliString("IXYZ")
ip = stim.PauliString("iIXYZ")
assert 1j * p == p * 1j == ip == -stim.PauliString("-iIXYZ")
assert p.sign == 1
assert (-p).sign == -1
assert ip.sign == 1j
assert (-ip).sign == -1j
assert stim.PauliString("X") * stim.PauliString("Y") == 1j * stim.PauliString("Z")
assert stim.PauliString("Y") * stim.PauliString("X") == -1j * stim.PauliString("Z")
def test_get_set_sign():
p = stim.PauliString(2)
assert p.sign == +1
p.sign = -1
assert str(p) == "-__"
assert p.sign == -1
p.sign = +1
assert str(p) == "+__"
assert p.sign == +1
with pytest.raises(ValueError, match="new_sign"):
p.sign = 5
p.sign = 1j
assert str(p) == "+i__"
assert p.sign == 1j
p.sign = -1j
assert str(p) == "-i__"
assert p.sign == -1j
def test_get_set_item():
p = stim.PauliString(5)
assert list(p) == [0, 0, 0, 0, 0]
assert p[0] == 0
p[0] = 1
assert p[0] == 1
p[0] = 'Y'
assert p[0] == 2
p[0] = 'Z'
assert p[0] == 3
with pytest.raises(IndexError, match="new_pauli"):
p[0] = 't'
with pytest.raises(IndexError, match="new_pauli"):
p[0] = 10
assert p[1] == 0
p[1] = 2
assert p[1] == 2
def test_get_slice():
p = stim.PauliString("XXXX__YYYY__ZZZZX")
assert p[:7] == stim.PauliString("XXXX__Y")
assert p[:-3] == stim.PauliString("XXXX__YYYY__ZZ")
assert p[::2] == stim.PauliString("XX_YY_ZZX")
assert p[::-1] == stim.PauliString("XZZZZ__YYYY__XXXX")
assert p[-3:3] == stim.PauliString("")
assert p[-6:-1] == stim.PauliString("_ZZZZ")
assert p[3:5:-1] == stim.PauliString("")
assert p[5:3:-1] == stim.PauliString("__")
assert p[4:2:-1] == stim.PauliString("_X")
assert p[2:0:-1] == stim.PauliString("XX")
def test_copy():
p = stim.PauliString(3)
p2 = p.copy()
assert p == p2
assert p is not p2
p = stim.PauliString("-i_XYZ")
p2 = p.copy()
assert p == p2
assert p is not p2
def test_hash():
# stim.PauliString is mutable. It must not also be value-hashable.
# Defining __hash__ requires defining a FrozenPauliString variant instead.
with pytest.raises(TypeError, match="unhashable"):
_ = hash(stim.PauliString(1))
def test_add():
ps = stim.PauliString
assert ps(0) + ps(0) == ps(0)
assert ps(3) + ps(1000) == ps(1003)
assert ps(1000) + ps(3) == ps(1003)
assert ps("_XYZ") + ps("_ZZZ_") == ps("_XYZ_ZZZ_")
p = ps("_XYZ")
p += p
assert p == ps("_XYZ_XYZ")
for k in range(1, 8):
p += p
assert p == ps("_XYZ_XYZ" * 2**k)
p = ps("_XXX")
p += ps("Y")
assert p == ps("_XXXY")
p = ps("")
alias = p
p += ps("X")
assert alias is p
assert alias == ps("X")
p += p
assert alias is p
assert alias == ps("XX")
def test_mul_different_sizes():
ps = stim.PauliString
assert ps("") * ps("X" * 1000) == ps("X" * 1000)
assert ps("X" * 1000) * ps("") == ps("X" * 1000)
assert ps("Z" * 1000) * ps("") == ps("Z" * 1000)
p = ps("Z")
alias = p
p *= ps("ZZZ")
assert p == ps("_ZZ")
p *= ps("Z")
assert p == ps("ZZZ")
assert alias is p
def test_div():
assert stim.PauliString("+XYZ") / +1 == stim.PauliString("+XYZ")
assert stim.PauliString("+XYZ") / -1 == stim.PauliString("-XYZ")
assert stim.PauliString("+XYZ") / 1j == stim.PauliString("-iXYZ")
assert stim.PauliString("+XYZ") / -1j == stim.PauliString("iXYZ")
assert stim.PauliString("iXYZ") / 1j == stim.PauliString("XYZ")
p = stim.PauliString("__")
alias = p
assert p / -1 == stim.PauliString("-__")
assert alias == stim.PauliString("__")
p /= -1
assert alias == stim.PauliString("-__")
p /= 1j
assert alias == stim.PauliString("i__")
p /= 1j
assert alias == stim.PauliString("__")
p /= -1j
assert alias == stim.PauliString("i__")
p /= 1
assert alias == stim.PauliString("i__")
def test_mul_repeat():
ps = stim.PauliString
assert ps("") * 100 == ps("")
assert ps("X") * 100 == ps("X" * 100)
assert ps("XYZ_") * 1000 == ps("XYZ_" * 1000)
assert ps("XYZ_") * 1 == ps("XYZ_")
assert ps("XYZ_") * 0 == ps("")
assert 100 * ps("") == ps("")
assert 100 * ps("X") == ps("X" * 100)
assert 1000 * ps("XYZ_") == ps("XYZ_" * 1000)
assert 1 * ps("XYZ_") == ps("XYZ_")
assert 0 * ps("XYZ_") == ps("")
assert ps("i") * 0 == ps("+")
assert ps("i") * 1 == ps("i")
assert ps("i") * 2 == ps("-")
assert ps("i") * 3 == ps("-i")
assert ps("i") * 4 == ps("+")
assert ps("i") * 5 == ps("i")
assert ps("-i") * 0 == ps("+")
assert ps("-i") * 1 == ps("-i")
assert ps("-i") * 2 == ps("-")
assert ps("-i") * 3 == ps("i")
assert ps("-i") * 4 == ps("+")
assert ps("-i") * 5 == ps("-i")
assert ps("-") * 0 == ps("+")
assert ps("-") * 1 == ps("-")
assert ps("-") * 2 == ps("+")
assert ps("-") * 3 == ps("-")
assert ps("-") * 4 == ps("+")
assert ps("-") * 5 == ps("-")
p = ps("XYZ")
alias = p
p *= 1000
assert p == ps("XYZ" * 1000)
assert alias is p
def test_init_list():
assert stim.PauliString([]) == stim.PauliString(0)
assert stim.PauliString([0, 1, 2, 3]) == stim.PauliString("_XYZ")
with pytest.raises(ValueError, match="pauli"):
_ = stim.PauliString([-1])
with pytest.raises(ValueError, match="pauli"):
_ = stim.PauliString([4])
with pytest.raises(TypeError):
_ = stim.PauliString([2**500])
def test_init_copy():
p = stim.PauliString("_XYZ")
p2 = stim.PauliString(p)
assert p is not p2
assert p == p2
p = stim.PauliString("-i_XYZ")
p2 = stim.PauliString(p)
assert p is not p2
assert p == p2
def test_commutes_different_lengths():
x1000 = stim.PauliString("X" * 1000)
z1000 = stim.PauliString("Z" * 1000)
x1 = stim.PauliString("X")
z1 = stim.PauliString("Z")
assert x1.commutes(x1000)
assert x1000.commutes(x1)
assert z1.commutes(z1000)
assert z1000.commutes(z1)
assert not z1.commutes(x1000)
assert not x1000.commutes(z1)
assert not x1.commutes(z1000)
assert not z1000.commutes(x1)
def test_pickle():
import pickle
t = stim.PauliString.random(4)
a = pickle.dumps(t)
assert pickle.loads(a) == t
t = stim.PauliString("i_XYZ")
a = pickle.dumps(t)
assert pickle.loads(a) == t
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras model-to-estimator using tf.distribute.Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow_estimator.python.estimator import keras_lib
from tensorflow_estimator.python.estimator import run_config as run_config_lib
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
def simple_sequential_model():
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.Dense(_NUM_CLASS, activation='softmax'))
return model
def simple_functional_model():
a = tf.keras.layers.Input(shape=_INPUT_SIZE)
b = tf.keras.layers.Dense(16, activation='relu')(a)
b = tf.keras.layers.Dropout(0.1)(b)
b = tf.keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = tf.keras.models.Model(inputs=[a], outputs=[b])
return model
def multi_inputs_multi_outputs_model():
input_a = tf.keras.layers.Input(shape=(16,), name='input_a')
input_b = tf.keras.layers.Input(shape=(16,), name='input_b')
input_m = tf.keras.layers.Input(shape=(8,), dtype='string', name='input_m')
dense = tf.keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
# Read m
interm_m = tf.keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
interm_s = tf.keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
interm_b = dense(input_b)
merged = tf.keras.layers.concatenate([interm_s, interm_b], name='merge')
output_c = tf.keras.layers.Dense(3, activation='softmax', name='dense_2')(
merged)
output_d = tf.keras.layers.Dense(2, activation='softmax', name='dense_3')(
merged)
model = tf.keras.models.Model(
inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
model.compile(
loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
metrics={
'dense_2': 'categorical_accuracy',
'dense_3': 'categorical_accuracy'
})
return model
def get_ds_train_input_fn():
np.random.seed(_RANDOM_SEED)
(x_train, y_train), _ = get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_train = tf.keras.utils.to_categorical(y_train)
dataset = tf.compat.v1.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.batch(32)
return dataset
def get_ds_test_input_fn():
np.random.seed(_RANDOM_SEED)
_, (x_test, y_test) = get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_test = tf.keras.utils.to_categorical(y_test)
dataset = tf.compat.v1.data.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.batch(32)
return dataset
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED)
(b_train, d_train), (b_test, d_test) = get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED)
(m_train, _), (m_test, _) = get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED)
c_train = tf.keras.utils.to_categorical(c_train)
c_test = tf.keras.utils.to_categorical(c_test)
d_train = tf.keras.utils.to_categorical(d_train)
d_test = tf.keras.utils.to_categorical(d_test)
train_data = {
'input_a': a_train,
'input_b': b_train,
'input_m': m_train,
'output_c': c_train,
'output_d': d_train
}
test_data = {
'input_a': a_test,
'input_b': b_test,
'input_m': m_test,
'output_c': c_test,
'output_d': d_test
}
return (train_data, test_data)
class TestEstimatorDistributionStrategy(tf.test.TestCase,
parameterized.TestCase):
def setUp(self):
super(TestEstimatorDistributionStrategy, self).setUp()
strategy_combinations.set_virtual_cpus_to_at_least(3)
self._base_dir = os.path.join(self.get_temp_dir(),
'keras_to_estimator_strategy_test')
tf.compat.v1.gfile.MakeDirs(self._base_dir)
self._config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED, model_dir=self._base_dir)
def tearDown(self):
super(TestEstimatorDistributionStrategy, self).tearDown()
tf.compat.v1.summary.FileWriterCache.clear()
if os.path.isdir(self._base_dir):
tf.compat.v1.gfile.DeleteRecursively(self._base_dir)
@tf.compat.v2.__internal__.distribute.combinations.generate(
tf.compat.v2.__internal__.test.combinations.combine(
distribution=[
tf.compat.v2.__internal__.distribute.combinations.mirrored_strategy_with_cpu_1_and_2,
],
mode=['graph'],
cloning=[True, False]))
def test_train_functional_with_distribution_strategy(self, distribution,
cloning):
keras_model = simple_functional_model()
keras_model.compile(
loss='categorical_crossentropy',
metrics=[tf.keras.metrics.CategoricalAccuracy()],
optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.01),
cloning=cloning)
config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=distribution,
eval_distribute=distribution)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=config)
before_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
after_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
tf.compat.v1.summary.FileWriterCache.clear()
tf.compat.v1.gfile.DeleteRecursively(self._config.model_dir)
@tf.compat.v2.__internal__.distribute.combinations.generate(
tf.compat.v2.__internal__.test.combinations.combine(
distribution=[
tf.compat.v2.__internal__.distribute.combinations.mirrored_strategy_with_cpu_1_and_2,
],
mode=['graph'],
cloning=[True, False]))
def test_train_sequential_with_distribution_strategy(self, distribution,
cloning):
keras_model = simple_sequential_model()
keras_model.compile(
loss='categorical_crossentropy',
metrics=[tf.keras.metrics.CategoricalAccuracy()],
optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.01),
cloning=cloning)
config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=distribution)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=config)
before_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
after_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
tf.compat.v1.summary.FileWriterCache.clear()
tf.compat.v1.gfile.DeleteRecursively(self._config.model_dir)
@tf.compat.v2.__internal__.distribute.combinations.generate(
tf.compat.v2.__internal__.test.combinations.combine(
distribution=[
tf.compat.v2.__internal__.distribute.combinations.mirrored_strategy_with_cpu_1_and_2,
],
mode=['graph']))
def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self, distribution):
train_data, test_data = get_multi_inputs_multi_outputs_data()
def train_input_fn():
input_dict = {
'input_a': train_data['input_a'],
'input_b': train_data['input_b'],
'input_m': train_data['input_m'].astype(np.str)
}
output_dict = {
'dense_2': train_data['output_c'],
'dense_3': train_data['output_d']
}
return tf.compat.v1.data.Dataset.from_tensor_slices(
(input_dict, output_dict)).batch(16)
def eval_input_fn():
input_dict = {
'input_a': test_data['input_a'],
'input_b': test_data['input_b'],
'input_m': test_data['input_m'].astype(np.str)
}
output_dict = {
'dense_2': test_data['output_c'],
'dense_3': test_data['output_d']
}
return tf.compat.v1.data.Dataset.from_tensor_slices(
(input_dict, output_dict)).batch(16)
self.do_test_multi_inputs_multi_outputs_with_input_fn(
distribution, train_input_fn, eval_input_fn)
def do_test_multi_inputs_multi_outputs_with_input_fn(self, distribution,
train_input_fn,
eval_input_fn):
config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=distribution)
with self.cached_session():
model = multi_inputs_multi_outputs_model()
est_keras = keras_lib.model_to_estimator(keras_model=model, config=config)
baseline_eval_results = est_keras.evaluate(
input_fn=eval_input_fn, steps=1)
est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)
eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
self.assertLess(eval_results['loss'], baseline_eval_results['loss'])
def get_test_data(train_samples,
test_samples,
input_shape,
num_classes,
random_seed=None):
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape)
return ((x[:train_samples], y[:train_samples]),
(x[train_samples:], y[train_samples:]))
if __name__ == '__main__':
test.main()
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
from datetime import datetime
from .scenario_tests import AbstractPreparer, SingleValueReplacer
from .base import LiveScenarioTest
from .exceptions import CliTestError
from .reverse_dependency import get_dummy_cli
from .utilities import StorageAccountKeyReplacer, GraphClientPasswordReplacer
KEY_RESOURCE_GROUP = 'rg'
KEY_VIRTUAL_NETWORK = 'vnet'
KEY_VNET_NIC = 'nic'
# This preparer's traffic is not recorded.
# As a result, when tests are run in record mode, SDK calls cannot be made to return the prepared resource group.
# Instead, the deterministic prepared resource's information should be returned (see the usage sketch at the end of this module).
class NoTrafficRecordingPreparer(AbstractPreparer):
from .base import execute as _raw_execute
def __init__(self, *args, **kwargs):
super(NoTrafficRecordingPreparer, self).__init__(disable_recording=True, *args, **kwargs)
def live_only_execute(self, cli_ctx, command, expect_failure=False):
        # call AbstractPreparer.moniker to make resource counts and self.resource_moniker consistent between live and
        # playback. See SingleValueReplacer.process_request, AbstractPreparer.__call__._preparer_wrapper
        # and ScenarioTest.create_random_name. This ensures that when self.create_random_name is called for the
        # first time during live or playback, it has the same value.
_ = self.moniker
try:
if self.test_class_instance.in_recording:
return self._raw_execute(cli_ctx, command, expect_failure)
except AttributeError:
# A test might not have an in_recording attribute. Run live if this is an instance of LiveScenarioTest
if isinstance(self.test_class_instance, LiveScenarioTest):
return self._raw_execute(cli_ctx, command, expect_failure)
return None
# Resource Group Preparer and its shorthand decorator
class ResourceGroupPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
def __init__(self, name_prefix='clitest.rg',
parameter_name='resource_group',
parameter_name_for_location='resource_group_location', location='westus',
dev_setting_name='AZURE_CLI_TEST_DEV_RESOURCE_GROUP_NAME',
dev_setting_location='AZURE_CLI_TEST_DEV_RESOURCE_GROUP_LOCATION',
random_name_length=75, key='rg', subscription=None, additional_tags=None):
if ' ' in name_prefix:
raise CliTestError('Error: Space character in resource group name prefix \'%s\'' % name_prefix)
super(ResourceGroupPreparer, self).__init__(name_prefix, random_name_length)
self.cli_ctx = get_dummy_cli()
self.location = location
self.subscription = subscription
self.parameter_name = parameter_name
self.parameter_name_for_location = parameter_name_for_location
self.key = key
self.additional_tags = additional_tags
self.dev_setting_name = os.environ.get(dev_setting_name, None)
self.dev_setting_location = os.environ.get(dev_setting_location, location)
def create_resource(self, name, **kwargs):
if self.dev_setting_name:
self.test_class_instance.kwargs[self.key] = self.dev_setting_name
return {self.parameter_name: self.dev_setting_name,
self.parameter_name_for_location: self.dev_setting_location}
tags = {'product': 'azurecli', 'cause': 'automation', 'date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}
if 'ENV_JOB_NAME' in os.environ:
tags['job'] = os.environ['ENV_JOB_NAME']
tags = ' '.join(['{}={}'.format(key, value) for key, value in tags.items()])
if self.additional_tags is not None:
            tags += ' ' + ' '.join(['{}={}'.format(key, value) for key, value in self.additional_tags.items()])
template = 'az group create --location {} --name {} --tag ' + tags
if self.subscription:
template += ' --subscription {} '.format(self.subscription)
self.live_only_execute(self.cli_ctx, template.format(self.location, name))
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: name, self.parameter_name_for_location: self.location}
def remove_resource(self, name, **kwargs):
# delete group if test is being recorded and if the group is not a dev rg
if not self.dev_setting_name:
template = 'az group delete --name {} --yes --no-wait '
if self.subscription:
template += ' --subscription {} '.format(self.subscription)
self.live_only_execute(self.cli_ctx, template.format(name))
# Storage Account Preparer and its shorthand decorator
# pylint: disable=too-many-instance-attributes
class StorageAccountPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
def __init__(self, name_prefix='clitest', sku='Standard_LRS', location='westus', kind='Storage', hns=False, length=24,
parameter_name='storage_account', resource_group_parameter_name='resource_group', skip_delete=True,
dev_setting_name='AZURE_CLI_TEST_DEV_STORAGE_ACCOUNT_NAME', key='sa'):
super(StorageAccountPreparer, self).__init__(name_prefix, length)
self.cli_ctx = get_dummy_cli()
self.location = location
self.sku = sku
self.kind = kind
self.hns = hns
self.resource_group_parameter_name = resource_group_parameter_name
self.skip_delete = skip_delete
self.parameter_name = parameter_name
self.key = key
self.dev_setting_name = os.environ.get(dev_setting_name, None)
def create_resource(self, name, **kwargs):
group = self._get_resource_group(**kwargs)
if not self.dev_setting_name:
template = 'az storage account create -n {} -g {} -l {} --sku {} --kind {} --https-only '
if self.hns:
template += '--hns'
self.live_only_execute(self.cli_ctx, template.format(
name, group, self.location, self.sku, self.kind, self.hns))
else:
name = self.dev_setting_name
try:
account_key = self.live_only_execute(self.cli_ctx,
'storage account keys list -n {} -g {} --query "[0].value" -otsv'
.format(name, group)).output
except AttributeError: # live only execute returns None if playing from record
account_key = None
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: name,
self.parameter_name + '_info': (name, account_key or StorageAccountKeyReplacer.KEY_REPLACEMENT)}
def remove_resource(self, name, **kwargs):
if not self.skip_delete and not self.dev_setting_name:
group = self._get_resource_group(**kwargs)
self.live_only_execute(self.cli_ctx, 'az storage account delete -n {} -g {} --yes'.format(name, group))
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create a storage account a resource group is required. Please add ' \
'decorator @{} in front of this storage account preparer.'
raise CliTestError(template.format(ResourceGroupPreparer.__name__))
# KeyVault Preparer and its shorthand decorator
# pylint: disable=too-many-instance-attributes
class KeyVaultPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
def __init__(self, name_prefix='clitest', sku='standard', location='westus', enable_soft_delete=True,
parameter_name='key_vault', resource_group_parameter_name='resource_group', skip_delete=False,
dev_setting_name='AZURE_CLI_TEST_DEV_KEY_VAULT_NAME', key='kv', name_len=24, additional_params=None):
super(KeyVaultPreparer, self).__init__(name_prefix, name_len)
self.cli_ctx = get_dummy_cli()
self.location = location
self.sku = sku
self.enable_soft_delete = enable_soft_delete
self.resource_group_parameter_name = resource_group_parameter_name
self.skip_delete = skip_delete
self.parameter_name = parameter_name
self.key = key
self.additional_params = additional_params
self.dev_setting_name = os.environ.get(dev_setting_name, None)
def create_resource(self, name, **kwargs):
if not self.dev_setting_name:
group = self._get_resource_group(**kwargs)
template = 'az keyvault create -n {} -g {} -l {} --sku {} '
if self.enable_soft_delete:
template += '--enable-soft-delete --retention-days 7 '
if self.additional_params:
template += self.additional_params
self.live_only_execute(self.cli_ctx, template.format(name, group, self.location, self.sku))
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: name}
self.test_class_instance.kwargs[self.key] = self.dev_setting_name
return {self.parameter_name: self.dev_setting_name}
def remove_resource(self, name, **kwargs):
if not self.skip_delete and not self.dev_setting_name:
group = self._get_resource_group(**kwargs)
self.live_only_execute(self.cli_ctx, 'az keyvault delete -n {} -g {}'.format(name, group))
if self.enable_soft_delete:
from azure.core.exceptions import HttpResponseError
try:
self.live_only_execute(self.cli_ctx, 'az keyvault purge -n {} -l {}'.format(name, self.location))
except HttpResponseError:
                    # the purge operation fails with HttpResponseError when --enable-purge-protection is set; ignore it
pass
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create a KeyVault a resource group is required. Please add ' \
'decorator @{} in front of this KeyVault preparer.'
raise CliTestError(template.format(KeyVaultPreparer.__name__))
# Role based access control service principal preparer
# pylint: disable=too-many-instance-attributes
class RoleBasedServicePrincipalPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
def __init__(self, name_prefix='clitest',
skip_assignment=True, parameter_name='sp_name', parameter_password='sp_password',
dev_setting_sp_name='AZURE_CLI_TEST_DEV_SP_NAME',
dev_setting_sp_password='AZURE_CLI_TEST_DEV_SP_PASSWORD', key='sp'):
super(RoleBasedServicePrincipalPreparer, self).__init__(name_prefix, 24)
self.cli_ctx = get_dummy_cli()
self.skip_assignment = skip_assignment
self.result = {}
self.parameter_name = parameter_name
self.parameter_password = parameter_password
self.dev_setting_sp_name = os.environ.get(dev_setting_sp_name, None)
self.dev_setting_sp_password = os.environ.get(dev_setting_sp_password, None)
self.key = key
def create_resource(self, name, **kwargs):
if not self.dev_setting_sp_name:
command = 'az ad sp create-for-rbac -n {}{}' \
.format(name, ' --skip-assignment' if self.skip_assignment else '')
try:
self.result = self.live_only_execute(self.cli_ctx, command).get_output_in_json()
except AttributeError: # live only execute returns None if playing from record
pass
self.test_class_instance.kwargs[self.key] = name
self.test_class_instance.kwargs['{}_pass'.format(self.key)] = self.parameter_password
return {self.parameter_name: name,
self.parameter_password: self.result.get('password') or GraphClientPasswordReplacer.PWD_REPLACEMENT}
self.test_class_instance.kwargs[self.key] = self.dev_setting_sp_name
self.test_class_instance.kwargs['{}_pass'.format(self.key)] = self.dev_setting_sp_password
return {self.parameter_name: self.dev_setting_sp_name,
self.parameter_password: self.dev_setting_sp_password}
def remove_resource(self, name, **kwargs):
if not self.dev_setting_sp_name:
self.live_only_execute(self.cli_ctx, 'az ad sp delete --id {}'.format(self.result.get('appId')))
# Managed Application preparer
# pylint: disable=too-many-instance-attributes
class ManagedApplicationPreparer(AbstractPreparer, SingleValueReplacer):
from .base import execute
def __init__(self, name_prefix='clitest', parameter_name='aad_client_app_id',
parameter_secret='aad_client_app_secret', app_name='app_name',
dev_setting_app_name='AZURE_CLI_TEST_DEV_APP_NAME',
dev_setting_app_secret='AZURE_CLI_TEST_DEV_APP_SECRET', key='app'):
super(ManagedApplicationPreparer, self).__init__(name_prefix, 24)
self.cli_ctx = get_dummy_cli()
self.parameter_name = parameter_name
self.parameter_secret = parameter_secret
self.result = {}
self.app_name = app_name
self.dev_setting_app_name = os.environ.get(dev_setting_app_name, None)
self.dev_setting_app_secret = os.environ.get(dev_setting_app_secret, None)
self.key = key
def create_resource(self, name, **kwargs):
if not self.dev_setting_app_name:
template = 'az ad app create --display-name {} --key-type Password --password {} --identifier-uris ' \
'http://{}'
self.result = self.execute(self.cli_ctx, template.format(name, name, name)).get_output_in_json()
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: self.result['appId'], self.parameter_secret: name}
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: self.dev_setting_app_name,
self.parameter_secret: self.dev_setting_app_secret}
def remove_resource(self, name, **kwargs):
if not self.dev_setting_app_name:
self.execute(self.cli_ctx, 'az ad app delete --id {}'.format(self.result['appId']))
# pylint: disable=too-many-instance-attributes
class VirtualNetworkPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
def __init__(self, name_prefix='clitest.vn', location='westus',
parameter_name='virtual_network',
resource_group_parameter_name='resource_group',
resource_group_key=KEY_RESOURCE_GROUP,
dev_setting_name='AZURE_CLI_TEST_DEV_VIRTUAL_NETWORK_NAME',
random_name_length=24, key=KEY_VIRTUAL_NETWORK):
if ' ' in name_prefix:
raise CliTestError(
'Error: Space character in name prefix \'%s\'' % name_prefix)
super(VirtualNetworkPreparer, self).__init__(
name_prefix, random_name_length)
self.cli_ctx = get_dummy_cli()
self.location = location
self.parameter_name = parameter_name
self.key = key
self.resource_group_parameter_name = resource_group_parameter_name
self.resource_group_key = resource_group_key
self.dev_setting_name = os.environ.get(dev_setting_name, None)
def create_resource(self, name, **kwargs):
if self.dev_setting_name:
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: self.dev_setting_name, }
tags = {'product': 'azurecli', 'cause': 'automation',
'date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}
if 'ENV_JOB_NAME' in os.environ:
tags['job'] = os.environ['ENV_JOB_NAME']
tags = ' '.join(['{}={}'.format(key, value)
for key, value in tags.items()])
template = 'az network vnet create --resource-group {} --location {} --name {} --subnet-name default --tag ' + tags
self.live_only_execute(self.cli_ctx, template.format(self._get_resource_group(**kwargs), self.location, name))
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: name}
def remove_resource(self, name, **kwargs):
if not self.dev_setting_name:
from msrestazure.azure_exceptions import CloudError
try:
self.live_only_execute(
self.cli_ctx,
'az network vnet delete --name {} --resource-group {}'.format(name, self._get_resource_group(**kwargs)))
except CloudError:
                # deletion of the vnet may fail because the service can create subresources like IPConfig.
                # We rely on the deletion of the resource group to remove the vnet in that case.
pass
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create a VirtualNetwork a resource group is required. Please add ' \
'decorator @{} in front of this VirtualNetwork preparer.'
raise CliTestError(template.format(VirtualNetworkPreparer.__name__))
# pylint: disable=too-many-instance-attributes
class VnetNicPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
def __init__(self, name_prefix='clitest.nic',
parameter_name='subnet',
resource_group_parameter_name=KEY_RESOURCE_GROUP,
vnet_parameter_name=KEY_VIRTUAL_NETWORK,
dev_setting_name='AZURE_CLI_TEST_DEV_VNET_NIC_NAME',
key=KEY_VNET_NIC):
if ' ' in name_prefix:
raise CliTestError(
'Error: Space character in name prefix \'%s\'' % name_prefix)
super(VnetNicPreparer, self).__init__(name_prefix, 15)
self.cli_ctx = get_dummy_cli()
self.parameter_name = parameter_name
self.key = key
self.resource_group_parameter_name = resource_group_parameter_name
self.vnet_parameter_name = vnet_parameter_name
self.dev_setting_name = os.environ.get(dev_setting_name, None)
def create_resource(self, name, **kwargs):
if self.dev_setting_name:
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: self.dev_setting_name, }
template = 'az network nic create --resource-group {} --name {} --vnet-name {} --subnet default '
self.live_only_execute(self.cli_ctx, template.format(
self._get_resource_group(**kwargs), name, self._get_virtual_network(**kwargs)))
self.test_class_instance.kwargs[self.key] = name
return {self.parameter_name: name}
def remove_resource(self, name, **kwargs):
if not self.dev_setting_name:
self.live_only_execute(
self.cli_ctx,
'az network nic delete --name {} --resource-group {}'.format(name, self._get_resource_group(**kwargs)))
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create a VirtualNetworkNic a resource group is required. Please add ' \
'decorator @{} in front of this VirtualNetworkNic preparer.'
raise CliTestError(template.format(VnetNicPreparer.__name__))
def _get_virtual_network(self, **kwargs):
try:
return kwargs.get(self.vnet_parameter_name)
except KeyError:
template = 'To create a VirtualNetworkNic a virtual network is required. Please add ' \
'decorator @{} in front of this VirtualNetworkNic preparer.'
raise CliTestError(template.format(VnetNicPreparer.__name__))
# Utility
def is_preparer_func(fn):
return getattr(fn, '__is_preparer', False)
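# Editor's note: the sketch below is illustrative only and not part of the library. It shows how
# the preparers in this module are typically stacked as decorators on a scenario test, with each
# create_resource() return dict injected into the test method's keyword arguments. It assumes
# LiveScenarioTest (imported above) exposes cmd(); the command string itself is hypothetical.
def _example_preparer_usage():  # pragma: no cover
    class StorageShowScenario(LiveScenarioTest):
        @ResourceGroupPreparer(name_prefix='clitest.rg')
        @StorageAccountPreparer(parameter_name='storage_account')
        def test_show_account(self, resource_group, storage_account):
            # 'resource_group' and 'storage_account' are filled in by the preparers above
            self.cmd('storage account show -n {} -g {}'.format(storage_account, resource_group))

    return StorageShowScenario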
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Module files as torch.nn.Module subclasses for Seq2seqAgent.
"""
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
import torch.nn.functional as F
from parlai.utils.torch import neginf
from parlai.core.torch_generator_agent import TorchGeneratorModel
def _transpose_hidden_state(hidden_state):
"""
Transpose the hidden state so that batch is the first dimension.
RNN modules produce (num_layers x batchsize x dim) hidden state, but DataParallel
expects batch size to be first. This helper is used to ensure that we're always
outputting batch-first, in case DataParallel tries to stitch things back together.
"""
if isinstance(hidden_state, tuple):
return tuple(map(_transpose_hidden_state, hidden_state))
elif torch.is_tensor(hidden_state):
return hidden_state.transpose(0, 1)
else:
raise ValueError("Don't know how to transpose {}".format(hidden_state))
def opt_to_kwargs(opt):
"""
Get kwargs for seq2seq from opt.
"""
kwargs = {}
for k in [
'numlayers',
'dropout',
'bidirectional',
'rnn_class',
'lookuptable',
'decoder',
'numsoftmax',
'attention',
'attention_length',
'attention_time',
'input_dropout',
]:
if k in opt:
kwargs[k] = opt[k]
return kwargs
class Seq2seq(TorchGeneratorModel):
"""
Sequence to sequence parent module.
"""
RNN_OPTS = {'rnn': nn.RNN, 'gru': nn.GRU, 'lstm': nn.LSTM}
def __init__(
self,
num_features,
embeddingsize,
hiddensize,
numlayers=2,
dropout=0,
bidirectional=False,
rnn_class='lstm',
lookuptable='unique',
decoder='same',
numsoftmax=1,
attention='none',
attention_length=48,
attention_time='post',
padding_idx=0,
start_idx=1,
end_idx=2,
unknown_idx=3,
input_dropout=0,
longest_label=1,
):
"""
Initialize seq2seq model.
See cmdline args in Seq2seqAgent for description of arguments.
"""
super().__init__(
padding_idx=padding_idx,
start_idx=start_idx,
end_idx=end_idx,
unknown_idx=unknown_idx,
input_dropout=input_dropout,
longest_label=longest_label,
)
self.attn_type = attention
rnn_class = Seq2seq.RNN_OPTS[rnn_class]
self.decoder = RNNDecoder(
num_features,
embeddingsize,
hiddensize,
padding_idx=padding_idx,
rnn_class=rnn_class,
numlayers=numlayers,
dropout=dropout,
attn_type=attention,
attn_length=attention_length,
attn_time=attention_time,
bidir_input=bidirectional,
)
shared_lt = (
self.decoder.lt # share embeddings between rnns
if lookuptable in ('enc_dec', 'all')
else None
)
shared_rnn = self.decoder.rnn if decoder == 'shared' else None
self.encoder = RNNEncoder(
num_features,
embeddingsize,
hiddensize,
padding_idx=padding_idx,
rnn_class=rnn_class,
numlayers=numlayers,
dropout=dropout,
bidirectional=bidirectional,
shared_lt=shared_lt,
shared_rnn=shared_rnn,
unknown_idx=unknown_idx,
input_dropout=input_dropout,
)
shared_weight = (
self.decoder.lt # use embeddings for projection
if lookuptable in ('dec_out', 'all')
else None
)
self.output = OutputLayer(
num_features,
embeddingsize,
hiddensize,
dropout=dropout,
numsoftmax=numsoftmax,
shared_weight=shared_weight,
padding_idx=padding_idx,
)
def reorder_encoder_states(self, encoder_states, indices):
"""
Reorder encoder states according to a new set of indices.
"""
enc_out, hidden, attn_mask = encoder_states
# make sure we swap the hidden state around, apropos multigpu settings
hidden = _transpose_hidden_state(hidden)
# LSTM or GRU/RNN hidden state?
if isinstance(hidden, torch.Tensor):
hid, cell = hidden, None
else:
hid, cell = hidden
if not torch.is_tensor(indices):
# cast indices to a tensor if needed
indices = torch.LongTensor(indices).to(hid.device)
hid = hid.index_select(1, indices)
if cell is None:
hidden = hid
else:
cell = cell.index_select(1, indices)
hidden = (hid, cell)
if self.attn_type != 'none':
enc_out = enc_out.index_select(0, indices)
attn_mask = attn_mask.index_select(0, indices)
# and bring it back to multigpu friendliness
hidden = _transpose_hidden_state(hidden)
return enc_out, hidden, attn_mask
def reorder_decoder_incremental_state(self, incremental_state, inds):
if torch.is_tensor(incremental_state):
# gru or vanilla rnn
return torch.index_select(incremental_state, 0, inds).contiguous()
elif isinstance(incremental_state, tuple):
return tuple(
self.reorder_decoder_incremental_state(x, inds)
for x in incremental_state
)
class UnknownDropout(nn.Module):
"""
With set frequency, replaces tokens with unknown token.
This layer can be used right before an embedding layer to make the model more robust
to unknown words at test time.
"""
def __init__(self, unknown_idx, probability):
"""
Initialize layer.
:param unknown_idx: index of unknown token, replace tokens with this
:param probability: during training, replaces tokens with unknown token
at this rate.
"""
super().__init__()
self.unknown_idx = unknown_idx
self.prob = probability
def forward(self, input):
"""
If training and dropout rate > 0, masks input with unknown token.
"""
if self.training and self.prob > 0:
mask = input.new(input.size()).float().uniform_(0, 1) < self.prob
input.masked_fill_(mask, self.unknown_idx)
return input
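# Editor's note: minimal sketch, not part of the original module. During training, roughly
# `probability` of the token indices are replaced in-place with `unknown_idx`.
def _example_unknown_dropout():  # pragma: no cover
    layer = UnknownDropout(unknown_idx=3, probability=0.5)
    layer.train()
    tokens = torch.randint(4, 100, (2, 6))    # indices start at 4 so replacements are visible
    masked = layer(tokens)
    return (masked == 3).float().mean()       # ~0.5 on average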
class RNNEncoder(nn.Module):
"""
RNN Encoder.
"""
def __init__(
self,
num_features,
embeddingsize,
hiddensize,
padding_idx=0,
rnn_class='lstm',
numlayers=2,
dropout=0.1,
bidirectional=False,
shared_lt=None,
shared_rnn=None,
input_dropout=0,
unknown_idx=None,
sparse=False,
):
"""
Initialize recurrent encoder.
"""
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.layers = numlayers
self.dirs = 2 if bidirectional else 1
self.hsz = hiddensize
if input_dropout > 0 and unknown_idx is None:
raise RuntimeError('input_dropout > 0 but unknown_idx not set')
self.input_dropout = UnknownDropout(unknown_idx, input_dropout)
if shared_lt is None:
self.lt = nn.Embedding(
num_features, embeddingsize, padding_idx=padding_idx, sparse=sparse
)
else:
self.lt = shared_lt
if shared_rnn is None:
self.rnn = rnn_class(
embeddingsize,
hiddensize,
numlayers,
dropout=dropout if numlayers > 1 else 0,
batch_first=True,
bidirectional=bidirectional,
)
elif bidirectional:
raise RuntimeError('Cannot share decoder with bidir encoder.')
else:
self.rnn = shared_rnn
def forward(self, xs):
"""
Encode sequence.
:param xs: (bsz x seqlen) LongTensor of input token indices
:returns: encoder outputs, hidden state, attention mask
encoder outputs are the output state at each step of the encoding.
the hidden state is the final hidden state of the encoder.
the attention mask is a mask of which input values are nonzero.
"""
bsz = len(xs)
# embed input tokens
xs = self.input_dropout(xs)
xes = self.dropout(self.lt(xs))
attn_mask = xs.ne(0)
try:
x_lens = torch.sum(attn_mask.int(), dim=1).cpu()
xes = pack_padded_sequence(xes, x_lens, batch_first=True)
packed = True
except ValueError:
# packing failed, don't pack then
packed = False
encoder_output, hidden = self.rnn(xes)
if packed:
# total_length to make sure we give the proper length in the case
# of multigpu settings.
# https://pytorch.org/docs/stable/notes/faq.html#pack-rnn-unpack-with-data-parallelism
encoder_output, _ = pad_packed_sequence(
encoder_output, batch_first=True, total_length=xs.size(1)
)
if self.dirs > 1:
# project to decoder dimension by taking sum of forward and back
if isinstance(self.rnn, nn.LSTM):
hidden = (
hidden[0].view(-1, self.dirs, bsz, self.hsz).sum(1),
hidden[1].view(-1, self.dirs, bsz, self.hsz).sum(1),
)
else:
hidden = hidden.view(-1, self.dirs, bsz, self.hsz).sum(1)
return encoder_output, _transpose_hidden_state(hidden), attn_mask
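# Editor's note: minimal sketch, not part of the original module. It exercises the forward()
# contract documented above on a small padded batch (0 is the padding index); sizes are arbitrary.
def _example_rnn_encoder():  # pragma: no cover
    enc = RNNEncoder(num_features=50, embeddingsize=16, hiddensize=32,
                     rnn_class=nn.LSTM, numlayers=1, dropout=0.0)
    xs = torch.tensor([[4, 8, 15, 16], [23, 42, 0, 0]])   # longest sequence first for packing
    enc_out, hidden, attn_mask = enc(xs)
    assert enc_out.shape == (2, 4, 32)                     # bsz x seqlen x hiddensize
    assert attn_mask.tolist() == [[True, True, True, True], [True, True, False, False]]
    return enc_out, hidden, attn_mask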
class RNNDecoder(nn.Module):
"""
Recurrent decoder module.
Can be used as a standalone language model or paired with an encoder.
"""
def __init__(
self,
num_features,
embeddingsize,
hiddensize,
padding_idx=0,
rnn_class='lstm',
numlayers=2,
dropout=0.1,
bidir_input=False,
attn_type='none',
attn_time='pre',
attn_length=-1,
sparse=False,
):
"""
Initialize recurrent decoder.
"""
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.layers = numlayers
self.hsz = hiddensize
self.esz = embeddingsize
self.lt = nn.Embedding(
num_features, embeddingsize, padding_idx=padding_idx, sparse=sparse
)
self.rnn = rnn_class(
embeddingsize,
hiddensize,
numlayers,
dropout=dropout if numlayers > 1 else 0,
batch_first=True,
)
self.attn_type = attn_type
self.attn_time = attn_time
self.attention = AttentionLayer(
attn_type=attn_type,
hiddensize=hiddensize,
embeddingsize=embeddingsize,
bidirectional=bidir_input,
attn_length=attn_length,
attn_time=attn_time,
)
def forward(self, xs, encoder_output, incremental_state=None):
"""
Decode from input tokens.
:param xs: (bsz x seqlen) LongTensor of input token indices
:param encoder_output: output from RNNEncoder. Tuple containing
(enc_out, enc_hidden, attn_mask) tuple.
:param incremental_state: most recent hidden state to the decoder.
If None, the hidden state of the encoder is used as initial state,
and the full sequence is computed. If not None, computes only the
next forward in the sequence.
:returns: (output, hidden_state) pair from the RNN.
- output is a bsz x time x latentdim matrix. If incremental_state is
given, the time dimension will be 1. This value must be passed to
the model's OutputLayer for a final softmax.
- hidden_state depends on the choice of RNN
"""
enc_state, enc_hidden, attn_mask = encoder_output
# in case of multi gpu, we need to transpose back out the hidden state
attn_params = (enc_state, attn_mask)
if incremental_state is not None:
# we're doing it piece by piece, so we have a more important hidden
# seed, and we only need to compute for the final timestep
hidden = _transpose_hidden_state(incremental_state)
# only need the last timestep then
xs = xs[:, -1:]
else:
# starting fresh, or generating from scratch. Use the encoder hidden
# state as our start state
hidden = _transpose_hidden_state(enc_hidden)
if isinstance(hidden, tuple):
hidden = tuple(x.contiguous() for x in hidden)
else:
hidden = hidden.contiguous()
# sequence indices => sequence embeddings
seqlen = xs.size(1)
xes = self.dropout(self.lt(xs))
if self.attn_time == 'pre':
# modify input vectors with attention
# attention module requires we do this one step at a time
new_xes = []
for i in range(seqlen):
nx, _ = self.attention(xes[:, i : i + 1], hidden, attn_params)
new_xes.append(nx)
xes = torch.cat(new_xes, 1).to(xes.device)
if self.attn_time != 'post':
            # not post-time attention: any 'pre' attention was already applied above,
            # so we can run the rnn over the whole sequence at once
output, new_hidden = self.rnn(xes, hidden)
else:
# uh oh, post attn, we need run through one at a time, and do the
# attention modifications
new_hidden = hidden
output = []
for i in range(seqlen):
o, new_hidden = self.rnn(xes[:, i, :].unsqueeze(1), new_hidden)
o, _ = self.attention(o, new_hidden, attn_params)
output.append(o)
output = torch.cat(output, dim=1).to(xes.device)
return output, _transpose_hidden_state(new_hidden)
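# Editor's note: minimal sketch, not part of the original module. It runs the decoder once over a
# full target prefix and then takes a single incremental step, mirroring the forward() contract
# above. Attention is disabled to keep the example short; the token values are arbitrary.
def _example_rnn_decoder_step():  # pragma: no cover
    vocab, esz, hsz = 50, 16, 32
    enc = RNNEncoder(num_features=vocab, embeddingsize=esz, hiddensize=hsz,
                     rnn_class=nn.LSTM, numlayers=1, dropout=0.0)
    dec = RNNDecoder(num_features=vocab, embeddingsize=esz, hiddensize=hsz,
                     rnn_class=nn.LSTM, numlayers=1, dropout=0.0, attn_type='none')
    encoder_output = enc(torch.tensor([[4, 8, 15, 16], [23, 42, 0, 0]]))
    ys = torch.tensor([[1, 5, 6], [1, 7, 9]])                 # target prefix so far
    out, hidden = dec(ys, encoder_output)                     # full pass: (2, 3, hsz)
    step_out, hidden = dec(ys, encoder_output, incremental_state=hidden)
    assert out.shape == (2, 3, hsz) and step_out.shape == (2, 1, hsz)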
class Identity(nn.Module):
def forward(self, x):
return x
class OutputLayer(nn.Module):
"""
Takes in final states and returns distribution over candidates.
"""
def __init__(
self,
num_features,
embeddingsize,
hiddensize,
dropout=0,
numsoftmax=1,
shared_weight=None,
padding_idx=-1,
):
"""
Initialize output layer.
:param num_features: number of candidates to rank
:param hiddensize: (last) dimension of the input vectors
:param embeddingsize: (last) dimension of the candidate vectors
:param numsoftmax: (default 1) number of softmaxes to calculate.
see arxiv.org/abs/1711.03953 for more info.
increasing this slows down computation but can
add more expressivity to the embeddings.
:param shared_weight: (num_features x esz) vector of weights to use as
the final linear layer's weight matrix. default
None starts with a new linear layer.
        :param padding_idx: model should output a large negative number for
                            the score at this index. if set to -1 (default),
                            this is disabled. if >= 0, the score at this
                            index is always set to neginf so that gradients
                            do not flow back into shared embedding matrices.
"""
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.padding_idx = padding_idx
rng = 1.0 / math.sqrt(num_features)
self.bias = Parameter(torch.Tensor(num_features).uniform_(-rng, rng))
# embedding to scores
if shared_weight is None:
# just a regular linear layer
self.shared = False
self.weight = Parameter(
torch.Tensor(num_features, embeddingsize).normal_(0, 1)
)
else:
# use shared weights and a bias layer instead
self.shared = True
self.weight = shared_weight.weight
self.numsoftmax = numsoftmax
if numsoftmax > 1:
self.esz = embeddingsize
self.softmax = nn.Softmax(dim=1)
self.prior = nn.Linear(hiddensize, numsoftmax, bias=False)
self.latent = nn.Linear(hiddensize, numsoftmax * embeddingsize)
self.activation = nn.Tanh()
else:
# rnn output to embedding
if hiddensize != embeddingsize:
# learn projection to correct dimensions
self.o2e = nn.Linear(hiddensize, embeddingsize, bias=True)
else:
# no need for any transformation here
self.o2e = Identity()
def forward(self, input):
"""
Compute scores from inputs.
:param input: (bsz x seq_len x num_directions * hiddensize) tensor of
states, e.g. the output states of an RNN
:returns: (bsz x seqlen x num_cands) scores for each candidate
"""
# next compute scores over dictionary
if self.numsoftmax > 1:
bsz = input.size(0)
seqlen = input.size(1) if input.dim() > 1 else 1
# first compute different softmax scores based on input vec
# hsz => numsoftmax * esz
latent = self.latent(input)
active = self.dropout(self.activation(latent))
# esz => num_features
logit = F.linear(active.view(-1, self.esz), self.weight, self.bias)
# calculate priors: distribution over which softmax scores to use
# hsz => numsoftmax
prior_logit = self.prior(input).view(-1, self.numsoftmax)
# softmax over numsoftmax's
prior = self.softmax(prior_logit)
# now combine priors with logits
prob = self.softmax(logit).view(bsz * seqlen, self.numsoftmax, -1)
probs = (prob * prior.unsqueeze(2)).sum(1).view(bsz, seqlen, -1)
scores = probs.log()
else:
# hsz => esz, good time for dropout
e = self.dropout(self.o2e(input))
# esz => num_features
scores = F.linear(e, self.weight, self.bias)
if self.padding_idx >= 0:
scores[:, :, self.padding_idx] = neginf(scores.dtype)
return scores
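# Editor's note: minimal sketch, not part of the original module. It maps RNN states of shape
# (bsz x seqlen x hiddensize) to per-token scores over the vocabulary, here with a 2-component
# mixture of softmaxes; all sizes are arbitrary.
def _example_output_layer():  # pragma: no cover
    layer = OutputLayer(num_features=100, embeddingsize=16, hiddensize=32, numsoftmax=2)
    states = torch.randn(4, 7, 32)
    scores = layer(states)
    assert scores.shape == (4, 7, 100)   # bsz x seqlen x num_features
    return scores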
class AttentionLayer(nn.Module):
"""
Computes attention between hidden and encoder states.
See arxiv.org/abs/1508.04025 for more info on each attention type.
"""
def __init__(
self,
attn_type,
hiddensize,
embeddingsize,
bidirectional=False,
attn_length=-1,
attn_time='pre',
):
"""
Initialize attention layer.
"""
super().__init__()
self.attention = attn_type
if self.attention != 'none':
hsz = hiddensize
hszXdirs = hsz * (2 if bidirectional else 1)
if attn_time == 'pre':
# attention happens on the input embeddings
input_dim = embeddingsize
elif attn_time == 'post':
# attention happens on the output of the rnn
input_dim = hsz
else:
raise RuntimeError('unsupported attention time')
# linear layer for combining applied attention weights with input
self.attn_combine = nn.Linear(hszXdirs + input_dim, input_dim, bias=False)
if self.attention == 'local':
# local attention over fixed set of output states
if attn_length < 0:
raise RuntimeError('Set attention length to > 0.')
self.max_length = attn_length
# combines input and previous hidden output layer
self.attn = nn.Linear(hsz + input_dim, attn_length, bias=False)
# combines attention weights with encoder outputs
elif self.attention == 'concat':
self.attn = nn.Linear(hsz + hszXdirs, hsz, bias=False)
self.attn_v = nn.Linear(hsz, 1, bias=False)
elif self.attention == 'general':
# equivalent to dot if attn is identity
self.attn = nn.Linear(hsz, hszXdirs, bias=False)
def forward(self, xes, hidden, attn_params):
"""
Compute attention over attn_params given input and hidden states.
:param xes: input state. will be combined with applied
attention.
:param hidden: hidden state from model. will be used to select
states to attend to in from the attn_params.
:param attn_params: tuple of encoder output states and a mask showing
which input indices are nonzero.
:returns: output, attn_weights
output is a new state of same size as input state `xes`.
attn_weights are the weights given to each state in the
encoder outputs.
"""
if self.attention == 'none':
# do nothing, no attention
return xes, None
        if isinstance(hidden, tuple):
# for lstms use the "hidden" state not the cell state
hidden = hidden[0]
last_hidden = hidden[-1] # select hidden state from last RNN layer
enc_out, attn_mask = attn_params
bsz, seqlen, hszXnumdir = enc_out.size()
numlayersXnumdir = last_hidden.size(1)
if self.attention == 'local':
# local attention weights aren't based on encoder states
h_merged = torch.cat((xes.squeeze(1), last_hidden), 1)
attn_weights = F.softmax(self.attn(h_merged), dim=1)
# adjust state sizes to the fixed window size
if seqlen > self.max_length:
offset = seqlen - self.max_length
enc_out = enc_out.narrow(1, offset, self.max_length)
seqlen = self.max_length
if attn_weights.size(1) > seqlen:
attn_weights = attn_weights.narrow(1, 0, seqlen)
else:
hid = last_hidden.unsqueeze(1)
if self.attention == 'concat':
# concat hidden state and encoder outputs
hid = hid.expand(bsz, seqlen, numlayersXnumdir)
h_merged = torch.cat((enc_out, hid), 2)
# then do linear combination of them with activation
                active = torch.tanh(self.attn(h_merged))
attn_w_premask = self.attn_v(active).squeeze(2)
elif self.attention == 'dot':
# dot product between hidden and encoder outputs
if numlayersXnumdir != hszXnumdir:
# enc_out has two directions, so double hid
hid = torch.cat([hid, hid], 2)
enc_t = enc_out.transpose(1, 2)
attn_w_premask = torch.bmm(hid, enc_t).squeeze(1)
elif self.attention == 'general':
# before doing dot product, transform hidden state with linear
# same as dot if linear is identity
hid = self.attn(hid)
enc_t = enc_out.transpose(1, 2)
attn_w_premask = torch.bmm(hid, enc_t).squeeze(1)
# calculate activation scores, apply mask if needed
if attn_mask is not None:
# remove activation from NULL symbols
attn_w_premask.masked_fill_((~attn_mask), neginf(attn_w_premask.dtype))
attn_weights = F.softmax(attn_w_premask, dim=1)
# apply the attention weights to the encoder states
attn_applied = torch.bmm(attn_weights.unsqueeze(1), enc_out)
# concatenate the input and encoder states
merged = torch.cat((xes.squeeze(1), attn_applied.squeeze(1)), 1)
# combine them with a linear layer and tanh activation
output = torch.tanh(self.attn_combine(merged).unsqueeze(1))
return output, attn_weights
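# Editor's note: minimal sketch, not part of the original module. It applies 'general'
# (Luong-style) post-time attention for a single decoder step; shapes follow the forward()
# contract above and the numbers are arbitrary.
def _example_attention_layer():  # pragma: no cover
    bsz, seqlen, hsz = 2, 5, 8
    attn = AttentionLayer('general', hiddensize=hsz, embeddingsize=hsz, attn_time='post')
    xes = torch.randn(bsz, 1, hsz)                        # one decoder step
    hidden = torch.randn(1, bsz, hsz)                     # (num_layers x bsz x hsz)
    enc_out = torch.randn(bsz, seqlen, hsz)
    attn_mask = torch.ones(bsz, seqlen, dtype=torch.bool)
    output, weights = attn(xes, hidden, (enc_out, attn_mask))
    assert output.shape == (bsz, 1, hsz) and weights.shape == (bsz, seqlen)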
|
|
import functools
import torch.nn as nn
import torch.nn.functional as F
from ptsemseg.models.utils import get_upsampling_weight
from ptsemseg.loss import cross_entropy2d
# FCN32s
class fcn32s(nn.Module):
def __init__(self, n_classes=21, learned_billinear=False):
super(fcn32s, self).__init__()
self.learned_billinear = learned_billinear
self.n_classes = n_classes
self.loss = functools.partial(cross_entropy2d, size_average=False)
self.conv_block1 = nn.Sequential(
nn.Conv2d(3, 64, 3, padding=100),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block2 = nn.Sequential(
nn.Conv2d(64, 128, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block3 = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block4 = nn.Sequential(
nn.Conv2d(256, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block5 = nn.Sequential(
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.classifier = nn.Sequential(
nn.Conv2d(512, 4096, 7),
nn.ReLU(inplace=True),
nn.Dropout2d(),
nn.Conv2d(4096, 4096, 1),
nn.ReLU(inplace=True),
nn.Dropout2d(),
nn.Conv2d(4096, self.n_classes, 1),
)
if self.learned_billinear:
raise NotImplementedError
def forward(self, x):
conv1 = self.conv_block1(x)
conv2 = self.conv_block2(conv1)
conv3 = self.conv_block3(conv2)
conv4 = self.conv_block4(conv3)
conv5 = self.conv_block5(conv4)
score = self.classifier(conv5)
out = F.upsample(score, x.size()[2:])
return out
def init_vgg16_params(self, vgg16, copy_fc8=True):
blocks = [
self.conv_block1,
self.conv_block2,
self.conv_block3,
self.conv_block4,
self.conv_block5,
]
ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]
features = list(vgg16.features.children())
for idx, conv_block in enumerate(blocks):
for l1, l2 in zip(features[ranges[idx][0] : ranges[idx][1]], conv_block):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data = l1.weight.data
l2.bias.data = l1.bias.data
for i1, i2 in zip([0, 3], [0, 3]):
l1 = vgg16.classifier[i1]
l2 = self.classifier[i2]
l2.weight.data = l1.weight.data.view(l2.weight.size())
l2.bias.data = l1.bias.data.view(l2.bias.size())
n_class = self.classifier[6].weight.size()[0]
if copy_fc8:
l1 = vgg16.classifier[6]
l2 = self.classifier[6]
l2.weight.data = l1.weight.data[:n_class, :].view(l2.weight.size())
l2.bias.data = l1.bias.data[:n_class]
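# Editor's note: illustrative sketch, not part of the original module. The FCN variants here are
# normally initialised from a pretrained VGG16; this assumes torchvision is installed and that
# its vgg16 layer layout matches the ranges used in init_vgg16_params.
def _example_init_from_vgg16():  # pragma: no cover
    import torchvision.models as models
    model = fcn32s(n_classes=21)
    vgg16 = models.vgg16(pretrained=True)   # downloads weights on first use
    model.init_vgg16_params(vgg16)
    return model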
class fcn16s(nn.Module):
def __init__(self, n_classes=21, learned_billinear=False):
super(fcn16s, self).__init__()
self.learned_billinear = learned_billinear
self.n_classes = n_classes
self.loss = functools.partial(cross_entropy2d, size_average=False)
self.conv_block1 = nn.Sequential(
nn.Conv2d(3, 64, 3, padding=100),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block2 = nn.Sequential(
nn.Conv2d(64, 128, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block3 = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block4 = nn.Sequential(
nn.Conv2d(256, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block5 = nn.Sequential(
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.classifier = nn.Sequential(
nn.Conv2d(512, 4096, 7),
nn.ReLU(inplace=True),
nn.Dropout2d(),
nn.Conv2d(4096, 4096, 1),
nn.ReLU(inplace=True),
nn.Dropout2d(),
nn.Conv2d(4096, self.n_classes, 1),
)
self.score_pool4 = nn.Conv2d(512, self.n_classes, 1)
# TODO: Add support for learned upsampling
if self.learned_billinear:
raise NotImplementedError
def forward(self, x):
conv1 = self.conv_block1(x)
conv2 = self.conv_block2(conv1)
conv3 = self.conv_block3(conv2)
conv4 = self.conv_block4(conv3)
conv5 = self.conv_block5(conv4)
score = self.classifier(conv5)
score_pool4 = self.score_pool4(conv4)
score = F.upsample(score, score_pool4.size()[2:])
score += score_pool4
out = F.upsample(score, x.size()[2:])
return out
def init_vgg16_params(self, vgg16, copy_fc8=True):
blocks = [
self.conv_block1,
self.conv_block2,
self.conv_block3,
self.conv_block4,
self.conv_block5,
]
ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]
features = list(vgg16.features.children())
for idx, conv_block in enumerate(blocks):
for l1, l2 in zip(features[ranges[idx][0] : ranges[idx][1]], conv_block):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
# print(idx, l1, l2)
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data = l1.weight.data
l2.bias.data = l1.bias.data
for i1, i2 in zip([0, 3], [0, 3]):
l1 = vgg16.classifier[i1]
l2 = self.classifier[i2]
l2.weight.data = l1.weight.data.view(l2.weight.size())
l2.bias.data = l1.bias.data.view(l2.bias.size())
n_class = self.classifier[6].weight.size()[0]
if copy_fc8:
l1 = vgg16.classifier[6]
l2 = self.classifier[6]
l2.weight.data = l1.weight.data[:n_class, :].view(l2.weight.size())
l2.bias.data = l1.bias.data[:n_class]
# FCN 8s
class fcn8s(nn.Module):
def __init__(self, n_classes=21, learned_billinear=True):
super(fcn8s, self).__init__()
self.learned_billinear = learned_billinear
self.n_classes = n_classes
self.loss = functools.partial(cross_entropy2d, size_average=False)
self.conv_block1 = nn.Sequential(
nn.Conv2d(3, 64, 3, padding=100),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block2 = nn.Sequential(
nn.Conv2d(64, 128, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block3 = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block4 = nn.Sequential(
nn.Conv2d(256, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block5 = nn.Sequential(
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.classifier = nn.Sequential(
nn.Conv2d(512, 4096, 7),
nn.ReLU(inplace=True),
nn.Dropout2d(),
nn.Conv2d(4096, 4096, 1),
nn.ReLU(inplace=True),
nn.Dropout2d(),
nn.Conv2d(4096, self.n_classes, 1),
)
self.score_pool4 = nn.Conv2d(512, self.n_classes, 1)
self.score_pool3 = nn.Conv2d(256, self.n_classes, 1)
if self.learned_billinear:
self.upscore2 = nn.ConvTranspose2d(
self.n_classes, self.n_classes, 4, stride=2, bias=False
)
self.upscore4 = nn.ConvTranspose2d(
self.n_classes, self.n_classes, 4, stride=2, bias=False
)
self.upscore8 = nn.ConvTranspose2d(
self.n_classes, self.n_classes, 16, stride=8, bias=False
)
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
m.weight.data.copy_(
get_upsampling_weight(m.in_channels, m.out_channels, m.kernel_size[0])
)
def forward(self, x):
conv1 = self.conv_block1(x)
conv2 = self.conv_block2(conv1)
conv3 = self.conv_block3(conv2)
conv4 = self.conv_block4(conv3)
conv5 = self.conv_block5(conv4)
score = self.classifier(conv5)
if self.learned_billinear:
upscore2 = self.upscore2(score)
score_pool4c = self.score_pool4(conv4)[
:, :, 5 : 5 + upscore2.size()[2], 5 : 5 + upscore2.size()[3]
]
upscore_pool4 = self.upscore4(upscore2 + score_pool4c)
score_pool3c = self.score_pool3(conv3)[
:, :, 9 : 9 + upscore_pool4.size()[2], 9 : 9 + upscore_pool4.size()[3]
]
out = self.upscore8(score_pool3c + upscore_pool4)[
:, :, 31 : 31 + x.size()[2], 31 : 31 + x.size()[3]
]
return out.contiguous()
else:
score_pool4 = self.score_pool4(conv4)
score_pool3 = self.score_pool3(conv3)
score = F.upsample(score, score_pool4.size()[2:])
score += score_pool4
score = F.upsample(score, score_pool3.size()[2:])
score += score_pool3
out = F.upsample(score, x.size()[2:])
return out
def init_vgg16_params(self, vgg16, copy_fc8=True):
blocks = [
self.conv_block1,
self.conv_block2,
self.conv_block3,
self.conv_block4,
self.conv_block5,
]
ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]
features = list(vgg16.features.children())
for idx, conv_block in enumerate(blocks):
for l1, l2 in zip(features[ranges[idx][0] : ranges[idx][1]], conv_block):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data = l1.weight.data
l2.bias.data = l1.bias.data
for i1, i2 in zip([0, 3], [0, 3]):
l1 = vgg16.classifier[i1]
l2 = self.classifier[i2]
l2.weight.data = l1.weight.data.view(l2.weight.size())
l2.bias.data = l1.bias.data.view(l2.bias.size())
n_class = self.classifier[6].weight.size()[0]
if copy_fc8:
l1 = vgg16.classifier[6]
l2 = self.classifier[6]
l2.weight.data = l1.weight.data[:n_class, :].view(l2.weight.size())
l2.bias.data = l1.bias.data[:n_class]
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import iso8601
from lxml import etree
from oslo_config import cfg
from oslo_serialization import jsonutils
import webob
from cinder.api import extensions
from cinder.api.v1 import router
from cinder.api import xmlutil
from cinder import test
NS = "{http://docs.openstack.org/common/api/v1.0}"
CONF = cfg.CONF
class ExtensionTestCase(test.TestCase):
def setUp(self):
super(ExtensionTestCase, self).setUp()
ext_list = CONF.osapi_volume_extension[:]
        fox = 'cinder.tests.unit.api.extensions.foxinsocks.Foxinsocks'
if fox not in ext_list:
ext_list.append(fox)
self.flags(osapi_volume_extension=ext_list)
class ExtensionControllerTest(ExtensionTestCase):
def setUp(self):
super(ExtensionControllerTest, self).setUp()
self.ext_list = ["TypesManage", "TypesExtraSpecs", ]
self.ext_list.sort()
def test_list_extensions_json(self):
app = router.APIRouter()
request = webob.Request.blank("/fake/extensions")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
# Make sure we have all the extensions, extra extensions being OK.
data = jsonutils.loads(response.body)
names = [str(x['name']) for x in data['extensions']
if str(x['name']) in self.ext_list]
names.sort()
self.assertEqual(names, self.ext_list)
# Ensure all the timestamps are valid according to iso8601
for ext in data['extensions']:
iso8601.parse_date(ext['updated'])
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [
x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
self.assertEqual(
fox_ext, {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
'name': 'Fox In Socks',
'updated': '2011-01-22T13:25:27-06:00',
'description': 'The Fox In Socks Extension.',
'alias': 'FOXNSOX',
'links': []}, )
for ext in data['extensions']:
url = '/fake/extensions/%s' % ext['alias']
request = webob.Request.blank(url)
response = request.get_response(app)
output = jsonutils.loads(response.body)
self.assertEqual(output['extension']['alias'], ext['alias'])
def test_get_extension_json(self):
app = router.APIRouter()
request = webob.Request.blank("/fake/extensions/FOXNSOX")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
data = jsonutils.loads(response.body)
self.assertEqual(
data['extension'],
{"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
"name": "Fox In Socks",
"updated": "2011-01-22T13:25:27-06:00",
"description": "The Fox In Socks Extension.",
"alias": "FOXNSOX",
"links": []})
def test_get_non_existing_extension_json(self):
app = router.APIRouter()
request = webob.Request.blank("/fake/extensions/4")
response = request.get_response(app)
self.assertEqual(404, response.status_int)
def test_list_extensions_xml(self):
app = router.APIRouter()
request = webob.Request.blank("/fake/extensions")
request.accept = "application/xml"
response = request.get_response(app)
self.assertEqual(200, response.status_int)
root = etree.XML(response.body)
self.assertEqual(root.tag.split('extensions')[0], NS)
        # Make sure we have all the extensions, extra extensions being OK.
exts = root.findall('{0}extension'.format(NS))
self.assertGreaterEqual(len(exts), len(self.ext_list))
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
self.assertEqual(
fox_ext.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
self.assertEqual(
fox_ext.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension.')
xmlutil.validate_schema(root, 'extensions')
def test_get_extension_xml(self):
app = router.APIRouter()
request = webob.Request.blank("/fake/extensions/FOXNSOX")
request.accept = "application/xml"
response = request.get_response(app)
self.assertEqual(200, response.status_int)
xml = response.body
root = etree.XML(xml)
self.assertEqual(root.tag.split('extension')[0], NS)
self.assertEqual(root.get('alias'), 'FOXNSOX')
self.assertEqual(root.get('name'), 'Fox In Socks')
self.assertEqual(
root.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
self.assertEqual(
root.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension.')
xmlutil.validate_schema(root, 'extension')
class StubExtensionManager(object):
"""Provides access to Tweedle Beetles."""
name = "Tweedle Beetle Extension"
alias = "TWDLBETL"
def __init__(self, resource_ext=None, action_ext=None, request_ext=None,
controller_ext=None):
self.resource_ext = resource_ext
self.controller_ext = controller_ext
self.extra_resource_ext = None
def get_resources(self):
resource_exts = []
if self.resource_ext:
resource_exts.append(self.resource_ext)
if self.extra_resource_ext:
resource_exts.append(self.extra_resource_ext)
return resource_exts
def get_controller_extensions(self):
controller_extensions = []
if self.controller_ext:
controller_extensions.append(self.controller_ext)
return controller_extensions
class ExtensionControllerIdFormatTest(test.TestCase):
def _bounce_id(self, test_id):
class BounceController(object):
def show(self, req, id):
return id
res_ext = extensions.ResourceExtension('bounce',
BounceController())
manager = StubExtensionManager(res_ext)
app = router.APIRouter(manager)
request = webob.Request.blank("/fake/bounce/%s" % test_id)
response = request.get_response(app)
return response.body
def test_id_with_xml_format(self):
result = self._bounce_id('foo.xml')
self.assertEqual(result, 'foo')
def test_id_with_json_format(self):
result = self._bounce_id('foo.json')
self.assertEqual(result, 'foo')
def test_id_with_bad_format(self):
result = self._bounce_id('foo.bad')
self.assertEqual(result, 'foo.bad')
|
|
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# pylint: skip-file
from builtins import zip
import numpy as np
import pprint
import pytest
from neon import NervanaObject
from neon.backends.autodiff import Autodiff
class CustomFunc(object):
@staticmethod
def sig(x):
return 1. / (1. + np.exp(-x))
@staticmethod
def sig2(x):
return 1. / (1. + np.exp2(-x))
@staticmethod
def tanh2(x):
return (np.exp2(2.0 * x) - 1.0) / (np.exp2(2.0 * x) + 1.0)
@staticmethod
def argmax(x, axis=1, keepdims=True):
"""
calls numpy argmax with keepdims
"""
new_shape = list(x.shape)
new_shape[axis] = 1
new_shape = tuple(new_shape)
return np.argmax(x, axis=axis).reshape(new_shape)
@staticmethod
def argmin(x, axis=1, keepdims=True):
"""
calls numpy argmin with keepdims
"""
new_shape = list(x.shape)
new_shape[axis] = 1
new_shape = tuple(new_shape)
return np.argmin(x, axis=axis).reshape(new_shape)
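# Editor's note: minimal sketch, not part of the original tests. CustomFunc.argmax mimics
# np.argmax(..., keepdims=True), returning indices with the reduced axis kept as size 1.
def _example_argmax_keepdims():  # pragma: no cover
    x = np.array([[1., 5., 2.],
                  [7., 0., 3.]])
    assert CustomFunc.argmax(x, axis=1).shape == (2, 1)
    assert (CustomFunc.argmax(x, axis=1) == [[1], [0]]).all()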
@pytest.mark.usefixtures("backend_default")
class TestAutodiff(object):
def setup(self):
self.m = 2 # row
self.n = 2 # column
self.be = NervanaObject.be
self.dtype = self.be.default_dtype
self.test_epoch = 1
self.delta = 1e-5 # for numerical gradient
def _rand_gen(self, *flags):
'''
flags: 'int', 'pos', 'scalar', 'row', 'col'
'''
val = None
# dimension
m = self.m
n = self.n
        if 'scalar' in flags:
            m = 1
            n = 1
        if 'row' in flags:
            m = 1
        if 'col' in flags:
            n = 1
# integer
if 'int' in flags:
if 'pos' in flags:
val = np.random.randint(5., size=(m, n)).astype(float) + 1.
else:
val = np.random.randint(5., size=(m, n)).astype(float) - 2.
else:
if 'pos' in flags:
val = np.absolute(np.random.randn(m, n)) + 0.1
else:
val = np.random.randn(m, n)
# cap it to avoid blowing up
val[val > 5.0] = 5.0
val[val < -5.0] = -5.0
return val
@staticmethod
def _numpy_call(f_str, tensors):
'''
        evaluate function f from f_str and tensors
        f_str: expression string referring to variables named x0 .. xn
tensors: numpy tensor vals
'''
# convert to numpy str
f_str = f_str.replace('be', 'np')
f_str = f_str.replace('np.sig', 'CustomFunc.sig')
f_str = f_str.replace('np.sig2', 'CustomFunc.sig2')
f_str = f_str.replace('np.tanh2', 'CustomFunc.tanh2')
f_str = f_str.replace('np.argmax', 'CustomFunc.argmax')
f_str = f_str.replace('np.argmin', 'CustomFunc.argmin')
# TODO debug only
f_str = f_str.replace('axis=0', 'axis=0, keepdims=True')
# TODO debug only
f_str = f_str.replace('axis=1', 'axis=1, keepdims=True')
# give variable name to the tensors
count = 0
for tensor in tensors:
exec(('x%s = tensor' % count), globals(), locals())
count += 1
# execute
result = None
result = eval(f_str)
return result
def _get_autodiff_grads_and_val(self, f_str, tensor_vals, get_op_tree=False,
next_error=None):
'''
get autodiff grads from optree string expression
f_str: the string of expression to be executed
        tensor_vals: numpy tensor vals
'''
# backend
be = self.be # used in f_str
# init gpu tensors
count = 0
tensors = []
for tensor_val in tensor_vals:
exec('x%s = self.be.array(tensor_val, name="x%s", dtype=self.dtype)'
% (count, count))
exec('tensors.append(x%s)' % count)
count += 1
# build op_tree
f = None
f = eval(f_str)
# evaluate op tree
f_val = be.empty(f.shape)
f_val[:] = f
# init next error
if next_error is not None:
next_error = self.be.array(next_error)
# get gradient
ad = Autodiff(f, be, next_error=next_error)
# get list
if get_op_tree:
gradients = list(ad.get_grad_op_tree(tensors))
else:
gradients = list(ad.get_grad_asnumpyarray(tensors))
return [gradients, f_val.get()]
def _get_numerical_grads_and_val(self, f_str, tensors, next_error=None):
        '''
        get numerical gradients of the numpy string expression via central differences
        f_str: the string of the expression to be evaluated
        tensors: numpy tensor vals
'''
# buffer for gradients
gradients = []
for tensor in tensors:
gradients.append(np.zeros(tensor.shape))
# function values
f_val = TestAutodiff._numpy_call(f_str, tensors)
# init next error
if next_error is None:
next_error = np.ones_like(f_val)
# numerical gradients
for tensor, gradient in zip(tensors, gradients):
gradient_flat = np.copy(gradient.reshape((-1, )))
ind = 0
for x in np.nditer(tensor, op_flags=['readwrite']):
# backup
x_backup = np.copy(x)
# increment
x[...] = x + self.delta
f_inc = np.sum(TestAutodiff._numpy_call(f_str, tensors) * next_error)
x[...] = x_backup
# decrement
x[...] = x - self.delta
f_dec = np.sum(TestAutodiff._numpy_call(f_str, tensors) * next_error)
x[...] = x_backup
# gradient
gradient_flat[ind] = (f_inc - f_dec) / (2.0 * self.delta)
ind += 1
# write to gradient
gradient[:] = gradient_flat.reshape(gradient.shape)
return [gradients, f_val]
def _assert_grad_equal(self, f_str, tensors, rtol=1e-2, atol=1e-5, next_error=None):
def debug_msg(count):
msg = ''
msg += 'Error at tensor x%s' % (count,) + '\n'
msg += pprint.pformat(tensors) + '\n'
grad_op_trees = self._get_autodiff_grads_and_val(f_str, tensors,
get_op_tree=True)
grad_op_tree = grad_op_trees[0][count - 1]
msg += grad_op_tree.pp() + '\n'
return msg
# gradients
autodiff_grads_and_val = self._get_autodiff_grads_and_val(
f_str, tensors, next_error=next_error)
numerical_grads_and_val = self._get_numerical_grads_and_val(
f_str, tensors, next_error=next_error)
# asserts
assert(len(autodiff_grads_and_val) == len(numerical_grads_and_val))
# check function values
numerical_grads_and_val[1] = numerical_grads_and_val[
1].reshape(autodiff_grads_and_val[1].shape)
assert np.allclose(autodiff_grads_and_val[1].astype(self.dtype),
numerical_grads_and_val[1].astype(self.dtype),
rtol=rtol, atol=atol)
# check gradient
count = 0
for autodiff_grad, numerical_grad in zip(autodiff_grads_and_val[0],
numerical_grads_and_val[0]):
count += 1
# print count
if not np.allclose(autodiff_grad.astype(self.dtype), numerical_grad.astype(self.dtype),
rtol=rtol, atol=atol):
raise ValueError(debug_msg(count))
###################
# actual test cases
###################
def test_reduction_shape(self):
be = self.be
x0 = be.array(np.array([[1, 2], [4, 5]]), name='x0')
f = be.sum(x0, axis=0)
assert(f.shape == (1, 2))
f = be.sum(x0, axis=1)
assert(f.shape == (2, 1))
f = be.sum(x0)
assert(f.shape == (1, 1))
def test_reduction(self):
# TODO Reduction only allowed along one axis per kernel.
for _ in range(self.test_epoch):
# tensor
x0, x1, x2, x3 = [self._rand_gen() for _ in range(4)]
            # function call
f_str = (' (x0 + x2) + be.sum(x0, axis=0)'
'- (x0 - x1) - be.mean(x3, axis=1)'
'+ (x2 + x3) + be.var(x0, axis=0)'
'+ (x2 + x3) + be.std(x0)'
'- (x2 - x3) - be.max(x3, axis=1)'
'- (x2 - x3) - be.min(x3, axis=0)'
'- (x2 - x3) - be.argmax(x3, axis=1)'
'- (x2 - x3) - be.argmin(x3, axis=0)')
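            # note: be.argmax/be.argmin are piecewise constant, so they contribute
            # (numerically) zero gradient; they are included mainly to exercise
            # the reduction machinery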
# gradient
self._assert_grad_equal(f_str, [x0, x1, x2, x3], rtol=1e-2)
def test_batchnorm(self):
for _ in range(self.test_epoch):
# tensor
x0 = np.random.randn(10, 64)
x1 = np.random.randn(10, 1) # gamma
x2 = np.random.randn(10, 1) # beta
next_error = np.random.randn(10, 64) / 64.
f_str = '((x0 - be.mean(x0, axis=1)) / be.sqrt(be.var(x0, axis=1) + 1e-6)) * x1 + x2'
# gradient
self._assert_grad_equal(f_str, [x0, x1, x2], rtol=1e-1, atol=1e-2,
next_error=next_error)
def test_positive(self):
# TODO potentially problematic
for _ in range(self.test_epoch):
# tensor
x0, x1, x2, x3 = [self._rand_gen('pos') for _ in range(4)]
# function
f_str = ('0.9 ** 0.9 ** x0 + x1 ** 0.9 ** 0.9'
'+ (be.sqrt(x0 + x1 + x2) + x3)'
'- (be.exp(x0 + x1 + x2) + x3)'
'+ (be.exp2(x0 + x1 + x2) + x3)'
'- (be.log(x0 + x1 + x2) + x3)'
'+ (be.log2(x0 + x1 + x2) + x3)')
# gradient
self._assert_grad_equal(f_str, [x0, x1, x2, x3], rtol=1e-2)
def test_real(self):
for _ in range(self.test_epoch):
# tensor
x0, x1, x2, x3 = [self._rand_gen() for _ in range(4)]
# function
f_str = ('x0 + be.absolute(x1 + x2) + x3'
'- (x0 + be.square(x1 + x2) + x3)'
'+ (x0 + be.sig(x1 + x2) + x3)'
'- (x0 + be.sig2(x1 + x2) + x3)'
'+ (x0 + be.tanh(x1 + x2) + x3)'
'- (x0 + be.tanh2(x1 + x2) + x3)'
'+ (x0 + be.maximum(x0 + x1, x2 + x3) + x3)')
# gradient
self._assert_grad_equal(f_str, [x0, x1, x2, x3], rtol=1e-2)
def test_unbroadcast(self):
for _ in range(self.test_epoch):
            # scalar, matrix
x0 = self._rand_gen('scalar')
x1 = self._rand_gen()
# function
f_str = ('x0 + x0 + x1 + x1')
# gradient
self._assert_grad_equal(f_str, [x0, x1], rtol=1e-2)
# col_vector, matrix
x0 = self._rand_gen('col')
x1 = self._rand_gen()
# function
f_str = ('x0 + x0 + x1 + x1 + x0')
# gradient
self._assert_grad_equal(f_str, [x0, x1], rtol=1e-2)
# row_vector, matrix
x0 = self._rand_gen('row')
x1 = self._rand_gen()
# function
f_str = ('x0 + x0 + x1 + x1 + x0')
# gradient
self._assert_grad_equal(f_str, [x0, x1], rtol=1e-2)
# scalar, row, col and matrix
x0 = self._rand_gen('scalar')
x1 = self._rand_gen('row')
x2 = self._rand_gen('col')
x3 = self._rand_gen()
# function
f_str = ('x0 + x1 + x3 * x2 + x0 + be.tanh(x1) + x3')
# gradient
self._assert_grad_equal(f_str, [x0, x1, x2, x3], rtol=1e-2)
def test_hard_coded(self):
"""
The most basic test case
"""
be = self.be
x0 = be.array(np.ones((3, 3)) * 1, name='x0', dtype=self.dtype)
x1 = be.array(np.ones((3, 3)) * 2, name='x1', dtype=self.dtype)
x2 = be.array(np.ones((3, 3)) * 3, name='x2', dtype=self.dtype)
x3 = be.array(np.ones((3, 3)) * 5, name='x3', dtype=self.dtype)
f = x0 * x0 - x1 * x0 + x0 * x2 - x2 * x1 * x0 + x3 * x3 * x3
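        # expected analytic gradients at x0=1, x1=2, x2=3, x3=5:
        #   df/dx0 = 2*x0 - x1 + x2 - x2*x1 =  2 - 2 + 3 - 6 = -3
        #   df/dx1 = -x0 - x2*x0            = -1 - 3         = -4
        #   df/dx2 = x0 - x1*x0             =  1 - 2         = -1
        #   df/dx3 = 3*x3**2                =  3 * 25        = 75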
ad = Autodiff(f, be)
x0_grad = be.array(np.ones((3, 3)) * -3, dtype=self.dtype)
x1_grad = be.array(np.ones((3, 3)) * -4, dtype=self.dtype)
x2_grad = be.array(np.ones((3, 3)) * -1, dtype=self.dtype)
x3_grad = be.array(np.ones((3, 3)) * 75, dtype=self.dtype)
assert np.allclose(ad.get_grad_asnumpyarray([x0])[0], x0_grad.get(), atol=1e-5)
assert np.allclose(ad.get_grad_asnumpyarray([x1])[0], x1_grad.get(), atol=1e-5)
assert np.allclose(ad.get_grad_asnumpyarray([x2])[0], x2_grad.get(), atol=1e-5)
assert np.allclose(ad.get_grad_asnumpyarray([x3])[0], x3_grad.get(), atol=1e-5)
|
|
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class binance (Exchange):
def describe(self):
return self.deep_extend(super(binance, self).describe(), {
'id': 'binance',
'name': 'Binance',
'countries': 'CN', # China
'rateLimit': 500,
'hasCORS': False,
# obsolete metainfo interface
'hasFetchTickers': True,
'hasFetchOHLCV': True,
'hasFetchMyTrades': True,
'hasFetchOrder': True,
'hasFetchOrders': True,
'hasFetchOpenOrders': True,
'hasWithdraw': True,
# new metainfo interface
'has': {
'fetchTickers': True,
'fetchOHLCV': True,
'fetchMyTrades': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOpenOrders': True,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
'3d': '3d',
'1w': '1w',
'1M': '1M',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/29604020-d5483cdc-87ee-11e7-94c7-d1a8d9169293.jpg',
'api': {
'web': 'https://www.binance.com',
'wapi': 'https://api.binance.com/wapi/v3',
'public': 'https://api.binance.com/api/v1',
'private': 'https://api.binance.com/api/v3',
'v3': 'https://api.binance.com/api/v3',
},
'www': 'https://www.binance.com',
'doc': 'https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md',
'fees': [
'https://binance.zendesk.com/hc/en-us/articles/115000429332',
'https://support.binance.com/hc/en-us/articles/115000583311',
],
},
'api': {
'web': {
'get': [
'exchange/public/product',
],
},
'wapi': {
'post': [
'withdraw',
],
'get': [
'depositHistory',
'withdrawHistory',
'depositAddress',
],
},
'v3': {
'get': [
'ticker/price',
'ticker/bookTicker',
],
},
'public': {
'get': [
'exchangeInfo',
'ping',
'time',
'depth',
'aggTrades',
'klines',
'ticker/24hr',
'ticker/allPrices',
'ticker/allBookTickers',
],
},
'private': {
'get': [
'order',
'openOrders',
'allOrders',
'account',
'myTrades',
],
'post': [
'order',
'order/test',
'userDataStream',
],
'put': [
'userDataStream'
],
'delete': [
'order',
'userDataStream',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.001,
'maker': 0.001,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BNB': 1.0,
'BTC': 0.001,
'ETH': 0.01,
'LTC': 0.01,
'NEO': 0.0,
'QTUM': 0.01,
'SNT': 10.0,
'BNT': 1.2,
'EOS': 0.7,
'BCH': 0.0005,
'GAS': 0.0,
'USDT': 25.0,
'OAX': 6.0,
'DNT': 60.0,
'MCO': 0.3,
'ICN': 2.0,
'WTC': 0.4,
'OMG': 0.3,
'ZRX': 10.0,
'STRAT': 0.1,
'SNGLS': 20.0,
'BQX': 2.0,
'KNC': 2.0,
'FUN': 80.0,
'SNM': 20.0,
'LINK': 10.0,
'XVG': 0.1,
'CTR': 7.0,
'SALT': 0.4,
'IOTA': 0.5,
'MDA': 2.0,
'MTL': 0.5,
'SUB': 4.0,
'ETC': 0.01,
'MTH': 35.0,
'ENG': 5.0,
'AST': 10.0,
'BTG': None,
'DASH': 0.002,
'EVX': 2.5,
'REQ': 15.0,
'LRC': 12.0,
'VIB': 20.0,
'HSR': 0.0001,
'TRX': 30.0,
'POWR': 5.0,
'ARK': 0.1,
'YOYO': 10.0,
'XRP': 0.15,
'MOD': 2.0,
'ENJ': 80.0,
'STORJ': 3.0,
'VEN': 5.0,
'KMD': 1.0,
'NULS': 4.0,
'RCN': 20.0,
'RDN': 0.3,
'XMR': 0.04,
'DLT': 15.0,
'AMB': 10.0,
'BAT': 15.0,
'ZEC': 0.005,
'BCPT': 14.0,
'ARN': 7.0,
'GVT': 0.5,
'CDT': 35.0,
'GXS': 0.3,
'POE': 50.0,
'QSP': 30.0,
'BTS': 1.0,
'XZC': 0.02,
'LSK': 0.1,
'TNT': 35.0,
'FUEL': 60.0,
'MANA': 30.0,
'BCD': 0.0005,
'DGD': 0.03,
'ADX': 2.0,
'ADA': 1.0,
'PPT': 0.1,
'CMT': 15.0,
'XLM': 0.01,
'CND': 180.0,
'LEND': 50.0,
'WABI': 4.0,
'TNB': 70.0,
'WAVES': 0.002,
'ICX': 1.5,
'GTO': 30.0,
'OST': 15.0,
'ELF': 2.0,
'AION': 1.0,
'NEBL': 0.01,
'BRD': 3.0,
'EDO': 1.5,
'WINGS': 3.0,
'NAV': 0.2,
'LUN': 0.3,
'TRIG': 5.0,
},
'deposit': {
'BNB': 0,
'BTC': 0,
'ETH': 0,
'LTC': 0,
'NEO': 0,
'QTUM': 0,
'SNT': 0,
'BNT': 0,
'EOS': 0,
'BCH': 0,
'GAS': 0,
'USDT': 0,
'OAX': 0,
'DNT': 0,
'MCO': 0,
'ICN': 0,
'WTC': 0,
'OMG': 0,
'ZRX': 0,
'STRAT': 0,
'SNGLS': 0,
'BQX': 0,
'KNC': 0,
'FUN': 0,
'SNM': 0,
'LINK': 0,
'XVG': 0,
'CTR': 0,
'SALT': 0,
'IOTA': 0,
'MDA': 0,
'MTL': 0,
'SUB': 0,
'ETC': 0,
'MTH': 0,
'ENG': 0,
'AST': 0,
'BTG': 0,
'DASH': 0,
'EVX': 0,
'REQ': 0,
'LRC': 0,
'VIB': 0,
'HSR': 0,
'TRX': 0,
'POWR': 0,
'ARK': 0,
'YOYO': 0,
'XRP': 0,
'MOD': 0,
'ENJ': 0,
'STORJ': 0,
},
},
},
})
async def fetch_markets(self):
response = await self.publicGetExchangeInfo()
markets = response['symbols']
result = []
for i in range(0, len(markets)):
market = markets[i]
id = market['symbol']
base = self.common_currency_code(market['baseAsset'])
quote = self.common_currency_code(market['quoteAsset'])
symbol = base + '/' + quote
filters = self.index_by(market['filters'], 'filterType')
precision = {
'base': market['baseAssetPrecision'],
'quote': market['quotePrecision'],
'amount': market['baseAssetPrecision'],
'price': market['quotePrecision'],
}
active = (market['status'] == 'TRADING')
lot = -1 * math.log10(precision['amount'])
entry = self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'lot': lot,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': lot,
'max': None,
},
'price': {
'min': -1 * math.log10(precision['price']),
'max': None,
},
'cost': {
'min': lot,
'max': None,
},
},
})
if 'PRICE_FILTER' in filters:
filter = filters['PRICE_FILTER']
entry['precision']['price'] = self.precision_from_string(filter['tickSize'])
entry['limits']['price'] = {
'min': float(filter['minPrice']),
'max': float(filter['maxPrice']),
}
if 'LOT_SIZE' in filters:
filter = filters['LOT_SIZE']
entry['precision']['amount'] = self.precision_from_string(filter['stepSize'])
entry['lot'] = float(filter['stepSize'])
entry['limits']['amount'] = {
'min': float(filter['minQty']),
'max': float(filter['maxQty']),
}
if 'MIN_NOTIONAL' in filters:
entry['limits']['cost']['min'] = float(filters['MIN_NOTIONAL']['minNotional'])
result.append(entry)
return result
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
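        # Worked example (illustrative numbers only): with the 0.001 taker rate
        # above, a taker *sell* of 2 BTC at 10000 USDT/BTC is charged
        # 2 * 0.001 * 10000 = 20 USDT (quote currency), while a *buy* of the
        # same size is charged 2 * 0.001 = 0.002 BTC (base currency).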
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.fee_to_precision(symbol, cost)),
}
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetAccount(params)
result = {'info': response}
balances = response['balances']
for i in range(0, len(balances)):
balance = balances[i]
asset = balance['asset']
currency = self.common_currency_code(asset)
account = {
'free': float(balance['free']),
'used': float(balance['locked']),
'total': 0.0,
}
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.publicGetDepth(self.extend({
'symbol': market['id'],
'limit': 100, # default = maximum = 100
}, params))
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_integer(ticker, 'closeTime')
if timestamp is None:
timestamp = self.milliseconds()
symbol = ticker['symbol']
if not market:
if symbol in self.markets_by_id:
market = self.markets_by_id[symbol]
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highPrice'),
'low': self.safe_float(ticker, 'lowPrice'),
'bid': self.safe_float(ticker, 'bidPrice'),
'ask': self.safe_float(ticker, 'askPrice'),
'vwap': self.safe_float(ticker, 'weightedAvgPrice'),
'open': self.safe_float(ticker, 'openPrice'),
'close': self.safe_float(ticker, 'prevClosePrice'),
'first': None,
'last': self.safe_float(ticker, 'lastPrice'),
'change': self.safe_float(ticker, 'priceChangePercent'),
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'quoteVolume'),
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetTicker24hr(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
rawTickers = await self.publicGetTicker24hr(params)
tickers = []
for i in range(0, len(rawTickers)):
tickers.append(self.parse_ticker(rawTickers[i]))
tickersBySymbol = self.index_by(tickers, 'symbol')
# return all of them if no symbols were passed in the first argument
if symbols is None:
return tickersBySymbol
# otherwise filter by symbol
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
if symbol in tickersBySymbol:
result[symbol] = tickersBySymbol[symbol]
return result
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
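        # Binance kline rows are ordered [open time (ms), open, high, low,
        # close, volume, ...]; only the first six fields are used here.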
return [
ohlcv[0],
float(ohlcv[1]),
float(ohlcv[2]),
float(ohlcv[3]),
float(ohlcv[4]),
float(ohlcv[5]),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
request['limit'] = limit if (limit) else 500 # default == max == 500
if since:
request['startTime'] = since
response = await self.publicGetKlines(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
timestampField = 'T' if ('T' in list(trade.keys())) else 'time'
timestamp = trade[timestampField]
priceField = 'p' if ('p' in list(trade.keys())) else 'price'
price = float(trade[priceField])
amountField = 'q' if ('q' in list(trade.keys())) else 'qty'
amount = float(trade[amountField])
idField = 'a' if ('a' in list(trade.keys())) else 'id'
id = str(trade[idField])
side = None
order = None
if 'orderId' in trade:
order = str(trade['orderId'])
        if 'm' in trade:
            side = 'sell' if trade['m'] else 'buy'  # reversed intentionally: 'm' == True means the buyer was the maker
        else:
            side = 'buy' if (trade['isBuyer']) else 'sell'  # 'isBuyer' gives the true side directly
fee = None
if 'commission' in trade:
fee = {
'cost': float(trade['commission']),
'currency': self.common_currency_code(trade['commissionAsset']),
}
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'id': id,
'order': order,
'type': None,
'side': side,
'price': price,
'cost': price * amount,
'amount': amount,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since:
request['startTime'] = since
request['endTime'] = since + 3600000
if limit:
request['limit'] = limit
# 'fromId': 123, # ID to get aggregate trades from INCLUSIVE.
# 'startTime': 456, # Timestamp in ms to get aggregate trades from INCLUSIVE.
# 'endTime': 789, # Timestamp in ms to get aggregate trades until INCLUSIVE.
# 'limit': 500, # default = maximum = 500
response = await self.publicGetAggTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_order_status(self, status):
if status == 'NEW':
return 'open'
if status == 'PARTIALLY_FILLED':
return 'open'
if status == 'FILLED':
return 'closed'
if status == 'CANCELED':
return 'canceled'
return status.lower()
def parse_order(self, order, market=None):
status = self.parse_order_status(order['status'])
symbol = None
if market:
symbol = market['symbol']
else:
id = order['symbol']
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
timestamp = order['time']
price = float(order['price'])
amount = float(order['origQty'])
filled = self.safe_float(order, 'executedQty', 0.0)
remaining = max(amount - filled, 0.0)
result = {
'info': order,
'id': str(order['orderId']),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': order['type'].lower(),
'side': order['side'].lower(),
'price': price,
'amount': amount,
'cost': price * amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
}
return result
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
order = {
'symbol': market['id'],
'quantity': self.amount_to_string(symbol, amount),
'type': type.upper(),
'side': side.upper(),
}
if type == 'limit':
order = self.extend(order, {
'price': self.price_to_precision(symbol, price),
                'timeInForce': 'GTC',  # 'GTC' = Good Till Cancelled (default), 'IOC' = Immediate Or Cancel
})
response = await self.privatePostOrder(self.extend(order, params))
return {
'info': response,
'id': str(response['orderId']),
}
async def fetch_order(self, id, symbol=None, params={}):
if not symbol:
raise ExchangeError(self.id + ' fetchOrder requires a symbol param')
await self.load_markets()
market = self.market(symbol)
response = await self.privateGetOrder(self.extend({
'symbol': market['id'],
'orderId': int(id),
}, params))
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if not symbol:
raise ExchangeError(self.id + ' fetchOrders requires a symbol param')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit:
request['limit'] = limit
response = await self.privateGetAllOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if not symbol:
raise ExchangeError(self.id + ' fetchOpenOrders requires a symbol param')
await self.load_markets()
market = self.market(symbol)
response = await self.privateGetOpenOrders(self.extend({
'symbol': market['id'],
}, params))
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
async def cancel_order(self, id, symbol=None, params={}):
if not symbol:
raise ExchangeError(self.id + ' cancelOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
response = None
try:
response = await self.privateDeleteOrder(self.extend({
'symbol': market['id'],
'orderId': int(id),
# 'origClientOrderId': id,
}, params))
except Exception as e:
if self.last_http_response.find('UNKNOWN_ORDER') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() error: ' + self.last_http_response)
raise e
return response
def nonce(self):
return self.milliseconds()
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if not symbol:
raise ExchangeError(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit:
request['limit'] = limit
response = await self.privateGetMyTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def common_currency_code(self, currency):
if currency == 'BCC':
return 'BCH'
return currency
def currency_id(self, currency):
if currency == 'BCH':
return 'BCC'
return currency
async def fetch_deposit_address(self, currency, params={}):
response = await self.wapiGetDepositAddress(self.extend({
'asset': self.currency_id(currency),
}, params))
if 'success' in response:
if response['success']:
address = self.safe_string(response, 'address')
return {
'currency': currency,
'address': address,
'status': 'ok',
'info': response,
}
raise ExchangeError(self.id + ' fetchDepositAddress failed: ' + self.last_http_response)
async def withdraw(self, currency, amount, address, params={}):
response = await self.wapiPostWithdraw(self.extend({
'asset': self.currency_id(currency),
'address': address,
'amount': float(amount),
}, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
url += '/' + path
if api == 'wapi':
url += '.html'
if (api == 'private') or (api == 'wapi'):
self.check_required_credentials()
nonce = self.milliseconds()
query = self.urlencode(self.extend({
'timestamp': nonce,
'recvWindow': 100000,
}, params))
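            # sign the urlencoded query with the account secret; ccxt's
            # Exchange.hmac defaults to HMAC-SHA256, which is what Binance's
            # signed (private/wapi) endpoints expect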
signature = self.hmac(self.encode(query), self.encode(self.secret))
query += '&' + 'signature=' + signature
headers = {
'X-MBX-APIKEY': self.apiKey,
}
if (method == 'GET') or (api == 'wapi'):
url += '?' + query
else:
body = query
headers['Content-Type'] = 'application/x-www-form-urlencoded'
else:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body):
if code >= 400:
if code == 418:
raise DDoSProtection(self.id + ' ' + str(code) + ' ' + reason + ' ' + body)
if body.find('MIN_NOTIONAL') >= 0:
raise InvalidOrder(self.id + ' order cost = amount * price should be >(0.001 BTC or 0.01 ETH or 1 BNB or 1 USDT)' + body)
if body.find('LOT_SIZE') >= 0:
raise InvalidOrder(self.id + ' order amount should be evenly divisible by lot size, use self.amount_to_lots(symbol, amount) ' + body)
if body.find('PRICE_FILTER') >= 0:
raise InvalidOrder(self.id + ' order price exceeds allowed price precision or invalid, use self.price_to_precision(symbol, amount) ' + body)
if body.find('Order does not exist') >= 0:
raise OrderNotFound(self.id + ' ' + body)
if body[0] == "{":
response = json.loads(body)
error = self.safe_value(response, 'code')
if error is not None:
if error == -2010:
raise InsufficientFunds(self.id + ' ' + self.json(response))
elif error == -2011:
raise OrderNotFound(self.id + ' ' + self.json(response))
elif error < 0:
raise ExchangeError(self.id + ' ' + self.json(response))
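# Minimal async usage sketch (the credentials and symbol below are placeholders):
#
#     import asyncio
#
#     async def main():
#         exchange = binance({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#         ticker = await exchange.fetch_ticker('BNB/BTC')
#         print(ticker['last'])
#
#     asyncio.get_event_loop().run_until_complete(main())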
|
|
from conans.client.output import Color
from conans.model.ref import PackageReference
from conans.model.ref import ConanFileReference
from collections import OrderedDict
class Printer(object):
""" Print some specific information """
INDENT_COLOR = {0: Color.BRIGHT_CYAN,
1: Color.BRIGHT_RED,
2: Color.BRIGHT_GREEN,
3: Color.BRIGHT_YELLOW,
4: Color.BRIGHT_MAGENTA}
INDENT_SPACES = 4
def __init__(self, out):
self._out = out
def print_graph(self, deps_graph, registry):
""" Simple pretty printing of a deps graph, can be improved
with options, info like licenses, etc
"""
self._out.writeln("Requirements", Color.BRIGHT_YELLOW)
for node in sorted(deps_graph.nodes):
ref, _ = node
if not ref:
continue
remote = registry.get_ref(ref)
from_text = "from local" if not remote else "from %s" % remote.name
self._out.writeln(" %s %s" % (repr(ref), from_text), Color.BRIGHT_CYAN)
self._out.writeln("Packages", Color.BRIGHT_YELLOW)
for node in sorted(deps_graph.nodes):
ref, conanfile = node
if not ref:
continue
ref = PackageReference(ref, conanfile.info.package_id())
self._out.writeln(" %s" % repr(ref), Color.BRIGHT_CYAN)
self._out.writeln("")
def print_info(self, deps_graph, project_reference, _info, registry, graph_updates_info=None,
remote=None):
""" Print the dependency information for a conan file
Attributes:
deps_graph: the dependency graph of conan file references to print
            project_reference: the conan file reference that represents the conan
                               file for a project on the path. This may be None,
                               in which case the project itself will not be part
                               of the printed dependencies.
remote: Remote specified in install command.
Could be different from the registry one.
"""
def show(field):
if _info is True:
return True
if field in [s.lower() for s in _info.split(",")]:
return True
return False
graph_updates_info = graph_updates_info or {}
for node in sorted(deps_graph.nodes):
ref, conan = node
if not ref:
# ref is only None iff info is being printed for a project directory, and
# not a passed in reference
if project_reference is None:
continue
else:
ref = project_reference
self._out.writeln("%s" % str(ref), Color.BRIGHT_CYAN)
reg_remote = registry.get_ref(ref)
# Excludes PROJECT fake reference
remote_name = remote
if reg_remote and not remote:
remote_name = reg_remote.name
if isinstance(ref, ConanFileReference) and show("remote"):
if reg_remote:
self._out.writeln(" Remote: %s=%s" % (reg_remote.name, reg_remote.url),
Color.BRIGHT_GREEN)
else:
self._out.writeln(" Remote: None", Color.BRIGHT_GREEN)
url = getattr(conan, "url", None)
license_ = getattr(conan, "license", None)
author = getattr(conan, "author", None)
if url and show("url"):
self._out.writeln(" URL: %s" % url, Color.BRIGHT_GREEN)
if license_ and show("license"):
if isinstance(license_, (list, tuple, set)):
self._out.writeln(" Licenses: %s" % ", ".join(license_), Color.BRIGHT_GREEN)
else:
self._out.writeln(" License: %s" % license_, Color.BRIGHT_GREEN)
if author and show("author"):
self._out.writeln(" Author: %s" % author, Color.BRIGHT_GREEN)
if isinstance(ref, ConanFileReference) and show("update"): # Excludes PROJECT
update = graph_updates_info.get(ref)
update_messages = {
None: ("Version not checked", Color.WHITE),
0: ("You have the latest version (%s)" % remote_name, Color.BRIGHT_GREEN),
1: ("There is a newer version (%s)" % remote_name, Color.BRIGHT_YELLOW),
-1: ("The local file is newer than remote's one (%s)" % remote_name,
Color.BRIGHT_RED)
}
self._out.writeln(" Updates: %s" % update_messages[update][0],
update_messages[update][1])
dependants = deps_graph.inverse_neighbors(node)
if isinstance(ref, ConanFileReference) and show("required"): # Excludes
self._out.writeln(" Required by:", Color.BRIGHT_GREEN)
for d in dependants:
ref = repr(d.conan_ref) if d.conan_ref else project_reference
self._out.writeln(" %s" % ref, Color.BRIGHT_YELLOW)
if show("requires"):
depends = deps_graph.neighbors(node)
if depends:
self._out.writeln(" Requires:", Color.BRIGHT_GREEN)
for d in depends:
self._out.writeln(" %s" % repr(d.conan_ref), Color.BRIGHT_YELLOW)
def print_search_recipes(self, references, pattern):
""" Print all the exported conans information
param pattern: wildcards, e.g., "opencv/*"
"""
if not references:
warn_msg = "There are no packages"
pattern_msg = " matching the %s pattern" % pattern
self._out.info(warn_msg + pattern_msg if pattern else warn_msg)
return
self._out.info("Existing package recipes:\n")
for conan_ref in sorted(references):
self._print_colored_line(str(conan_ref), indent=0)
def print_search_packages(self, packages_props, reference, recipe_hash, packages_query):
if not packages_props:
if packages_query:
warn_msg = "There are no packages for reference '%s' matching the query '%s'" % (str(reference), packages_query)
else:
warn_msg = "There are no packages for pattern '%s'" % str(reference)
self._out.info(warn_msg)
return
self._out.info("Existing packages for recipe %s:\n" % str(reference))
# Each package
for package_id, properties in sorted(packages_props.items()):
self._print_colored_line("Package_ID", package_id, 1)
for section in ("options", "settings", "full_requires"):
attrs = properties.get(section, [])
if attrs:
section_name = {"full_requires": "requires"}.get(section, section)
self._print_colored_line("[%s]" % section_name, indent=2)
if isinstance(attrs, dict): # options, settings
attrs = OrderedDict(sorted(attrs.items()))
for key, value in attrs.items():
self._print_colored_line(key, value=value, indent=3)
elif isinstance(attrs, list): # full requires
for key in sorted(attrs):
self._print_colored_line(key, indent=3)
package_recipe_hash = properties.get("recipe_hash", None)
            # Simplification: always compare "outdated" against the local recipe;
            # if a remote check is needed, install the recipe first.
if recipe_hash:
self._print_colored_line("outdated from recipe: %s" % (recipe_hash != package_recipe_hash), indent=2)
self._out.writeln("")
def print_profile(self, name, profile):
self._out.info("Configuration for profile %s:\n" % name)
self._print_profile_section("settings", profile.settings)
self._print_profile_section("env", profile.env)
scopes = profile.scopes.dumps().splitlines()
self._print_colored_line("[scopes]")
for scope in scopes:
self._print_colored_line(scope, indent=1)
def _print_profile_section(self, name, items, indent=0):
self._print_colored_line("[%s]" % name, indent=indent)
for key, value in items:
self._print_colored_line(key, value=str(value), indent=indent+1)
def _print_colored_line(self, text, value=None, indent=0):
""" Print a colored line depending on its indentation level
Attributes:
text: string line
split_symbol: if you want an output with different in-line colors
indent_plus: integer to add a plus indentation
"""
text = text.strip()
if not text:
return
text_color = Printer.INDENT_COLOR.get(indent, Color.BRIGHT_WHITE)
indent_text = ' ' * Printer.INDENT_SPACES * indent
if value is not None:
value_color = Color.BRIGHT_WHITE
self._out.write('%s%s: ' % (indent_text, text), text_color)
self._out.writeln(value, value_color)
else:
self._out.writeln('%s%s' % (indent_text, text), text_color)
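# Minimal usage sketch (assumes `out` is a Conan output object exposing
# write/writeln/info, like the one normally handed to Printer by the client):
#
#     printer = Printer(out)
#     printer.print_search_recipes(references, pattern="opencv/*")
#     printer.print_profile("default", profile)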
|
|
import re
import six
from ..api import APIClient
from ..errors import BuildError
from ..utils.json_stream import json_stream
from .resource import Collection, Model
class Image(Model):
"""
An image on the server.
"""
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
@property
def labels(self):
"""
The labels of an image as dictionary.
"""
result = self.attrs['Config'].get('Labels')
return result or {}
@property
def short_id(self):
"""
The ID of the image truncated to 10 characters, plus the ``sha256:``
prefix.
"""
if self.id.startswith('sha256:'):
return self.id[:17]
return self.id[:10]
@property
def tags(self):
"""
The image's tags.
"""
tags = self.attrs.get('RepoTags')
if tags is None:
tags = []
return [tag for tag in tags if tag != '<none>:<none>']
def history(self):
"""
Show the history of an image.
Returns:
(str): The history of the image.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.history(self.id)
def save(self):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Returns:
(urllib3.response.HTTPResponse object): The response from the
daemon.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = cli.images.get("fedora:latest")
>>> resp = image.save()
>>> f = open('/tmp/fedora-latest.tar', 'w')
>>> for chunk in resp.stream():
>>> f.write(chunk)
>>> f.close()
"""
return self.client.api.get_image(self.id)
def tag(self, repository, tag=None, **kwargs):
"""
Tag this image into a repository. Similar to the ``docker tag``
command.
Args:
repository (str): The repository to set for the tag
tag (str): The tag name
            force (bool): Whether to force the tag, overwriting an existing tag
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
(bool): ``True`` if successful
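        Example:
            (illustrative sketch; the repository names below are placeholders)

            >>> image = client.images.get('busybox:latest')
            >>> image.tag('myuser/busybox', tag='v1')
            True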
"""
return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
class ImageCollection(Collection):
model = Image
def build(self, **kwargs):
"""
Build an image and return it. Similar to the ``docker build``
command. Either ``path`` or ``fileobj`` must be set.
If you have a tar file for the Docker build context (including a
Dockerfile) already, pass a readable file-like object to ``fileobj``
and also pass ``custom_context=True``. If the stream is compressed
        also, set ``encoding`` to the correct value (e.g. ``gzip``).
If you want to get the raw output of the build, use the
:py:meth:`~docker.api.build.BuildApiMixin.build` method in the
low-level API.
Args:
path (str): Path to the directory containing the Dockerfile
fileobj: A file object to use as the Dockerfile. (Or a file-like
object)
tag (str): A tag to add to the final image
quiet (bool): Whether to return the status
nocache (bool): Don't use the cache when set to ``True``
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
compressing
pull (bool): Downloads any updates to the FROM image in Dockerfiles
forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds
dockerfile (str): path within the build context to the Dockerfile
buildargs (dict): A dictionary of build arguments
container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys:
- memory (int): set memory limit for build
- memswap (int): Total memory (memory + swap), -1 to disable
swap
- cpushares (int): CPU shares (relative weight)
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
``"0-3"``, ``"0,1"``
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB
labels (dict): A dictionary of labels to set on the image
cache_from (list): A list of images used for build cache
resolution
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
network_mode (str): networking mode for the run commands during
build
Returns:
(:py:class:`Image`): The built image.
Raises:
:py:class:`docker.errors.BuildError`
If there is an error during the build.
:py:class:`docker.errors.APIError`
If the server returns any other error.
``TypeError``
If neither ``path`` nor ``fileobj`` is specified.
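        Example:
            (a minimal sketch; the ``path`` and ``tag`` values are placeholders)

            >>> image = client.images.build(path='.', tag='myapp:latest', rm=True)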
"""
resp = self.client.api.build(**kwargs)
if isinstance(resp, six.string_types):
return self.get(resp)
last_event = None
image_id = None
for chunk in json_stream(resp):
if 'error' in chunk:
raise BuildError(chunk['error'])
if 'stream' in chunk:
match = re.search(
r'(^Successfully built |sha256:)([0-9a-f]+)$',
chunk['stream']
)
if match:
image_id = match.group(2)
last_event = chunk
if image_id:
return self.get(image_id)
raise BuildError(last_event or 'Unknown')
def get(self, name):
"""
Gets an image.
Args:
name (str): The name of the image.
Returns:
(:py:class:`Image`): The image.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_image(name))
def list(self, name=None, all=False, filters=None):
"""
List images on the server.
Args:
name (str): Only show images belonging to the repository ``name``
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- ``label`` (str): format either ``key`` or ``key=value``
Returns:
(list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.images(name=name, all=all, filters=filters)
return [self.get(r["Id"]) for r in resp]
def load(self, data):
"""
Load an image that was previously saved using
:py:meth:`~docker.models.images.Image.save` (or ``docker save``).
Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
Returns:
(generator): Progress output as JSON objects
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.load_image(data)
def pull(self, name, tag=None, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
low-level API.
Args:
name (str): The repository to pull
tag (str): The tag to pull
insecure_registry (bool): Use an insecure registry
auth_config (dict): Override the credentials that
:py:meth:`~docker.client.DockerClient.login` has set for
this request. ``auth_config`` should contain the ``username``
and ``password`` keys to be valid.
Returns:
(:py:class:`Image`): The image that has been pulled.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = client.images.pull('busybox')
"""
self.client.api.pull(name, tag=tag, **kwargs)
return self.get('{0}:{1}'.format(name, tag) if tag else name)
def push(self, repository, tag=None, **kwargs):
return self.client.api.push(repository, tag=tag, **kwargs)
push.__doc__ = APIClient.push.__doc__
def remove(self, *args, **kwargs):
self.client.api.remove_image(*args, **kwargs)
remove.__doc__ = APIClient.remove_image.__doc__
def search(self, *args, **kwargs):
return self.client.api.search(*args, **kwargs)
search.__doc__ = APIClient.search.__doc__
def prune(self, filters=None):
return self.client.api.prune_images(filters=filters)
prune.__doc__ = APIClient.prune_images.__doc__
|
|
"""
Unit test for dynamic parameters.
Tests __get__, __set__ and that inspect_value() and
get_value_generator() work.
Originally implemented as doctests in Topographica in the file
testDynamicParameter.txt
"""
import copy
import unittest
import param
import numbergen
class TestDynamicParameters(unittest.TestCase):
def setUp(self):
param.Dynamic.time_dependent = False
class TestPO1(param.Parameterized):
x = param.Dynamic(default=numbergen.UniformRandom(lbound=-1,ubound=1,seed=1),doc="nothing")
y = param.Dynamic(default=1)
class TestPO2(param.Parameterized):
x = param.Dynamic(default=numbergen.UniformRandom(lbound=-1,ubound=1,seed=30))
y = param.Dynamic(default=1.0)
self.TestPO2 = TestPO2
self.TestPO1 = TestPO1
self.t1 = self.TestPO1()
self.t2 = self.TestPO1(x=numbergen.UniformRandom(lbound=-1,ubound=1,seed=10))
self.t3 = self.TestPO1(x=numbergen.UniformRandom(lbound=-1,ubound=1,seed=10))
self.t2.set_dynamic_time_fn(None)
self.t3.set_dynamic_time_fn(None)
self.t6 = self.TestPO2()
self.t7 = self.TestPO2()
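        # Fixture summary: t1 uses TestPO1's class-level dynamic default
        # (seed=1); t2 and t3 each get their own UniformRandom(seed=10) and have
        # their time_fn disabled, so they yield identical streams that advance
        # on every access; t6 and t7 are plain TestPO2 instances used to check
        # per-instance instantiation of dynamic values.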
class TestDynamicParameterBasics(TestDynamicParameters):
def test_set_dynamic_time_fn_x(self):
self.t1.set_dynamic_time_fn(None)
self.assertEqual(
self.t1.params()['x']._value_is_dynamic(self.t1), True)
def test_set_dynamic_time_fn_y(self):
self.assertEqual(
self.t1.params()['y']._value_is_dynamic(self.t1), False)
def test_inspect_x(self):
"no value generated yet"
self.assertEqual(self.t1.inspect_value('x'), None)
def test_inspect_y(self):
self.assertEqual(self.t1.inspect_value('y'), 1)
def test_inspect_y_set(self):
self.t1.y = 2
self.assertEqual(self.t1.inspect_value('y'), 2)
def test_set_dynamic_numbergen(self):
is_numbergen = isinstance(self.t2.get_value_generator('x'),
numbergen.UniformRandom)
self.assertEqual(is_numbergen, True)
def test_matching_numbergen_streams(self):
"check that t2 and t3 have identical streams"
self.assertEqual(self.t2.x, self.t3.x)
def test_numbergen_objects_distinct(self):
"check t2 and t3 do not share UniformRandom objects"
self.t2.x
self.assertNotEqual(self.t2.inspect_value('x'),
self.t3.inspect_value('x'))
def test_numbergen_inspect(self):
" inspect_value() should return last generated value "
self.t2.x # Call 1
self.t2.x # Call 2
t2_last_value = self.t2.x # advance t2 beyond t3
self.assertEqual(self.t2.inspect_value('x'),
t2_last_value)
# ensure last_value is not shared
self.assertNotEqual(self.t3.inspect_value('x'), t2_last_value)
def test_dynamic_value_instantiated(self):
t6_first_value = self.t6.x
self.assertNotEqual(self.t7.inspect_value('x'),
t6_first_value)
def test_non_dynamic_value_not_instantiated(self):
" non-dynamic value not instantiated"
self.TestPO2.y = 4
self.assertEqual(self.t6.y, 4)
self.assertEqual(self.t7.y, 4)
def test_dynamic_value_setting(self):
self.t6.y = numbergen.UniformRandom()
t8 = self.TestPO2()
self.TestPO2.y = 10
# t6 got a dynamic value, but shouldn't have changed Parameter's instantiate
self.assertEqual(t8.y, 10)
def test_setting_y_param_numbergen(self):
self.TestPO2.y=numbergen.UniformRandom() # now the Parameter instantiate should be true
t9 = self.TestPO2()
self.assertEqual('_y_param_value' in t9.__dict__, True)
def test_shared_numbergen(self):
"""
Instances of TestPO2 that don't have their own value for the
parameter share one UniformRandom object
"""
self.TestPO2.y=numbergen.UniformRandom() # now the Parameter instantiate should be true
self.assertEqual(self.t7.get_value_generator('y') is self.TestPO2().params()['y'].default, True)
self.assertEqual(self.TestPO2().params()['y'].default.__class__.__name__, 'UniformRandom')
def test_copy_match(self):
"check a copy is the same"
t9 = copy.deepcopy(self.t7)
self.assertEqual(t9.get_value_generator('y') is self.TestPO2().params()['y'].default, True)
class TestDynamicTimeDependent(TestDynamicParameters):
def setUp(self):
super(TestDynamicTimeDependent, self).setUp()
param.Dynamic.time_dependent = True
class TestPO3(param.Parameterized):
x = param.Dynamic(default=numbergen.UniformRandom(name='xgen',
time_dependent=True))
class TestPO4(self.TestPO1):
"Nested parameterized objects"
z = param.Parameter(default=self.TestPO1())
self.TestPO3 = TestPO3
self.TestPO4 = TestPO4
self.t10 = self.TestPO1()
self.t11 = TestPO3()
def test_dynamic_values_unchanged_dependent(self):
param.Dynamic.time_dependent = True
call_1 = self.t10.x
call_2 = self.t10.x
call_3 = self.t10.x
self.assertEqual(call_1, call_2)
self.assertEqual(call_2, call_3)
def test_dynamic_values_changed_independent(self):
param.Dynamic.time_dependent = False
call_1 = self.t10.x
call_2 = self.t10.x
call_3 = self.t10.x
self.assertNotEqual(call_1, call_2)
self.assertNotEqual(call_2, call_3)
def test_dynamic_values_change(self):
param.Dynamic.time_dependent = True
with param.Dynamic.time_fn as t:
t(0)
call_1 = self.t10.x
t += 1
call_2 = self.t10.x
t(0)
call_3 = self.t10.x
self.assertNotEqual(call_1, call_2)
self.assertNotEqual(call_1, call_3)
def test_dynamic_values_time_dependent(self):
param.Dynamic.time_dependent = True
with param.Dynamic.time_fn as t:
t(0)
call_1 = self.t11.x
t += 1
call_2 = self.t11.x
t(0)
call_3 = self.t11.x
self.assertNotEqual(call_1, call_2)
self.assertEqual(call_1, call_3)
def test_class_dynamic_values_change(self):
call_1 = self.TestPO3.x
call_2 = self.TestPO3.x
self.assertEqual(call_1, call_2)
with param.Dynamic.time_fn as t:
t += 1
call_3 = self.TestPO3.x
self.assertNotEqual(call_2, call_3)
def test_dynamic_value_change_independent(self):
t12 = self.TestPO1()
t12.set_dynamic_time_fn(None)
self.assertNotEqual(t12.x, t12.x)
self.assertEqual(t12.y, t12.y)
def test_dynamic_value_change_disabled(self):
" time_fn set on the UniformRandom() when t13.y was set"
t13 = self.TestPO1()
t13.set_dynamic_time_fn(None)
t13.y = numbergen.UniformRandom()
self.assertNotEqual(t13.y, t13.y)
def test_dynamic_value_change_enabled(self):
" time_fn set on the UniformRandom() when t13.y was set"
t14 = self.TestPO1()
t14.y = numbergen.UniformRandom()
self.assertEqual(t14.y, t14.y)
def test_dynamic_time_fn_not_inherited(self):
" time_fn not inherited"
t15 = self.TestPO4()
t15.set_dynamic_time_fn(None)
with param.Dynamic.time_fn as t:
call_1 = t15.z.x
t += 1
call_2 = t15.z.x
self.assertNotEqual(call_1, call_2)
class TestDynamicSharedNumbergen(TestDynamicParameters):
"Check shared generator"
def setUp(self):
super(TestDynamicSharedNumbergen, self).setUp()
self.shared = numbergen.UniformRandom(lbound=-1,ubound=1,seed=20)
def test_dynamic_shared_numbergen(self):
param.Dynamic.time_dependent = True
t11 = self.TestPO1(x=self.shared)
t12 = self.TestPO1(x=self.shared)
with param.Dynamic.time_fn as t:
t += 1
call_1 = t11.x
self.assertEqual(call_1, t12.x)
t += 1
self.assertNotEqual(call_1, t12.x)
if __name__ == "__main__":
import nose
nose.runmodule()
# Commented out block in the original doctest version.
# Maybe these are features originally planned but never implemented
"""
It is not yet possible to set time_fn for a Parameter instance
>>> class TestPO5(param.Parameterized):
... x = param.Dynamic(default=numbergen.UniformRandom(),dynamic_time_fn=None)
"""
"""
We currently don't support iterators/generators in Dynamic unless
they're wrapped.
>>> i = iter([1,2,3])
>>> t11.x = i
>>> topo.sim.run(1)
>>> t11.x
1
>>> def gen():
... yield 2
... yield 4
... yield 6
>>> g = gen()
>>> t11.x = g
>>> t11.x
2
>>> topo.sim.run(1)
>>> t11.x
4
"""
|
|
#!/usr/bin/env python
# encoding: UTF-8
import asyncio
import datetime
import sqlite3
import unittest
import uuid
from cloudhands.burst.agent import message_handler
from cloudhands.burst.appliance import PreCheckAgent
from cloudhands.burst.appliance import PreDeleteAgent
from cloudhands.burst.appliance import PreOperationalAgent
from cloudhands.burst.appliance import PreProvisionAgent
from cloudhands.burst.appliance import ProvisioningAgent
from cloudhands.burst.appliance import PreStartAgent
from cloudhands.burst.appliance import PreStopAgent
import cloudhands.common
from cloudhands.common.connectors import Registry
from cloudhands.common.connectors import initialise
from cloudhands.common.schema import Appliance
from cloudhands.common.schema import CatalogueChoice
from cloudhands.common.schema import CatalogueItem
from cloudhands.common.schema import Component
from cloudhands.common.schema import IPAddress
from cloudhands.common.schema import Label
from cloudhands.common.schema import NATRouting
from cloudhands.common.schema import Node
from cloudhands.common.schema import Organisation
from cloudhands.common.schema import Provider
from cloudhands.common.schema import ProviderReport
from cloudhands.common.schema import ProviderToken
from cloudhands.common.schema import Registration
from cloudhands.common.schema import SoftwareDefinedNetwork
from cloudhands.common.schema import State
from cloudhands.common.schema import Subscription
from cloudhands.common.schema import Touch
from cloudhands.common.schema import User
from cloudhands.common.states import ApplianceState
from cloudhands.common.states import RegistrationState
class AgentTesting(unittest.TestCase):
def setUp(self):
""" Populate test database"""
session = Registry().connect(sqlite3, ":memory:").session
initialise(session)
session.add_all((
Organisation(
uuid=uuid.uuid4().hex,
name="TestOrg"),
Provider(
uuid=uuid.uuid4().hex,
name="cloudhands.jasmin.vcloud.phase04.cfg"
),
Registration(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__
),
User(
handle="Anon",
uuid=uuid.uuid4().hex
),
))
session.commit()
org = session.query(Organisation).one()
prvdr = session.query(Provider).one()
reg = session.query(Registration).one()
user = session.query(User).one()
session.add_all((
Subscription(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
provider=prvdr
),
CatalogueItem(
uuid=uuid.uuid4().hex,
name="Web Server",
description="Apache server VM",
note=None,
logo=None,
natrouted=True,
organisation=org,
),
CatalogueItem(
uuid=uuid.uuid4().hex,
name="File Server",
description="OpenSSH server VM",
note=None,
logo=None,
natrouted=False,
organisation=org,
)
))
session.commit()
valid = session.query(
RegistrationState).filter(
RegistrationState.name == "valid").one()
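        # Two ProviderTokens are recorded in order: an 'expired...' value first,
        # then a 'valid...' one, so the most recently added credential is the
        # valid token (this ordering is what test_job_has_latest_creds relies on).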
for val in (
"expiredexpiredexpiredexpired",
"validvalidvalidvalidvalidval"
):
now = datetime.datetime.utcnow()
act = Touch(artifact=reg, actor=user, state=valid, at=now)
token = ProviderToken(
touch=act, provider=prvdr,
key="T-Auth", value=val)
session.add(token)
session.commit()
def tearDown(self):
""" Every test gets its own in-memory database """
r = Registry()
r.disconnect(sqlite3, ":memory:")
class PreCheckAgentTesting(AgentTesting):
def test_handler_registration(self):
q = asyncio.Queue()
agent = PreCheckAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
self.assertEqual(
agent.touch_to_operational,
message_handler.dispatch(PreCheckAgent.CheckedAsOperational)
)
self.assertEqual(
agent.touch_to_preoperational,
message_handler.dispatch(PreCheckAgent.CheckedAsPreOperational)
)
self.assertEqual(
agent.touch_to_provisioning,
message_handler.dispatch(PreCheckAgent.CheckedAsProvisioning)
)
def setup_appliance_check(self):
session = Registry().connect(sqlite3, ":memory:").session
# 0. Set up User
user = session.query(User).one()
org = session.query(Organisation).one()
# 1. User creates new appliances
now = datetime.datetime.utcnow()
then = now - datetime.timedelta(seconds=45)
requested = session.query(ApplianceState).filter(
ApplianceState.name == "requested").one()
apps = (
Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org),
Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org),
)
acts = (
Touch(artifact=apps[0], actor=user, state=requested, at=then),
Touch(artifact=apps[1], actor=user, state=requested, at=now)
)
tmplt = session.query(CatalogueItem).first()
choices = (
CatalogueChoice(
provider=None, touch=acts[0], natrouted=True,
**{k: getattr(tmplt, k, None)
for k in ("name", "description", "logo")}),
CatalogueChoice(
provider=None, touch=acts[1], natrouted=True,
**{k: getattr(tmplt, k, None)
for k in ("name", "description", "logo")})
)
session.add_all(choices)
session.commit()
now = datetime.datetime.utcnow()
then = now - datetime.timedelta(seconds=45)
configuring = session.query(ApplianceState).filter(
ApplianceState.name == "configuring").one()
acts = (
Touch(artifact=apps[0], actor=user, state=configuring, at=then),
Touch(artifact=apps[1], actor=user, state=configuring, at=now)
)
session.add_all(acts)
session.commit()
self.assertEqual(
2, session.query(Touch).join(Appliance).filter(
Appliance.id == apps[1].id).count())
# 2. One Appliance is configured interactively by user
latest = apps[1].changes[-1]
now = datetime.datetime.utcnow()
act = Touch(
artifact=apps[1], actor=user, state=latest.state, at=now)
label = Label(
name="test_server01",
description="This is just for kicking tyres",
touch=act)
session.add(label)
session.commit()
self.assertEqual(
3, session.query(Touch).join(Appliance).filter(
Appliance.id == apps[1].id).count())
# 3. Skip to provisioning
now = datetime.datetime.utcnow()
preprovision = session.query(ApplianceState).filter(
ApplianceState.name == "provisioning").one()
session.add(
Touch(
artifact=apps[1],
actor=user,
state=preprovision, at=now))
# 4. Schedule for check
now = datetime.datetime.utcnow()
precheck = session.query(ApplianceState).filter(
ApplianceState.name == "pre_check").one()
session.add(
Touch(
artifact=apps[1],
actor=user,
state=precheck, at=now))
session.add(act)
session.commit()
q = PreCheckAgent.queue(None, None, loop=None)
agent = PreCheckAgent(q, args=None, config=None)
jobs = list(agent.jobs(session))
self.assertEqual(1, len(jobs))
q.put_nowait(jobs[0])
self.assertEqual(1, q.qsize())
return q
def test_job_query_and_transmit(self):
q = self.setup_appliance_check()
job = q.get_nowait()
self.assertEqual(5, len(job.artifact.changes))
self.assertEqual(3, len(job.token))
def test_job_has_latest_creds(self):
q = self.setup_appliance_check()
job = q.get_nowait()
self.assertIn("valid", job.token[2])
def test_queue_creation(self):
self.assertIsInstance(
PreCheckAgent.queue(None, None, loop=None),
asyncio.Queue
)
def test_operational_msg_dispatch_and_touch(self):
session = Registry().connect(sqlite3, ":memory:").session
org = session.query(Organisation).one()
user = session.query(User).one()
now = datetime.datetime.utcnow()
requested = session.query(ApplianceState).filter(
ApplianceState.name == "requested").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=requested, at=now)
session.add(act)
session.commit()
self.assertEqual(0, session.query(ProviderReport).count())
q = PreCheckAgent.queue(None, None, loop=None)
agent = PreCheckAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
msg = PreCheckAgent.CheckedAsOperational(
app.uuid, datetime.datetime.utcnow(),
"cloudhands.jasmin.vcloud.phase04.cfg",
"192.168.2.1", "deployed", "on", None)
rv = message_handler(msg, session)
self.assertIsInstance(rv, Touch)
self.assertEqual(1, session.query(ProviderReport).count())
report = session.query(ProviderReport).one()
self.assertEqual(report.creation, "deployed")
self.assertEqual(report.power, "on")
self.assertEqual("operational", app.changes[-1].state.name)
def test_preoperational_msg_dispatch_and_touch(self):
session = Registry().connect(sqlite3, ":memory:").session
user = session.query(User).one()
org = session.query(Organisation).one()
now = datetime.datetime.utcnow()
requested = session.query(ApplianceState).filter(
ApplianceState.name == "requested").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=requested, at=now)
session.add(act)
session.commit()
self.assertEqual(0, session.query(ProviderReport).count())
q = PreCheckAgent.queue(None, None, loop=None)
agent = PreCheckAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
msg = PreCheckAgent.CheckedAsPreOperational(
app.uuid, datetime.datetime.utcnow(),
"cloudhands.jasmin.vcloud.phase04.cfg",
"192.168.2.1", "deployed", "off", None)
rv = message_handler(msg, session)
self.assertIsInstance(rv, Touch)
self.assertEqual(1, session.query(ProviderReport).count())
report = session.query(ProviderReport).one()
self.assertEqual(report.creation, "deployed")
self.assertEqual(report.power, "off")
self.assertEqual("pre_operational", app.changes[-1].state.name)
class PreDeleteAgentTesting(AgentTesting):
def test_handler_registration(self):
q = asyncio.Queue()
agent = PreDeleteAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
self.assertEqual(
agent.touch_to_deleted,
message_handler.dispatch(PreDeleteAgent.Message)
)
def test_queue_creation(self):
self.assertIsInstance(
PreDeleteAgent.queue(None, None, loop=None),
asyncio.Queue
)
def test_msg_dispatch_and_touch(self):
session = Registry().connect(sqlite3, ":memory:").session
user = session.query(User).one()
org = session.query(Organisation).one()
now = datetime.datetime.utcnow()
operational = session.query(ApplianceState).filter(
ApplianceState.name == "operational").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=operational, at=now)
session.add(act)
session.commit()
self.assertEqual(0, session.query(ProviderReport).count())
q = PreDeleteAgent.queue(None, None, loop=None)
agent = PreDeleteAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
msg = PreDeleteAgent.Message(
app.uuid, datetime.datetime.utcnow(),
"cloudhands.jasmin.vcloud.phase04.cfg")
rv = message_handler(msg, session)
self.assertIsInstance(rv, Touch)
self.assertEqual("deleted", app.changes[-1].state.name)
class PreOperationalAgentTesting(AgentTesting):
def test_handler_registration(self):
q = asyncio.Queue()
agent = PreOperationalAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
self.assertEqual(
agent.touch_to_operational,
message_handler.dispatch(PreOperationalAgent.OperationalMessage)
)
self.assertEqual(
agent.touch_to_prestop,
message_handler.dispatch(
PreOperationalAgent.ResourceConstrainedMessage)
)
def test_queue_creation(self):
self.assertIsInstance(
ProvisioningAgent.queue(None, None, loop=None),
asyncio.Queue
)
def test_msg_dispatch_and_touch(self):
session = Registry().connect(sqlite3, ":memory:").session
user = session.query(User).one()
org = session.query(Organisation).one()
now = datetime.datetime.utcnow()
requested = session.query(ApplianceState).filter(
ApplianceState.name == "requested").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=requested, at=now)
session.add(act)
session.commit()
self.assertEqual(0, session.query(NATRouting).count())
q = PreOperationalAgent.queue(None, None, loop=None)
agent = PreOperationalAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
msg = PreOperationalAgent.OperationalMessage(
app.uuid, datetime.datetime.utcnow(),
"cloudhands.jasmin.vcloud.phase04.cfg",
"192.168.2.1",
"172.16.151.166")
rv = message_handler(msg, session)
self.assertIsInstance(rv, Touch)
self.assertEqual(1, session.query(NATRouting).count())
self.assertEqual("operational", app.changes[-1].state.name)
class PreProvisionAgentTesting(AgentTesting):
def test_handler_registration(self):
q = asyncio.Queue()
agent = PreProvisionAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
self.assertEqual(
agent.touch_to_provisioning,
message_handler.dispatch(PreProvisionAgent.Message)
)
def test_queue_creation(self):
self.assertIsInstance(
PreProvisionAgent.queue(None, None, loop=None),
asyncio.Queue
)
def setup_appliance(self):
session = Registry().connect(sqlite3, ":memory:").session
# 0. Set up User
user = session.query(User).one()
org = session.query(Organisation).one()
# 1. User creates new appliances
now = datetime.datetime.utcnow()
then = now - datetime.timedelta(seconds=45)
requested = session.query(ApplianceState).filter(
ApplianceState.name == "requested").one()
apps = (
Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org),
Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org),
)
acts = (
Touch(artifact=apps[0], actor=user, state=requested, at=then),
Touch(artifact=apps[1], actor=user, state=requested, at=now)
)
tmplt = session.query(CatalogueItem).first()
choices = (
CatalogueChoice(
provider=None, touch=acts[0], natrouted=True,
**{k: getattr(tmplt, k, None)
for k in ("name", "description", "logo")}),
CatalogueChoice(
provider=None, touch=acts[1], natrouted=False,
**{k: getattr(tmplt, k, None)
for k in ("name", "description", "logo")})
)
session.add_all(choices)
session.commit()
now = datetime.datetime.utcnow()
then = now - datetime.timedelta(seconds=45)
configuring = session.query(ApplianceState).filter(
ApplianceState.name == "configuring").one()
acts = (
Touch(artifact=apps[0], actor=user, state=configuring, at=then),
Touch(artifact=apps[1], actor=user, state=configuring, at=now)
)
session.add_all(acts)
session.commit()
self.assertEqual(
2, session.query(Touch).join(Appliance).filter(
Appliance.id == apps[1].id).count())
# 2. One Appliance is configured interactively by user
latest = apps[1].changes[-1]
now = datetime.datetime.utcnow()
act = Touch(
artifact=apps[1], actor=user, state=latest.state, at=now)
label = Label(
name="test_server01",
description="This is just for kicking tyres",
touch=act)
session.add(label)
session.commit()
self.assertEqual(
3, session.query(Touch).join(Appliance).filter(
Appliance.id == apps[1].id).count())
# 3. When user is happy, clicks 'Go'
now = datetime.datetime.utcnow()
preprovision = session.query(ApplianceState).filter(
ApplianceState.name == "pre_provision").one()
act = Touch(
artifact=apps[1], actor=user, state=preprovision, at=now)
session.add(act)
session.commit()
q = PreProvisionAgent.queue(None, None, loop=None)
agent = PreProvisionAgent(q, args=None, config=None)
jobs = list(agent.jobs(session))
self.assertEqual(1, len(jobs))
q.put_nowait(jobs[0])
self.assertEqual(1, q.qsize())
return q
def test_job_query_and_transmit(self):
q = self.setup_appliance()
job = q.get_nowait()
self.assertEqual(4, len(job.artifact.changes))
self.assertEqual(3, len(job.token))
return q
def test_job_has_latest_creds(self):
q = self.setup_appliance()
job = q.get_nowait()
self.assertIn("valid", job.token[2])
def test_msg_dispatch_and_touch(self):
session = Registry().connect(sqlite3, ":memory:").session
user = session.query(User).one()
org = session.query(Organisation).one()
now = datetime.datetime.utcnow()
requested = session.query(ApplianceState).filter(
ApplianceState.name == "requested").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=requested, at=now)
session.add(act)
session.commit()
self.assertEqual(0, session.query(Node).count())
q = PreProvisionAgent.queue(None, None, loop=None)
agent = PreProvisionAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
msg = PreProvisionAgent.Message(
app.uuid, datetime.datetime.utcnow(),
"cloudhands.jasmin.vcloud.phase04.cfg",
"https://vjasmin-vcloud-test.jc.rl.ac.uk/api/vApp/"
"vapp-a24617ae-7af0-4e83-92db-41e081b67102")
rv = message_handler(msg, session)
self.assertIsInstance(rv, Touch)
self.assertEqual(1, session.query(Node).count())
self.assertEqual("provisioning", app.changes[-1].state.name)
class ProvisioningAgentTesting(AgentTesting):
def test_handler_registration(self):
q = asyncio.Queue()
agent = ProvisioningAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
self.assertEqual(
agent.touch_to_precheck,
message_handler.dispatch(ProvisioningAgent.Message)
)
def test_queue_creation(self):
self.assertIsInstance(
ProvisioningAgent.queue(None, None, loop=None),
asyncio.Queue
)
def test_msg_dispatch_and_touch(self):
session = Registry().connect(sqlite3, ":memory:").session
user = session.query(User).one()
org = session.query(Organisation).one()
now = datetime.datetime.utcnow()
requested = session.query(ApplianceState).filter(
ApplianceState.name == "requested").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=requested, at=now)
session.add(act)
session.commit()
q = ProvisioningAgent.queue(None, None, loop=None)
agent = ProvisioningAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
msg = ProvisioningAgent.Message(
app.uuid, datetime.datetime.utcnow())
rv = message_handler(msg, session)
self.assertIsInstance(rv, Touch)
self.assertEqual("pre_check", app.changes[-1].state.name)
def test_job_query_and_transmit(self):
session = Registry().connect(sqlite3, ":memory:").session
# 0. Set up User
user = session.query(User).one()
org = session.query(Organisation).one()
# 1. User creates new appliances
now = datetime.datetime.utcnow()
then = now - datetime.timedelta(seconds=45)
requested = session.query(ApplianceState).filter(
ApplianceState.name == "requested").one()
apps = (
Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org),
Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org),
)
acts = (
Touch(artifact=apps[0], actor=user, state=requested, at=then),
Touch(artifact=apps[1], actor=user, state=requested, at=now)
)
tmplt = session.query(CatalogueItem).first()
choices = (
CatalogueChoice(
provider=None, touch=acts[0], natrouted=True,
**{k: getattr(tmplt, k, None)
for k in ("name", "description", "logo")}),
CatalogueChoice(
provider=None, touch=acts[1], natrouted=True,
**{k: getattr(tmplt, k, None)
for k in ("name", "description", "logo")})
)
session.add_all(choices)
session.commit()
now = datetime.datetime.utcnow()
then = now - datetime.timedelta(seconds=45)
provisioning = session.query(ApplianceState).filter(
ApplianceState.name == "provisioning").one()
acts = (
Touch(artifact=apps[0], actor=user, state=provisioning, at=then),
Touch(artifact=apps[1], actor=user, state=provisioning, at=now)
)
session.add_all(acts)
session.commit()
self.assertEqual(
2, session.query(Touch).join(Appliance).filter(
Appliance.id == apps[1].id).count())
q = ProvisioningAgent.queue(None, None, loop=None)
agent = ProvisioningAgent(q, args=None, config=None)
jobs = list(agent.jobs(session))
self.assertEqual(1, len(jobs))
self.assertEqual(apps[0].uuid, jobs[0].uuid)
class PreStartAgentTesting(AgentTesting):
def test_handler_registration(self):
q = asyncio.Queue()
agent = PreStartAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
self.assertEqual(
agent.touch_to_running,
message_handler.dispatch(PreStartAgent.Message)
)
def test_queue_creation(self):
self.assertIsInstance(
PreStartAgent.queue(None, None, loop=None),
asyncio.Queue
)
def test_msg_dispatch_and_touch(self):
session = Registry().connect(sqlite3, ":memory:").session
user = session.query(User).one()
org = session.query(Organisation).one()
now = datetime.datetime.utcnow()
operational = session.query(ApplianceState).filter(
ApplianceState.name == "operational").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=operational, at=now)
session.add(act)
session.commit()
self.assertEqual(0, session.query(ProviderReport).count())
q = PreStartAgent.queue(None, None, loop=None)
agent = PreStartAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
msg = PreStartAgent.Message(
app.uuid, datetime.datetime.utcnow(),
"cloudhands.jasmin.vcloud.phase04.cfg")
rv = message_handler(msg, session)
self.assertIsInstance(rv, Touch)
self.assertEqual("running", app.changes[-1].state.name)
class PreStopAgentTesting(AgentTesting):
def test_handler_registration(self):
q = asyncio.Queue()
agent = PreStopAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
self.assertEqual(
agent.touch_to_stopped,
message_handler.dispatch(PreStopAgent.Message)
)
def test_queue_creation(self):
self.assertIsInstance(
PreStopAgent.queue(None, None, loop=None),
asyncio.Queue
)
def test_msg_dispatch_and_touch(self):
session = Registry().connect(sqlite3, ":memory:").session
user = session.query(User).one()
org = session.query(Organisation).one()
now = datetime.datetime.utcnow()
operational = session.query(ApplianceState).filter(
ApplianceState.name == "operational").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=operational, at=now)
session.add(act)
session.commit()
self.assertEqual(0, session.query(ProviderReport).count())
q = PreStopAgent.queue(None, None, loop=None)
agent = PreStopAgent(q, args=None, config=None)
for typ, handler in agent.callbacks:
message_handler.register(typ, handler)
msg = PreStopAgent.Message(
app.uuid, datetime.datetime.utcnow(),
"cloudhands.jasmin.vcloud.phase04.cfg")
rv = message_handler(msg, session)
self.assertIsInstance(rv, Touch)
self.assertEqual("stopped", app.changes[-1].state.name)
class ApplianceTesting(AgentTesting):
def test_appliance_lifecycle(self):
session = Registry().connect(sqlite3, ":memory:").session
# 0. Set up User
org = session.query(Organisation).one()
user = session.query(User).one()
# 1. User creates a new appliance
now = datetime.datetime.utcnow()
requested = session.query(ApplianceState).filter(
ApplianceState.name == "requested").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=requested, at=now)
tmplt = session.query(CatalogueItem).first()
choice = CatalogueChoice(
provider=None, touch=act, natrouted=True,
**{k: getattr(tmplt, k, None)
for k in ("name", "description", "logo")})
session.add(choice)
session.commit()
self.assertEqual(
1, session.query(CatalogueChoice).join(Touch).join(
Appliance).filter(Appliance.id == app.id).count())
now = datetime.datetime.utcnow()
configuring = session.query(ApplianceState).filter(
ApplianceState.name == "configuring").one()
act = Touch(artifact=app, actor=user, state=configuring, at=now)
session.add(act)
session.commit()
self.assertEqual(
2, session.query(Touch).join(Appliance).filter(
Appliance.id == app.id).count())
# 2. Appliance persists and is configured interactively by user
latest = app.changes[-1]
now = datetime.datetime.utcnow()
act = Touch(
artifact=app, actor=user, state=latest.state, at=now)
label = Label(
name="test_server01",
description="This is just for kicking tyres",
touch=act)
session.add(label)
session.commit()
self.assertEqual(
3, session.query(Touch).join(Appliance).filter(
Appliance.id == app.id).count())
# 3. When user is happy, clicks 'Go'
now = datetime.datetime.utcnow()
preprovision = session.query(ApplianceState).filter(
ApplianceState.name == "pre_provision").one()
act = Touch(
artifact=app, actor=user, state=preprovision, at=now)
session.add(act)
session.commit()
self.assertEqual(
4, session.query(Touch).join(Appliance).filter(
Appliance.id == app.id).count())
# 4. Burst controller finds hosts in 'pre_provision' and actions them
latest = (h.changes[-1] for h in session.query(Appliance).all())
jobs = [
(t.actor, t.artifact) for t in latest
if t.state is preprovision]
self.assertIn((user, app), jobs)
now = datetime.datetime.utcnow()
provisioning = session.query(ApplianceState).filter(
ApplianceState.name == "provisioning").one()
app.changes.append(
Touch(artifact=app, actor=user, state=provisioning, at=now))
session.commit()
# 5. Burst controller raises a node
now = datetime.datetime.utcnow()
provider = session.query(Provider).one()
act = Touch(artifact=app, actor=user, state=provisioning, at=now)
label = session.query(Label).join(Touch).join(Appliance).filter(
Appliance.id == app.id).first()
node = Node(name=label.name, touch=act, provider=provider)
sdn = SoftwareDefinedNetwork(name="bridge_routed_external", touch=act)
session.add_all((sdn, node))
session.commit()
# 6. Burst controller allocates an IP
now = datetime.datetime.utcnow()
act = Touch(artifact=app, actor=user, state=provisioning, at=now)
app.changes.append(act)
ip = IPAddress(value="192.168.1.4", touch=act, provider=provider)
session.add(ip)
self.assertIn(act, session)
session.commit()
# 7. Burst controller marks Host as pre_operational
now = datetime.datetime.utcnow()
preoperational = session.query(ApplianceState).filter(
ApplianceState.name == "pre_operational").one()
app.changes.append(
Touch(artifact=app, actor=user, state=preoperational, at=now))
# 8. Recovering details of provisioning of this host
resources = [r for i in session.query(Touch).filter(
Touch.artifact == app).all() for r in i.resources]
self.assertIn(node, resources)
self.assertIn(sdn, resources)
self.assertIn(ip, resources)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# CAVEAT UTILITOR
#
# This file was automatically generated by Grako.
#
# https://pypi.python.org/pypi/grako/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.parsing import graken, Parser
from grako.util import re, RE_FLAGS # noqa
__version__ = (2016, 6, 22, 22, 24, 59, 2)
__all__ = [
'text2kwlParser',
'text2kwlSemantics',
'main'
]
class text2kwlParser(Parser):
def __init__(self,
whitespace=None,
nameguard=None,
comments_re=None,
eol_comments_re=None,
ignorecase=None,
left_recursion=True,
**kwargs):
super(text2kwlParser, self).__init__(
whitespace=whitespace,
nameguard=nameguard,
comments_re=comments_re,
eol_comments_re=eol_comments_re,
ignorecase=ignorecase,
left_recursion=left_recursion,
**kwargs
)
@graken()
def _text2kwl_(self):
def block1():
self._sentence_()
self._closure(block1)
self.ast['@'] = self.last_node
@graken()
def _sentence_(self):
with self._choice():
with self._option():
self._sentence_()
self._join_()
self._sentence_()
with self._option():
self._command_()
with self._option():
self._question_()
with self._option():
self._statement_()
with self._option():
self._expression_()
self._error('no available options')
@graken()
def _statement_(self):
self._expression_()
self.ast['@'] = self.last_node
self._token('.')
@graken()
def _command_(self):
self._expression_()
self.ast['@'] = self.last_node
self._token('!')
@graken()
def _question_(self):
self._expression_()
self.ast['@'] = self.last_node
self._token('?')
@graken()
def _expression_(self):
with self._choice():
with self._option():
self._conjunction_()
with self._option():
self._clause_()
with self._option():
self._triple_()
with self._option():
self._tuple_()
with self._option():
self._singleton_()
with self._option():
self._function_()
with self._option():
self._n_p_()
with self._option():
self._raw_()
with self._option():
pass
self._error('no available options')
@graken()
def _conjunction2_(self):
with self._choice():
with self._option():
self._conjunction_()
with self._option():
self._token('{')
self._conjunction_()
self.ast['@'] = self.last_node
self._token('}')
self._error('no available options')
@graken()
def _conjunction_(self):
with self._choice():
with self._option():
self._entry_()
self._join_()
self._entry_()
with self._option():
self._ifthen_()
with self._option():
self._token('{')
self._ifthen_()
self.ast['@'] = self.last_node
self._token('}')
self._error('no available options')
@graken()
def _ifthen_(self):
self._token('if')
self._entry_()
self.ast['@'] = self.last_node
self._token('then')
self.ast['@'] = self.last_node
self._entry_()
self.ast['@'] = self.last_node
@graken()
def _triple_(self):
self._determiner_()
self._adjective_()
self._noun_()
@graken()
def _clause_(self):
with self._choice():
with self._option():
self._subject_verb_object_()
with self._option():
self._verb_object_()
with self._option():
self._pronoun_()
self._verb_()
self._n_p_()
with self._option():
self._verb_()
self._noun_()
self._error('no available options')
@graken()
def _subject_verb_object_(self):
with self._choice():
with self._option():
self._n_p_()
self.ast['subject'] = self.last_node
self._action_()
self.ast['verb'] = self.last_node
self._n_p_()
self.ast['object'] = self.last_node
with self._option():
self._n_p_()
self.ast['subject'] = self.last_node
self._action_()
self.ast['verb'] = self.last_node
self._preposition_p_()
self.ast['object'] = self.last_node
self._error('no available options')
self.ast._define(
['subject', 'verb', 'object'],
[]
)
@graken()
def _subject_verb_(self):
self._n_p_()
self.ast['subject'] = self.last_node
self._action_()
self.ast['verb'] = self.last_node
self.ast._define(
['subject', 'verb'],
[]
)
@graken()
def _verb_object_(self):
with self._choice():
with self._option():
self._action_()
self.ast['verb'] = self.last_node
self._n_p_()
self.ast['object'] = self.last_node
with self._option():
self._action_()
self.ast['verb'] = self.last_node
self._preposition_p_()
self.ast['object'] = self.last_node
self._error('no available options')
self.ast._define(
['verb', 'object'],
[]
)
@graken()
def _preposition_p_(self):
self._preposition_()
self._n_p_()
@graken()
def _n_p_(self):
with self._choice():
with self._option():
self._tuple_()
with self._option():
self._plural_()
with self._option():
self._title_()
with self._option():
self._noun_()
with self._option():
self._pronoun_()
with self._option():
self._adjective_()
self._error('no available options')
@graken()
def _function_(self):
with self._choice():
with self._option():
self._word_()
with self._option():
self._token('{')
self._word_()
self.ast['@'] = self.last_node
self._token('}')
self._error('no available options')
@graken()
def _word_(self):
with self._choice():
with self._option():
self._function_()
self.ast['t'] = self.last_node
self._token('(')
self._args_()
self.ast['v'] = self.last_node
self._token(')')
with self._option():
self._function_()
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self._error('no available options')
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _args_(self):
with self._choice():
with self._option():
self._singleton_()
self._token(',')
self._singleton_()
with self._option():
self._singleton_()
with self._option():
pass
self._error('no available options')
@graken()
def _plural_(self):
self._token('plural')
self.ast['t'] = self.last_node
self._token('(')
self._noun_()
self.ast['v'] = self.last_node
self._token(')')
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _title_(self):
self._token('title')
self.ast['t'] = self.last_node
self._token('(')
self._entry_()
self.ast['v'] = self.last_node
self._token(')')
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _tuple_(self):
with self._choice():
with self._option():
self._determiner_()
self._noun_()
with self._option():
self._adjective_()
self._noun_()
with self._option():
self._possessive_()
self._noun_()
with self._option():
self._raw_()
self._noun_()
with self._option():
self._noun_()
self._raw_()
with self._option():
self._noun_()
self._noun_()
with self._option():
self._raw_()
self._raw_()
self._error('no available options')
@graken()
def _singleton_(self):
with self._choice():
with self._option():
self._entry_()
with self._option():
self._action_()
self._error('no available options')
@graken()
def _action_(self):
with self._choice():
with self._option():
self._conjugated_verb_()
with self._option():
self._verb_()
self._error('no available options')
@graken()
def _conjugated_verb_(self):
with self._choice():
with self._option():
self._tenses_()
self._token('(')
self._conjugations_()
self._token('(')
self._verb_()
self._token('))')
with self._option():
self._tenses_()
self._token('(')
self._conjugations_()
self._token('(')
self._tuple_verb_()
self._token('))')
self._error('no available options')
@graken()
def _tuple_verb_(self):
self._verb_()
self.ast['@'] = self.last_node
self._adverb_()
self.ast['@'] = self.last_node
@graken()
def _join_(self):
with self._choice():
with self._option():
self._token('and')
with self._option():
self._token(':')
with self._option():
self._token(';')
with self._option():
self._token(',')
with self._option():
self._token('of')
with self._option():
self._token('or')
with self._option():
self._token('so')
with self._option():
self._token('then')
with self._option():
self._token('when')
self._error('expecting one of: , : ; and of or so then when')
@graken()
def _formatting_(self):
with self._choice():
with self._option():
self._token('defn')
with self._option():
self._token('plural')
with self._option():
self._token('quote')
with self._option():
self._token('sample')
with self._option():
self._token('title')
self._error('expecting one of: defn plural quote sample title')
@graken()
def _conjugations_(self):
with self._choice():
with self._option():
self._token('je')
with self._option():
self._token('tu')
with self._option():
self._token('il')
with self._option():
self._token('elle')
with self._option():
self._token('nous')
with self._option():
self._token('vous')
with self._option():
self._token('ils')
with self._option():
self._token('elles')
self._error('expecting one of: elle elles il ils je nous tu vous')
@graken()
def _tenses_(self):
with self._choice():
with self._option():
self._token('cmd')
with self._option():
self._token('done_tdy')
with self._option():
self._token('done_tmw')
with self._option():
self._token('done_ydy')
with self._option():
self._token('not_tdy')
with self._option():
self._token('not_tmw')
with self._option():
self._token('not_ydy')
with self._option():
self._token('now_tdy')
with self._option():
self._token('now_tmw')
with self._option():
self._token('now_ydy')
with self._option():
self._token('tdy')
with self._option():
self._token('tmw')
with self._option():
self._token('ydy')
self._error('expecting one of: cmd done_tdy done_tmw done_ydy not_tdy not_tmw not_ydy now_tdy now_tmw now_ydy tdy tmw ydy')
@graken()
def _adjective_(self):
self._token('adj')
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _adverb_(self):
self._token('adv')
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _determiner_(self):
self._token('det')
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _noun_(self):
self._token('nom')
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _possessive_(self):
self._token('pos')
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _preposition_(self):
self._token('pre')
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _pronoun_(self):
self._token('pro')
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _raw_(self):
with self._choice():
with self._option():
self._pattern(r'raw\((.*?)\)')
with self._option():
self._pattern(r'date\((.*?)\)')
self._error('expecting one of: date\\((.*?)\\) raw\\((.*?)\\)')
@graken()
def _verb_(self):
self._token('act')
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _entry_(self):
self._pos_()
self.ast['t'] = self.last_node
self._token(':')
self._token_()
self.ast['v'] = self.last_node
self.ast._define(
['t', 'v'],
[]
)
@graken()
def _pos_(self):
with self._choice():
with self._option():
self._token('act')
with self._option():
self._token('adj')
with self._option():
self._token('adv')
with self._option():
self._token('det')
with self._option():
self._token('exc')
with self._option():
self._token('kg')
with self._option():
self._token('nom')
with self._option():
self._token('pos')
with self._option():
self._token('pre')
with self._option():
self._token('pro')
with self._option():
self._token('sci')
self._error('expecting one of: act adj adv det exc kg nom pos pre pro sci')
@graken()
def _token_(self):
self._pattern(r'[a-zA-Z0-9#]*')
class text2kwlSemantics(object):
def text2kwl(self, ast):
return ast
def sentence(self, ast):
return ast
def statement(self, ast):
return ast
def command(self, ast):
return ast
def question(self, ast):
return ast
def expression(self, ast):
return ast
def conjunction2(self, ast):
return ast
def conjunction(self, ast):
return ast
def ifthen(self, ast):
return ast
def triple(self, ast):
return ast
def clause(self, ast):
return ast
def subject_verb_object(self, ast):
return ast
def subject_verb(self, ast):
return ast
def verb_object(self, ast):
return ast
def preposition_p(self, ast):
return ast
def n_p(self, ast):
return ast
def function(self, ast):
return ast
def word(self, ast):
return ast
def args(self, ast):
return ast
def plural(self, ast):
return ast
def title(self, ast):
return ast
def tuple(self, ast):
return ast
def singleton(self, ast):
return ast
def action(self, ast):
return ast
def conjugated_verb(self, ast):
return ast
def tuple_verb(self, ast):
return ast
def join(self, ast):
return ast
def formatting(self, ast):
return ast
def conjugations(self, ast):
return ast
def tenses(self, ast):
return ast
def adjective(self, ast):
return ast
def adverb(self, ast):
return ast
def determiner(self, ast):
return ast
def noun(self, ast):
return ast
def possessive(self, ast):
return ast
def preposition(self, ast):
return ast
def pronoun(self, ast):
return ast
def raw(self, ast):
return ast
def verb(self, ast):
return ast
def entry(self, ast):
return ast
def pos(self, ast):
return ast
def token(self, ast):
return ast
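# A hedged sketch (names below are illustrative, not part of the generated
# module) of supplying custom semantics: subclass text2kwlSemantics, override
# the rule methods you care about, and pass an instance to parse(). Whether
# parse() accepts a `semantics` keyword depends on the installed Grako version.
#
#   class LoggingSemantics(text2kwlSemantics):
#       def noun(self, ast):
#           print('noun:', ast)
#           return ast
#
#   # parser.parse(text, 'text2kwl', semantics=LoggingSemantics())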
def main(filename, startrule, trace=False, whitespace=None, nameguard=None):
import json
with open(filename) as f:
text = f.read()
parser = text2kwlParser(parseinfo=False)
ast = parser.parse(
text,
startrule,
filename=filename,
trace=trace,
whitespace=whitespace,
nameguard=nameguard)
print('AST:')
print(ast)
print()
print('JSON:')
print(json.dumps(ast, indent=2))
print()
if __name__ == '__main__':
import argparse
import string
import sys
class ListRules(argparse.Action):
def __call__(self, parser, namespace, values, option_string):
print('Rules:')
for r in text2kwlParser.rule_list():
print(r)
print()
sys.exit(0)
parser = argparse.ArgumentParser(description="Simple parser for text2kwl.")
parser.add_argument('-l', '--list', action=ListRules, nargs=0,
help="list all rules and exit")
parser.add_argument('-n', '--no-nameguard', action='store_true',
dest='no_nameguard',
help="disable the 'nameguard' feature")
parser.add_argument('-t', '--trace', action='store_true',
help="output trace information")
parser.add_argument('-w', '--whitespace', type=str, default=string.whitespace,
help="whitespace specification")
parser.add_argument('file', metavar="FILE", help="the input file to parse")
parser.add_argument('startrule', metavar="STARTRULE",
help="the start rule for parsing")
args = parser.parse_args()
main(
args.file,
args.startrule,
trace=args.trace,
whitespace=args.whitespace,
nameguard=not args.no_nameguard
)
|
|
# -*- coding: utf-8 -*-
from __future__ import division
import click
import os
import collections
import contextlib
import signal
import swiftclient
import keystoneclient
import subprocess32 as subprocess
import copy
import sqlalchemy
AUTH_VERSION = 2
# An object which we can use to pass state
# from the group to subcommands.
State = collections.namedtuple('state', ('verbose', 'engine'))
# Fall back to the default SIGPIPE behaviour so that piping output
# to tools such as `less` does not raise broken-pipe errors.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def echo_error(message):
"""A utility function to print the message to stderr."""
click.secho(message, fg='red', err=True)
# ====================================
# Database Definitions
# ====================================
DEFAULT_DB_URL = 'sqlite:///swiftbulkuploader.db'
metadata = sqlalchemy.MetaData()
paths = sqlalchemy.Table('paths', metadata,
sqlalchemy.Column('id', sqlalchemy.Integer,
sqlalchemy.Sequence('paths_id_seq'),
primary_key=True),
sqlalchemy.Column('path', sqlalchemy.Text()),
sqlalchemy.Column('pathtype', sqlalchemy.Enum('directory',
'file')),
sqlalchemy.Column('accessible', sqlalchemy.Boolean),
sqlalchemy.Column('status', sqlalchemy.Enum('unprocessed',
'processed',
'error')),
sqlalchemy.Column('bytes', sqlalchemy.BigInteger),
sqlalchemy.Column('objectname', sqlalchemy.Text),
)
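# For orientation, a stored row might look like (hypothetical values):
#   id=1, path='/data/archive/report.pdf', pathtype='file', accessible=True,
#   status='unprocessed', bytes=104857, objectname='report.pdf'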
# ====================================
# Main Command Group
# ====================================
@click.group()
@click.option('--verbose', default=False, is_flag=True,
help='Enable verbose output.')
@click.option('--db-url', default=DEFAULT_DB_URL,
help='The sqlalchemy database URL to use. Default: {}'.format(
DEFAULT_DB_URL))
@click.pass_context
def cli(ctx, verbose, db_url):
if verbose:
click.echo('Verbose mode on.')
try:
engine = sqlalchemy.create_engine(db_url, echo=verbose)
metadata.create_all(engine)
if verbose: click.echo("Created table 'paths' if it did not exist.")
ctx.obj = State(verbose, engine)
except sqlalchemy.exc.SQLAlchemyError as e:
echo_error('Unable to connect to {}.'.format(db_url))
echo_error('{}.'.format(e))
raise click.Abort
# ====================================
# Prepare Subcommand
# ====================================
@cli.command(short_help='Add file paths from directories.')
@click.argument('directories', type=click.Path(exists=True,
dir_okay=True, readable=True, resolve_path=True),
nargs=-1)
@click.option('--cleanup',
help=('Remove the parent directory path '
'from the completed file name in Swift. '
'For example: /home/archivist/a1/b2/c3/file.txt '
'found under the provided path /home/archivist/a1/b2/c3 '
'would be found in Swift with the filename file.txt.'),
default=False, is_flag=True)
@click.pass_context
def prepare(ctx, directories, cleanup):
"""Add the file paths of files in the given directories to the database.
This command does not follow symbolic links to directories,
to avoid infinite recursion.
Any files which cannot be read or directories which cannot
be entered are stored as well, for later review.
"""
if ctx.obj.verbose and len(directories) == 0:
click.echo('No directories specified!')
# Create a closure which accepts an error thrown by os.listdir()
# if there's an error during os.walk()
def error_catch(error):
if ctx.obj.verbose:
echo_error('Error accessing {}'.format(error.filename))
pathtype = 'directory' if os.path.isdir(error.filename) else 'file'
with ctx.obj.engine.begin() as transaction:
exists = transaction.execute(
sqlalchemy.select([paths.c.id]).\
where(paths.c.path == error.filename)).\
fetchone()
if exists is None:
transaction.execute(paths.insert().values(
path=error.filename,
pathtype=pathtype,
accessible=False,
status='unprocessed',
bytes=0,
objectname=''))
file_counter = 0
for directory in directories:
for root, dirs, files in os.walk(directory, onerror=error_catch):
for name in files:
filepath = os.path.join(root, name)
access = os.access(filepath, os.R_OK)
size = os.path.getsize(filepath) if access else 0
if cleanup and filepath.startswith(directory):
objectname = filepath[len(directory):]
else:
objectname = filepath
with ctx.obj.engine.begin() as transaction:
exists = transaction.execute(
sqlalchemy.select([paths.c.id]).\
where(paths.c.path == filepath)).\
fetchone()
if exists is None:
transaction.execute(paths.insert().values(
path=filepath,
pathtype='file',
accessible=access,
status='unprocessed',
bytes=size,
objectname=objectname))
file_counter += 1
if ctx.obj.verbose:
click.echo(filepath)
if ctx.obj.verbose:
click.echo('Number of files processed: {}'.format(file_counter))
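# Hedged usage sketch (the script name is an assumption; the path matches the
# --cleanup example above): populate the database, then inspect the totals.
#
#   $ python swiftbulkuploader.py --verbose prepare --cleanup /home/archivist/a1/b2/c3
#   $ python swiftbulkuploader.py count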
# ====================================
# Count Subcommand
# ====================================
@cli.command(short_help='Count stored paths.')
@click.pass_context
def count(ctx):
"""Outputs the number paths stored in the database."""
count_all = sqlalchemy.select([sqlalchemy.func.count(paths.c.id)])
count_unprocessed = count_all.\
where(paths.c.status == 'unprocessed').\
where(paths.c.accessible == True)
count_processed = count_all.\
where(paths.c.status == 'processed').\
where(paths.c.accessible == True)
count_error = count_all.\
where(paths.c.status == 'error').\
where(paths.c.accessible == True)
count_inaccessible_files = count_all.\
where(paths.c.accessible == False).\
where(paths.c.pathtype == 'file')
count_inaccessible_dirs = count_all.\
where(paths.c.accessible == False).\
where(paths.c.pathtype == 'directory')
with ctx.obj.engine.connect() as connection:
click.echo(' Stored file paths: {}'.format(
connection.execute(count_all).fetchone()[0]))
click.echo(' Accessible, Not Uploaded: {}'.format(
connection.execute(count_unprocessed).fetchone()[0]))
click.echo(' Accessible, Uploaded: {}'.format(
connection.execute(count_processed).fetchone()[0]))
click.echo(' Accessible, Upload Error: {}'.format(
connection.execute(count_error).fetchone()[0]))
click.echo(' Inaccessible file paths: {}'.format(
connection.execute(count_inaccessible_files).fetchone()[0]))
click.echo('Inaccessible directory paths: {}'.format(
connection.execute(count_inaccessible_dirs).fetchone()[0]))
# ====================================
# Dump Subcommand
# ====================================
@cli.command(short_help='Dump stored paths.')
@click.pass_context
def dump(ctx):
"""Outputs the file and directory paths stored in the database."""
with ctx.obj.engine.connect() as connection:
count_all = sqlalchemy.select([sqlalchemy.func.count(paths.c.id)])
if connection.execute(count_all).fetchone()[0] == 0:
click.echo('No paths in database.')
ctx.abort()
click.echo(('Path, Type, Accessible, '
'Status, Size in Bytes, Objectname'))
for row in connection.execute(sqlalchemy.select([paths]).\
order_by(paths.c.path)):
click.echo('{}, {}, {}, {}, {}, {}'.format(
row[paths.c.path],
row[paths.c.pathtype],
row[paths.c.accessible],
row[paths.c.status],
row[paths.c.bytes],
row[paths.c.objectname]))
# ====================================
# Clear Subcommand
# ====================================
@cli.command(short_help='Delete all stored paths.')
@click.pass_context
def clear(ctx):
"""Clears the database of stored paths."""
metadata.drop_all(ctx.obj.engine)
# ====================================
# Upload Subcommand
# ====================================
@cli.command(short_help='Upload all stored paths.')
@click.option('--username',
help='Username, or use OS_USERNAME environment variable.',
required=True, envvar='OS_USERNAME')
@click.option('--password',
help='Password, or use OS_PASSWORD environment variable.',
prompt=True, hide_input=True,
required=True, envvar='OS_PASSWORD',)
@click.option('--tenant-name',
help='Tenant Name, or use OS_TENANT_NAME environment variable.',
required=True, envvar='OS_TENANT_NAME')
@click.option('--auth-url',
help='Auth URL, or use OS_AUTH_URL environment variable.',
required=True, envvar='OS_AUTH_URL')
@click.option('--auth-version',
help='Auth version.',
default=AUTH_VERSION)
@click.option('--region-name',
help='Region Name, or use OS_REGION_NAME environment variable.',
required=True, envvar='OS_REGION_NAME')
@click.option('--debug',
help=('Pass swift upload the debug option, '
'which will show the curl commands and results of all '
'http queries regardless of result status.'),
default=False, is_flag=True)
@click.option('--info',
help=('Pass swift upload the info option, '
'which will show the curl commands and results of all '
'http queries which return an error.'),
default=False, is_flag=True)
@click.option('--segment-size',
help='Pass swift upload the segment-size option. Default is 1G.',
default='1G')
@click.option('--batch-size',
help='Number of subcommands to run in parallel. Default is 5.',
default=5)
@click.argument('container', required=True, nargs=1)
@click.pass_context
def upload(ctx, username, password, tenant_name,
auth_url, auth_version, region_name,
debug, info, segment_size, batch_size, container):
"""Upload all accessible paths to the given container."""
count = sqlalchemy.select([sqlalchemy.func.count(paths.c.id)]).\
where(paths.c.status == 'unprocessed').\
where(paths.c.accessible == True)
with ctx.obj.engine.connect() as connection:
number_of_paths = connection.execute(count).fetchone()[0]
width_number_paths = len(str(number_of_paths))
if number_of_paths == 0:
click.echo('No files are ready to upload.')
ctx.abort()
bytes_sum = sqlalchemy.select([sqlalchemy.func.sum(paths.c.bytes)]).\
where(paths.c.status == 'unprocessed').\
where(paths.c.accessible == True)
with ctx.obj.engine.connect() as connection:
total_bytes = connection.execute(bytes_sum).fetchone()[0]
width_total_bytes = len(str(total_bytes))
# Build up the swift command
command = ['swift']
if ctx.obj.verbose:
command.append('--verbose')
else:
command.append('--quiet')
if debug:
command.append('--debug')
if info:
command.append('--info')
command.append('--os-username={}'.format(username))
command.append('--os-password={}'.format(password))
command.append('--os-tenant-name={}'.format(tenant_name))
command.append('--os-auth-url={}'.format(auth_url))
command.append('--auth-version={}'.format(auth_version))
command.append('upload')
command.append('--segment-size={}'.format(segment_size))
command.append(container)
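# For illustration only (hypothetical credentials and container name), the
# assembled base command resembles:
#   swift --quiet --os-username=archivist --os-password=... \
#       --os-tenant-name=archive --os-auth-url=https://keystone.example/v2.0 \
#       --auth-version=2 upload --segment-size=1G mycontainer
# with --object-name=<objectname> and the file path appended per file below.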
# Make the output prettier by justifying the headers.
widest_possible_paths_progress = len("{0}/{0} 100.00%".format(
number_of_paths))
widest_possible_bytes_progress = len("{0}/{0} 100.00%".format(
total_bytes))
paths_header = 'Number of Paths Processed'.ljust(
widest_possible_paths_progress)
bytes_header = 'Number of Bytes Processed'.ljust(
widest_possible_bytes_progress)
# Print the header
click.echo('{} | {} | Current File Path'.format(
paths_header,
bytes_header))
# Keep track of our batched jobs
Job = collections.namedtuple('job', ('path', 'process'))
jobs = []
# A closure that is run to collect the output of each job.
def check_result(job):
returncode = job.process.wait()
if returncode != 0:
status_update = 'error'
echo_error('File Upload Error: {}.'.format(job.path))
else:
status_update = 'processed'
with ctx.obj.engine.begin() as transaction:
transaction.execute(paths.update().\
values(status=status_update).\
where(paths.c.path == job.path))
# A select statement to retrieve one unprocessed path.
path_to_upload = sqlalchemy.select([paths]).\
where(paths.c.status == 'unprocessed').\
where(paths.c.accessible == True).\
limit(batch_size)
# Keep track of our progress with these variables
processed_paths = 0
processed_bytes = 0
# Using a surrounding try to catch KeyboardInterrupt (ctrl^c)
# during execution, loop forever while there are still paths
# left to process.
try:
while True:
with ctx.obj.engine.connect() as connection:
rows = connection.execute(path_to_upload).fetchall()
if len(rows) == 0: #out of rows
break
for row in rows:
uploadcommand = copy.deepcopy(command)
uploadcommand.append('--object-name={}'.format(
row[paths.c.objectname]))
uploadcommand.append(row[paths.c.path])
if ctx.obj.verbose:
click.echo('\nRunning command:\n{}'.format(
' '.join(uploadcommand)))
# subprocess.Popen issues the command in a new process.
# start_new_session=True means that the subprocess
# won't get the SIGINT signal when we press ctrl-c
# or equivalent, letting the last batch of subcommands
# complete.
jobs.append(Job(row[paths.c.path],
subprocess.Popen(uploadcommand,
start_new_session=True)))
processed_bytes += row[paths.c.bytes]
processed_paths += 1
# Use the carriage return to move the cursor
# back to the beginning of the line.
if not ctx.obj.verbose:
click.echo('\r', nl=False)
# Pretty print the progress indicators
j_processed_paths = str(processed_paths).rjust(width_number_paths)
paths_percent = processed_paths/number_of_paths
paths_progress = '{}/{} {:.2%}'.format(j_processed_paths,
number_of_paths,
paths_percent)
j_paths_progress = paths_progress.rjust(len(paths_header))
j_processed_bytes = str(processed_bytes).rjust(width_total_bytes)
bytes_percent = processed_bytes/total_bytes
bytes_progress = '{}/{} {:.2%}'.format(j_processed_bytes,
total_bytes,
bytes_percent)
j_bytes_progress = bytes_progress.rjust(len(bytes_header))
click.echo('{} | {} | {}'.format(j_paths_progress,
j_bytes_progress,
row[paths.c.path]), nl=False)
for job in jobs:
check_result(job)
jobs = []
except KeyboardInterrupt:
try:
click.echo("\nWaiting for upload commands to complete... ", nl=False)
for job in jobs:
check_result(job)
click.echo("Done!")
except KeyboardInterrupt:
click.echo("\nKilling outstanding upload commands... ", nl=False)
for job in jobs:
job.process.kill()
click.echo("Done!")
raise
raise
click.echo("\nDone!")
if __name__ == '__main__':
cli()
|
|
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
import copy as cp
from warnings import warn
import numpy as np
from .. import Epochs, compute_proj_evoked, compute_proj_epochs
from ..utils import logger, verbose
from .. import pick_types
from ..io import make_eeg_average_ref_proj
from .ecg import find_ecg_events
from .eog import find_eog_events
def _safe_del_key(dict_, key):
""" Aux function
Use this function when preparing rejection parameters
instead of directly deleting keys.
"""
if key in dict_:
del dict_[key]
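# Minimal illustration (hypothetical dict, not part of the public API):
#   _reject = dict(grad=2000e-13, eeg=50e-6)
#   _safe_del_key(_reject, 'mag')   # no-op, 'mag' is absent
#   _safe_del_key(_reject, 'grad')  # removes the 'grad' entry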
@verbose
def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
n_grad, n_mag, n_eeg, l_freq, h_freq,
average, filter_length, n_jobs, ch_name,
reject, flat, bads, avg_ref, no_proj, event_id,
exg_l_freq, exg_h_freq, tstart, qrs_threshold,
filter_method, iir_params=None, verbose=None):
"""Compute SSP/PCA projections for ECG or EOG artifacts
Note: raw has to be constructed with preload=True (or string)
Warning: raw will be modified by this function
Parameters
----------
mode : string ('ECG', or 'EOG')
What type of events to detect.
raw : mne.io.Raw
Raw input file.
raw_event : mne.io.Raw or None
Raw file to use for event detection (if None, raw is used).
tmin : float
Time before event in seconds.
tmax : float
Time after event in seconds.
n_grad : int
Number of SSP vectors for gradiometers.
n_mag : int
Number of SSP vectors for magnetometers.
n_eeg : int
Number of SSP vectors for EEG.
l_freq : float | None
Filter low cut-off frequency in Hz.
h_freq : float | None
Filter high cut-off frequency in Hz.
average : bool
Compute SSP after averaging.
filter_length : str | int | None
Number of taps to use for filtering.
n_jobs : int
Number of jobs to run in parallel.
ch_name : string (or None)
Channel to use for ECG event detection.
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
bads : list
List with (additional) bad channels.
avg_ref : bool
Add EEG average reference proj.
no_proj : bool
Exclude the SSP projectors currently in the fiff file.
event_id : int
ID to use for events.
exg_l_freq : float
Low pass frequency applied for filtering EXG channel.
exg_h_freq : float
High pass frequency applied for filtering EXG channel.
tstart : float
Start artifact detection after tstart seconds.
qrs_threshold : float | str
Between 0 and 1. qrs detection threshold. Can also be "auto" to
automatically choose the threshold that generates a reasonable
number of heartbeats (40-160 beats / min). Only for ECG.
filter_method : str
Method for filtering ('iir' or 'fft').
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
proj : list
Computed SSP projectors.
events : ndarray
Detected events.
"""
if not raw.preload:
raise ValueError('raw needs to be preloaded, '
'use preload=True in constructor')
if no_proj:
projs = []
else:
projs = cp.deepcopy(raw.info['projs'])
logger.info('Including %d SSP projectors from raw file'
% len(projs))
if avg_ref:
eeg_proj = make_eeg_average_ref_proj(raw.info)
projs.append(eeg_proj)
if raw_event is None:
raw_event = raw
if mode == 'ECG':
logger.info('Running ECG SSP computation')
events, _, _ = find_ecg_events(raw_event, ch_name=ch_name,
event_id=event_id, l_freq=exg_l_freq,
h_freq=exg_h_freq, tstart=tstart,
qrs_threshold=qrs_threshold,
filter_length=filter_length)
elif mode == 'EOG':
logger.info('Running EOG SSP computation')
events = find_eog_events(raw_event, event_id=event_id,
l_freq=exg_l_freq, h_freq=exg_h_freq,
filter_length=filter_length, ch_name=ch_name,
tstart=tstart)
else:
raise ValueError("mode must be 'ECG' or 'EOG'")
# Check to make sure we actually got at least one useable event
if events.shape[0] < 1:
warn('No %s events found, returning None for projs' % mode)
return None, events
logger.info('Computing projector')
my_info = cp.deepcopy(raw.info)
my_info['bads'] += bads
# Handle rejection parameters
if reject is not None: # make sure they didn't pass None
if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'grad')
if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'mag')
if len(pick_types(my_info, meg=False, eeg=True, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'eeg')
if len(pick_types(my_info, meg=False, eeg=False, eog=True,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'eog')
if flat is not None: # make sure they didn't pass None
if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'grad')
if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'mag')
if len(pick_types(my_info, meg=False, eeg=True, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'eeg')
if len(pick_types(my_info, meg=False, eeg=False, eog=True,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'eog')
# exclude bad channels from projection
picks = pick_types(my_info, meg=True, eeg=True, eog=True, ref_meg=False,
exclude='bads')
raw.filter(l_freq, h_freq, picks=picks, filter_length=filter_length,
n_jobs=n_jobs, method=filter_method, iir_params=iir_params)
epochs = Epochs(raw, events, None, tmin, tmax, baseline=None, preload=True,
picks=picks, reject=reject, flat=flat, proj=True)
epochs.drop_bad_epochs()
if epochs.events.shape[0] < 1:
warn('No good epochs found, returning None for projs')
return None, events
if average:
evoked = epochs.average()
ev_projs = compute_proj_evoked(evoked, n_grad=n_grad, n_mag=n_mag,
n_eeg=n_eeg)
else:
ev_projs = compute_proj_epochs(epochs, n_grad=n_grad, n_mag=n_mag,
n_eeg=n_eeg, n_jobs=n_jobs)
for p in ev_projs:
p['desc'] = mode + "-" + p['desc']
projs.extend(ev_projs)
logger.info('Done.')
return projs, events
@verbose
def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4,
n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
average=False, filter_length='10s', n_jobs=1,
ch_name=None, reject=dict(grad=2000e-13, mag=3000e-15,
eeg=50e-6, eog=250e-6),
flat=None, bads=[], avg_ref=False,
no_proj=False, event_id=999, ecg_l_freq=5, ecg_h_freq=35,
tstart=0., qrs_threshold='auto', filter_method='fft',
iir_params=None, copy=True, verbose=None):
"""Compute SSP/PCA projections for ECG artifacts
Note: raw has to be constructed with preload=True (or string)
Warning: raw will be modified by this function
Parameters
----------
raw : mne.io.Raw
Raw input file.
raw_event : mne.io.Raw or None
Raw file to use for event detection (if None, raw is used).
tmin : float
Time before event in seconds.
tmax : float
Time after event in seconds.
n_grad : int
Number of SSP vectors for gradiometers.
n_mag : int
Number of SSP vectors for magnetometers.
n_eeg : int
Number of SSP vectors for EEG.
l_freq : float | None
Filter low cut-off frequency in Hz.
h_freq : float | None
Filter high cut-off frequency in Hz.
average : bool
Compute SSP after averaging.
filter_length : str | int | None
Number of taps to use for filtering.
n_jobs : int
Number of jobs to run in parallel.
ch_name : string (or None)
Channel to use for ECG detection (Required if no ECG found).
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
bads : list
List with (additional) bad channels.
avg_ref : bool
Add EEG average reference proj.
no_proj : bool
Exclude the SSP projectors currently in the fiff file.
event_id : int
ID to use for events.
ecg_l_freq : float
Low pass frequency applied for filtering ECG channel.
ecg_h_freq : float
High pass frequency applied for filtering ECG channel.
tstart : float
Start artifact detection after tstart seconds.
qrs_threshold : float | str
Between 0 and 1. qrs detection threshold. Can also be "auto" to
automatically choose the threshold that generates a reasonable
number of heartbeats (40-160 beats / min).
filter_method : str
Method for filtering ('iir' or 'fft').
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
copy : bool
If False, filtering raw data is done in place. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
proj : list
Computed SSP projectors.
ecg_events : ndarray
Detected ECG events.
"""
if copy is True:
raw = raw.copy()
projs, ecg_events = _compute_exg_proj('ECG', raw, raw_event, tmin, tmax,
n_grad, n_mag, n_eeg, l_freq, h_freq,
average, filter_length, n_jobs,
ch_name, reject, flat, bads, avg_ref,
no_proj, event_id, ecg_l_freq,
ecg_h_freq, tstart, qrs_threshold,
filter_method, iir_params)
return projs, ecg_events
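# Hedged usage sketch (file name is hypothetical and the reader API may differ
# between MNE versions; raw must be preloaded):
#
#   import mne
#   raw = mne.io.Raw('sample_audvis_raw.fif', preload=True)
#   projs, ecg_events = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=0,
#                                        average=True)
#   raw.add_proj(projs)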
@verbose
def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
average=False, filter_length='10s', n_jobs=1,
reject=dict(grad=2000e-13, mag=3000e-15, eeg=500e-6,
eog=np.inf), flat=None, bads=[],
avg_ref=False, no_proj=False, event_id=998, eog_l_freq=1,
eog_h_freq=10, tstart=0., filter_method='fft',
iir_params=None, ch_name=None, copy=True, verbose=None):
"""Compute SSP/PCA projections for EOG artifacts
Note: raw has to be constructed with preload=True (or string)
Warning: raw will be modified by this function
Parameters
----------
raw : mne.io.Raw
Raw input file.
raw_event : mne.io.Raw or None
Raw file to use for event detection (if None, raw is used).
tmin : float
Time before event in seconds.
tmax : float
Time after event in seconds.
n_grad : int
Number of SSP vectors for gradiometers.
n_mag : int
Number of SSP vectors for magnetometers.
n_eeg : int
Number of SSP vectors for EEG.
l_freq : float | None
Filter low cut-off frequency in Hz.
h_freq : float | None
Filter high cut-off frequency in Hz.
average : bool
Compute SSP after averaging.
filter_length : str | int | None
Number of taps to use for filtering.
n_jobs : int
Number of jobs to run in parallel.
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
bads : list
List with (additional) bad channels.
avg_ref : bool
Add EEG average reference proj.
no_proj : bool
Exclude the SSP projectors currently in the fiff file.
event_id : int
ID to use for events.
eog_l_freq : float
Low pass frequency applied for filtering EOG channel.
eog_h_freq : float
High pass frequency applied for filtering EOG channel.
tstart : float
Start artifact detection after tstart seconds.
filter_method : str
Method for filtering ('iir' or 'fft').
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
ch_name: str | None
If not None, specify EOG channel name.
copy : bool
If False, filtering raw data is done in place. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
proj : list
Computed SSP projectors.
eog_events : ndarray
Detected EOG events.
"""
if copy is True:
raw = raw.copy()
projs, eog_events = _compute_exg_proj('EOG', raw, raw_event, tmin, tmax,
n_grad, n_mag, n_eeg, l_freq, h_freq,
average, filter_length, n_jobs,
ch_name, reject, flat, bads, avg_ref,
no_proj, event_id, eog_l_freq,
eog_h_freq, tstart,
qrs_threshold='auto',
filter_method=filter_method,
iir_params=iir_params)
return projs, eog_events
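# Similarly for EOG (hedged sketch, same assumptions as the ECG example above):
#
#   projs, eog_events = compute_proj_eog(raw, n_grad=1, n_mag=1, n_eeg=1,
#                                        average=True)
#   raw.add_proj(projs)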
|
|
# GTK+ GUI for Snidget
from __future__ import print_function
from datetime import date, timedelta
import sys
import pygtk
pygtk.require('2.0')
import gtk
from snidget import settings, database, transaction, plotter
#! special transfer dialog
#! dialog for user settings
#! Add and delete types
#! File/Undo or Reload --> Warning, this will undo all your changes since the last save
#! Accounts/New
#! Accounts/Delete
#! Accounts/Transfer Funds
#! View modes (by type and by recipient, as in -t and -r options)
#! Totals
#! Tooltips
class SnidgetGUI(object):
# --------------------------------------------------------------------------
# Dialogs
# --------------------------------------------------------------------------
def dialog_save(self):
""" Dialog to ask if we should save """
# Create the dialog
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_YES_NO,
None)
dialog.set_markup("<b>Database has been changed.</b>")
dialog.format_secondary_markup("Do you want to save?")
dialog.show_all()
response = dialog.run()
dialog.destroy()
# Return True if dialog answered Yes or No
# Return False if dialog is quit without answering
if response == gtk.RESPONSE_YES:
self.save_database()
return True
elif response == gtk.RESPONSE_NO:
return True
return False
def dialog_edit(self, record=None):
""" Dialog to edit transactions """
dialog = gtk.Dialog("Edit Record",
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK))
# Get the record or get a new record
if record is None:
record = transaction.Transaction(self.database, self.settings)
is_new = True # use to know whether to add or not
else:
is_new = False
# Shorthand alias for the dialog's vbox (the bare name 'vbox' has not always referred to it)
vbox = dialog.vbox
hbox_top = gtk.HBox(True, 0)
vbox.pack_start(hbox_top)
# date
frame_date = gtk.Frame("Date")
hbox_date = gtk.HBox()
frame_date.add(hbox_date)
# date spinners
adjust_year = gtk.Adjustment(record.date.year, 1900, 3000, 1, 1)
spinner_year = gtk.SpinButton(adjust_year, 1, 0)
adjust_month = gtk.Adjustment(record.date.month, 1, 12, 1, 1)
spinner_month = gtk.SpinButton(adjust_month, 1, 0)
adjust_day = gtk.Adjustment(record.date.day, 1, 31, 1, 1)
spinner_day = gtk.SpinButton(adjust_day, 1, 0)
hbox_date.pack_start(spinner_year, False, False, 0)
hbox_date.pack_start(spinner_month, False, False, 0)
hbox_date.pack_start(spinner_day, False, False, 0)
hbox_top.pack_start(frame_date, False, False, 5)
# type
frame_type = gtk.Frame("Type")
type_menu = gtk.combo_box_new_text()
types = self.settings.types()
for index, expense_type in enumerate(types):
type_menu.append_text(expense_type)
if record.type == expense_type:
type_menu.set_active(index)
frame_type.add(type_menu)
hbox_top.pack_start(frame_type, False, False, 5)
# id
frame_id = gtk.Frame("ID (Optional)")
entry_id = gtk.Entry()
entry_id.set_text(record.id)
frame_id.add(entry_id)
hbox_top.pack_start(frame_id, False, False, 5)
# location
entry_location = gtk.Entry()
entry_location.set_text(record.dest)
frame_location = gtk.Frame("Location")
# Setup the auto-completion widget
location_completer = gtk.EntryCompletion()
entry_location.set_completion(location_completer)
location_list = gtk.ListStore(str)
for place in self.database.places():
location_list.append([place])
location_completer.set_model(location_list)
location_completer.set_text_column(0)
location_completer.set_minimum_key_length(2)
frame_location.add(entry_location)
vbox.pack_start(frame_location, False, False, 5)
# description
frame_description = gtk.Frame("Description")
entry_description = gtk.Entry()
entry_description.set_text(record.desc)
# Setup the auto-completion widget
description_completer = gtk.EntryCompletion()
entry_description.set_completion(description_completer)
description_list = gtk.ListStore(str)
for place in self.database.descriptions():
description_list.append([place])
description_completer.set_model(description_list)
description_completer.set_text_column(0)
description_completer.set_minimum_key_length(2)
frame_description.add(entry_description)
vbox.pack_start(frame_description, False, False, 5)
# deltas, one per account
hbox_deltas = gtk.HBox()
acc_frames = []
acc_adjusts = []
acc_spinners = []
for acc in self.settings.visible_accounts():
this_frame = gtk.Frame(self.settings.account_name(acc))
if acc in record.deltas:
this_delta = record.deltas[acc]
else:
this_delta = 0.0
this_adjust = gtk.Adjustment(this_delta, -9999999999, 9999999999, 0.01, 1)
this_spinner = gtk.SpinButton(this_adjust, 0.01, 2)
acc_frames.append(this_frame)
acc_adjusts.append(this_adjust)
acc_spinners.append(this_spinner)
this_frame.add(this_spinner)
hbox_deltas.pack_start(this_frame, False, False, 0)
vbox.pack_start(hbox_deltas, False, False, 5)
# Now show the dialog and get the input!
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
#! Error correction???
record.date = date(spinner_year.get_value_as_int(),
spinner_month.get_value_as_int(),
spinner_day.get_value_as_int())
record.type = types[type_menu.get_active()]
record.dest = entry_location.get_text()
record.desc = entry_description.get_text()
for ind in range(0, len(self.settings.visible_accounts())):
value = acc_spinners[ind].get_value()
if value != 0.0:
if record.type in self.settings.positive_types():
record.deltas[self.settings.visible_account_keys()[ind]] = value
else:
record.deltas[self.settings.visible_account_keys()[ind]] = value*-1.0
record.id = entry_id.get_text()
if is_new:
self.database.add(record)
#! Dialog might have completed correctly without changes
self.database.is_changed = True
self.set_status("Added record with UID %s" % record.uid)
#! Would be nice to only update the one row
self.write_table()
def dialog_value(self):
""" Dialog to set the Value filter """
# Create the dialog
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
None)
dialog.set_markup("<b>Range of values:</b>")
dialog.format_secondary_markup("Use checkbox to impose max/min limits.")
# Default values are zeros, no limits imposed
init_min = 0.0
init_max = 0.0
use_min = False
use_max = False
# Get current filter values
current_values = self.database.filters['values']
if current_values is not None:
# There is a filter
current_values = current_values.split(',')
if current_values[0] != '':
# Use the minimum imposed
use_min = True
init_min = float(current_values[0])
if current_values[1] != '':
# Use the maximum imposed
use_max = True
init_max = float(current_values[1])
# Hboxes for the check box and spinners
hbox_min = gtk.HBox()
hbox_max = gtk.HBox()
# Check to impose limit or not
check_min = gtk.CheckButton(label='Minimum:')
check_max = gtk.CheckButton(label='Maximum:')
# Set defaults
check_min.set_active(use_min)
check_max.set_active(use_max)
# Make the spinners and their adjustments
# args: initial value, min, max, step, right click step
adjust_min = gtk.Adjustment(init_min, -9999999999, 9999999999, 0.1, 1)
adjust_max = gtk.Adjustment(init_max, -9999999999, 9999999999, 0.1, 1)
# args: adjustment, increment, decimals
spinner_min = gtk.SpinButton(adjust_min, 0.1, 2)
spinner_max = gtk.SpinButton(adjust_max, 0.1, 2)
# Pack everything in
hbox_min.pack_start(check_min)
hbox_min.pack_start(spinner_min)
hbox_max.pack_start(check_max)
hbox_max.pack_start(spinner_max)
dialog.vbox.pack_start(hbox_min)
dialog.vbox.pack_start(hbox_max)
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
min_val = spinner_min.get_value()
max_val = spinner_max.get_value()
use_min = check_min.get_active()
use_max = check_max.get_active()
new_values = None
if use_min or use_max:
# Build up a filter string
new_values = ''
if use_min:
new_values = str(min_val)
new_values += ','
if use_max:
new_values += str(max_val)
self.database.filters['values'] = new_values
#! Should make status smarter about what happened
self.set_status("Limiting values: %s" % new_values)
self.write_table()
def dialog_type(self):
""" Dialog to set the Type filter """
# Create the dialog
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
None)
dialog.set_markup("<b>Included Types:</b>")
# Get which types are currently included by the filter
# None means all types are included
current_types = self.database.filters['types']
if current_types is None:
current_types = self.settings.types()
else:
current_types = current_types.split(',')
# Add all the types to the dialog, checked or not
type_checks = []
for expense_type in self.settings.types():
this_check = gtk.CheckButton(label=expense_type)
if expense_type in current_types:
this_check.set_active(True)
type_checks.append(this_check)
dialog.vbox.pack_start(this_check, False, False, 0)
# could use something like
#button.connect("toggled", self.callback, "check button 1")
# to make live updates? could get intense for big tables
# maybe a "preview" options?
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
# Write the selected types into a comma separated list
new_types = ''
for index, type_check in enumerate(type_checks):
if type_check.get_active() is True:
new_types += self.settings.types()[index] + ','
# Cut off the last comma
new_types = new_types[0:-1]
# Set the new filter
if not new_types:
self.database.filters['types'] = None
else:
self.database.filters['types'] = new_types
# Update the table
self.set_status("Showing types: %s" % new_types)
self.write_table()
def dialog_account(self):
""" Dialog to set the Account filter """
# Create the dialog
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
None)
dialog.set_markup("<b>Included Accounts:</b>")
# Get which accounts are currently included by the filter
# None means all accounts are included
# Recall that accounts are given by NAME in the filter
current_accounts = self.database.filters['accounts']
if current_accounts is None:
current_accounts = self.settings.account_names()
else:
current_accounts = current_accounts.split(',')
# Add all the types to the dialog, checked or not
account_checks = []
for account in self.settings.account_names():
this_check = gtk.CheckButton(label=account)
if account in current_accounts:
this_check.set_active(True)
account_checks.append(this_check)
dialog.vbox.pack_start(this_check, False, False, 0)
# could use something like
#button.connect("toggled", self.callback, "check button 1")
# to make live updates? could get intense for big tables
# maybe a "preview" options?
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
# Write the selected types into a comma separated list
new_accounts = ''
for index, account_check in enumerate(account_checks):
if account_check.get_active() is True:
new_accounts += self.settings.account_names()[index] + ','
# Cut off the last comma
new_accounts = new_accounts[0:-1]
# Set the new filter
if not new_accounts:
self.database.filters['accounts'] = None
else:
self.database.filters['accounts'] = new_accounts
# Update the table
self.set_status("Showing accounts: %s" % new_accounts)
self.write_table()
def dialog_date(self):
""" Dialog to set the Date range """
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
None)
dialog.set_markup("<b>Set Date Range</b>")
dialog.format_secondary_markup("From the first date up to but not including the second.")
cal_start = gtk.Calendar()
cal_end = gtk.Calendar()
# Set the minimum and maximum possible ranges
date_min = self.database.records[0].date
date_max = self.database.settings.TODAY + timedelta(1)
# Parse the filter to set the initial dates
if self.database.filters['dates'] is None:
date_start = date_min
date_end = date_max
elif str.find(self.database.filters['dates'], 'W') >= 0:
# Set start date to nweeks ago
nweeks = int(self.database.filters['dates'][1:])
date_start = self.database.settings.TODAY - timedelta(nweeks*7)
date_end = date_max
else:
dates = str.split(self.database.filters['dates'], ',')
if dates[0] == '':
date_start = date_min
else:
newdate = str.split(dates[0], "-")
date_start = date(int(newdate[0]), int(newdate[1]), int(newdate[2]))
if dates[1] == '':
date_end = date_max
else:
newdate = str.split(dates[1], "-")
date_end = date(int(newdate[0]), int(newdate[1]), int(newdate[2]))
# Note gtk.Calendar starts counting months at 0
cal_start.select_month(date_start.month-1, date_start.year)
cal_start.select_day(date_start.day)
cal_end.select_month(date_end.month-1, date_end.year)
cal_end.select_day(date_end.day)
hbox = gtk.HBox()
hbox.pack_start(cal_start)
hbox.pack_start(cal_end)
dialog.vbox.pack_end(hbox, True, True, 0)
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
date_start = cal_start.get_date()
date_end = cal_end.get_date()
# Again, remember months start at 0
filter_string = "%04d-%02d-%02d,%04d-%02d-%02d" % (
date_start[0], date_start[1]+1, date_start[2],
date_end[0], date_end[1]+1, date_end[2]
)
self.database.filters['dates'] = filter_string
self.set_status("Set date range to %s." % filter_string)
self.write_table()
def dialog_text(self, prompt="Enter text:", default=""):
""" Generic dialog to get text input """
#! Need to make these look prettier
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
None)
dialog.set_markup(prompt)
# entry field
entry = gtk.Entry()
entry.set_text(default)
# This allows you to press enter to submit
entry.connect("activate", self.dialog_response, dialog, gtk.RESPONSE_OK)
hbox = gtk.HBox()
#hbox.pack_start(gtk.Label(prompt), False, 5, 5)
hbox.pack_end(entry)
# Has a vbox built in
dialog.vbox.pack_end(hbox, True, True, 0)
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
text = entry.get_text()
return text
return None
def dialog_uid(self):
""" Dialog for UID filter """
#! Need to make these look prettier
#! Either include current selection or have a right-click menu
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
None)
dialog.set_markup("<b>Exclude UIDs</b>")
dialog.format_secondary_markup("Enter a comma separated list.")
# entry field
entry = gtk.Entry()
# Set current filter as default
current_uids = self.database.filters['uid']
if current_uids is None:
default = ""
else:
default = self.database.filters['uid']
entry.set_text(default)
# This allows you to press enter to submit
entry.connect("activate", self.dialog_response, dialog, gtk.RESPONSE_OK)
# Has a vbox built in
dialog.vbox.pack_end(entry, True, True, 0)
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
new_uids = entry.get_text()
if new_uids == '':
new_uids = None
self.database.filters['uid'] = new_uids
self.set_status("Excluded UID %s" % str(new_uids))
self.write_table()
def dialog_string(self):
""" Dialog to set the String filter """
# Make the dialog
dialog = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
None)
dialog.set_markup("<b>Filter by string</b>")
dialog.format_secondary_markup("Use '!' as the first character to exclude a string.")
# entry field
entry = gtk.Entry()
# Set the default if a string filter exists
if self.database.filters['string'] is None:
default = ''
else:
default = self.database.filters['string']
entry.set_text(default)
# This allows you to press enter to submit
entry.connect("activate", self.dialog_response, dialog, gtk.RESPONSE_OK)
hbox = gtk.HBox()
#hbox.pack_start(gtk.Label(prompt), False, 5, 5)
hbox.pack_end(entry)
# Has a vbox built in
dialog.vbox.pack_end(hbox, True, True, 0)
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
text = entry.get_text()
if text == '':
# empty string means no filter
self.database.filters['string'] = None
self.set_status("String filter removed.")
else:
self.database.filters['string'] = str(text)
self.set_status("String filter '%s' applied." % str(text))
self.write_table()
def dialog_response(self, entry, dialog, response):
""" Forward an entry's "activate" (Enter key) signal as the dialog response """
dialog.response(response)
# --------------------------------------------------------------------------
# Button callback functions
# --------------------------------------------------------------------------
def call_showall(self, widget, data):
""" Write table with no filters """
self.database.reset_filters()
self.set_status("Reset all filters.")
self.write_table()
def call_defaults(self, widget, data):
""" Write table with default filters """
self.database.set_filter_defaults()
self.set_status("Applied default filters.")
self.write_table()
def call_expenses(self, widget, data):
""" Set filter to expense types """
#! Will want to generalize what an expense type is
self.database.filters['types'] = 'Food,School,Household,Extras'
self.set_status("Applied filter types: %s" % self.database.filters['types'])
self.write_table()
def call_type(self, widget, data):
""" Call the type filter dialog """
self.dialog_type()
def call_dates(self, widget, data):
""" Call the date filter dialog """
self.dialog_date()
def call_account(self, widget, data):
self.dialog_account()
def call_string(self, widget, data):
""" Call the string filter dialog """
self.dialog_string()
def call_value(self, widget, data):
self.dialog_value()
def call_uid(self, widget, data):
self.dialog_uid()
def call_plot(self, widget, data):
plotter.plot_window(self.database)
def call_new(self, widget, data):
self.dialog_edit()
def call_delete(self, widget, data):
print("Not implemented.")
def call_download(self, widget, data):
print("Not implemented.")
def call_sort(self, widget, data):
self.database.sort()
self.write_table(save_state=False)
# --------------------------------------------------------------------------
# Menu functions
# --------------------------------------------------------------------------
def menu_new(self, action):
self.dialog_edit()
def menu_back(self, action):
self.back_state()
def menu_forward(self, action):
self.forward_state()
def menu_recipients(self, action):
self.display_mode = "Recipients"
self.set_status("Viewing by recipient")
self.write_table()
def menu_types(self, action):
self.display_mode = "Types"
self.set_status("Viewing by type")
self.write_table()
def menu_transactions(self, action):
self.display_mode = "Transactions"
self.set_status("Viewing by transaction")
self.write_table()
def menu_quit(self, action):
self.quit_program()
# --------------------------------------------------------------------------
# Save and change state
# --------------------------------------------------------------------------
def save_state(self):
""" Push current state onto history """
state = {
'mode': self.display_mode,
'filters': self.database.filters.copy(),
'status': self.status_text
}
# Drop things in the forward direction
self.history = self.history[0:self.history_index+1]
self.history.append(state) # add our state to the end
self.history_index = len(self.history) - 1  # record our new position in the history
def back_state(self):
""" Move back one step in the view history """
if self.history_index > 0:
# move one down in the history list
self.history_index = self.history_index - 1
# Load the state
state = self.history[self.history_index]
self.display_mode = state['mode']
self.database.filters = state['filters']
self.set_status(state['status'])
self.write_table(save_state=False)
def forward_state(self):
""" Move forward one step in the view history """
if self.history_index < len(self.history) - 1:
self.history_index = self.history_index + 1
state = self.history[self.history_index]
self.display_mode = state['mode']
self.database.filters = state['filters']
self.set_status(state['status'])
self.write_table(save_state=False)
# --------------------------------------------------------------------------
# Dealing with the table
# --------------------------------------------------------------------------
def get_table(self):
""" Write the database into a ListStore for TreeView """
listmodel = gtk.ListStore(object)
self.database.apply_filters()
for record in self.database.records:
if record.visible:
listmodel.append([record])
return listmodel
def write_table(self, save_state=True):
""" Set the TreeView with the current database """
# First we need to save the current state
if save_state:
self.save_state()
if self.display_mode == "Transactions":
# Now get the new listmodel
listmodel = self.get_table()
self.treeview.set_model(listmodel)
self.treeview.columns_autosize()
#! Tooltips... have a column of .value()?
#! Is there not a function I can define?
self.treeview.set_tooltip_column(0)
return
elif self.display_mode == "Types":
print("Don't know how to write table in this display mode")
elif self.display_mode == "Recipients":
print("Don't know how to write table in this display mode")
else:
print("Don't know this display mode")
def cell_value(self, column, cell, model, iter, n):
""" Get the value of the current record in column n """
record = model.get_value(iter, 0)
cell.set_property('text', record.tuple()[n])
return
def row_doubleclick(self, treeview, path, column):
# First we get the TreeSelection object
treeselection = treeview.get_selection()
# Then we get a (model, iter) tuple
treemodeliter = treeselection.get_selected()
# We use the model and iter to get the record
model = treemodeliter[0]
it = treemodeliter[1]
if self.display_mode == "Transactions":
record = model.get_value(it, 0)
# Now we can act on the record
self.dialog_edit(record)
elif self.display_mode == "Types":
pass
elif self.display_mode == "Recipients":
pass
else:
print("Error: display mode not recognized on double click.")
# --------------------------------------------------------------------------
# Program management type stuff
# --------------------------------------------------------------------------
def delete_event(self, widget, event, data=None):
""" Window's close button pressed """
self.quit_program()
def save_database(self):
""" Save the database """
self.database.save()
self.set_status("Saved the database.")
def quit_program(self):
ok = True # will be made False if save dialog canceled
if self.database.is_changed is True:
ok = self.dialog_save()
# We will be quitting if OK, so print the "Saved" message to the terminal
if ok is True:
print("Saved database")
# Go ahead and quit unless something happened
if ok is True:
gtk.main_quit()
def set_status(self, string=""):
""" Set the text of the status bar """
#! This needs to be done BEFORE write_table to keep state history accurate
#! ... meaning this should probably be done IN write_table before save_state
context_id = self.statusbar.get_context_id('status')
self.statusbar.push(context_id, string)
self.status_text = string
def __init__(self):
self.settings = settings.Settings()
self.database = database.Database(self.settings)
# Internals
self.display_mode = "Transactions"
self.history = [] # to save view history
self.history_index = 0
self.status_text = '' # because there's no gtk.StatusBar.get_text()?
# Gui
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title("Snidget")
self.window.set_border_width(0)
self.window.set_default_size(1000, 600)
self.window.set_icon_from_file("%s/%s" % (sys.path[0], "snidget.png"))
# For signals from window manager
self.window.connect("delete_event", self.delete_event)
# A VBox for Menu / Main / Statusbar
self.box_vmain = gtk.VBox(False, 0)
self.window.add(self.box_vmain)
# Status bar
self.statusbar = gtk.Statusbar()
self.box_vmain.pack_end(self.statusbar, False, False, 0)
self.set_status("Welcome to Snidget!")
self.statusbar.set_has_resize_grip(True)
self.statusbar.show()
# Define menu/toolbars for the top of vbox
ui = '''<ui>
<menubar name="MenuBar">
<menu action="File">
<menuitem action="New"/>
<menuitem action="Save"/>
<menuitem action="Quit"/>
</menu>
<menu action="View">
<menuitem action="Back"/>
<menuitem action="Forward"/>
</menu>
</menubar>
</ui>'''
# to be added to menu UI when view mode functionality works
# <separator name="sep1"/>
# <menuitem action="By Transaction"/>
# <menuitem action="By Recipient"/>
# <menuitem action="By Category"/>
uimanager = gtk.UIManager()
# Add accelerator group to top level window
accelgroup = uimanager.get_accel_group()
self.window.add_accel_group(accelgroup)
# Create an ActionGroup and add stuff to it
actiongroup = gtk.ActionGroup('SnidgetGUI')
self.actiongroup = actiongroup
actiongroup.add_actions([
('File', None, '_File'),
('New', None, '_New', "<control>n", "New transaction", self.menu_new),
('Save', None, '_Save', "<control>s", "Save the database", lambda action: self.save_database()),
('Quit', None, '_Quit', "<control>q", "Quit Snidget", self.menu_quit),
('View', None, '_View'),
('Back', None, '_Back', "<control>b", "Go back", self.menu_back),
('Forward', None, '_Forward', "<control>f", "Go forward", self.menu_forward),
('By Recipient', None, '_By Recipient', "<control>r",
"View total for each recipient", self.menu_recipients),
('By Transaction', None, '_By Transaction', "<control>t",
"View transactions individually", self.menu_transactions),
('By Category', None, '_By Category', "<control>c",
"View total for each type of transaction", self.menu_types)
])
# Add actiongroup and ui to uimanager
uimanager.insert_action_group(actiongroup, 0)
uimanager.add_ui_from_string(ui)
# Create and pack menubar into vbox
menubar = uimanager.get_widget('/MenuBar')
self.box_vmain.pack_start(menubar, False)
menubar.show()
# Main box where the main UI (table and buttons) go
self.box_main = gtk.HBox(False, 0) # Inhomogeneous sizing, spacing=0
self.box_vmain.pack_start(self.box_main, True, True, 0)
self.box_left = gtk.VBox(True, 0)
self.box_right = gtk.VBox(False, 0)
self.box_main.pack_start(self.box_left, True, True, 0)
self.box_main.pack_start(self.box_right, False, False, 2)
# -- Buttons for the right hand side ---------------
expand = False # True -- button will expand to fill available space
fill = False # True -- when button expands, make button bigger
padding = 0
# Add a button
self.button_showall = gtk.Button("Show All")
self.button_showall.set_tooltip_text("Clear filters and show all transactions")
self.button_showall.connect("clicked", self.call_showall, "Showall button")
self.box_right.pack_start(self.button_showall, expand, fill, padding)
self.button_showall.show()
self.button_defaults = gtk.Button("Defaults")
self.button_defaults.set_tooltip_text("Reset filters to default")
self.button_defaults.connect("clicked", self.call_defaults, "Defaults button")
self.box_right.pack_start(self.button_defaults, expand, fill, padding)
self.button_defaults.show()
self.separator1 = gtk.HSeparator()
self.box_right.pack_start(self.separator1, expand, fill, 5)
self.separator1.show()
self.button_expenses = gtk.Button("Expenses")
self.button_expenses.set_tooltip_text("Show only expenses")
self.button_expenses.connect("clicked", self.call_expenses, "Expenses button")
self.box_right.pack_start(self.button_expenses, expand, fill, padding)
self.button_expenses.show()
self.button_type = gtk.Button("Type")
self.button_type.set_tooltip_text("Select only particular types")
self.button_type.connect("clicked", self.call_type, "Type button")
self.box_right.pack_start(self.button_type, expand, fill, padding)
self.button_type.show()
self.button_dates = gtk.Button("Dates")
self.button_dates.set_tooltip_text("Set the date range")
self.button_dates.connect("clicked", self.call_dates, "Dates button")
self.box_right.pack_start(self.button_dates, expand, fill, padding)
self.button_dates.show()
self.button_account = gtk.Button("Account")
self.button_account.set_tooltip_text("Show transactions on an account")
self.button_account.connect("clicked", self.call_account, "Account button")
self.box_right.pack_start(self.button_account, expand, fill, padding)
self.button_account.show()
self.button_string = gtk.Button("String")
self.button_string.set_tooltip_text("Filter by location or description")
self.button_string.connect("clicked", self.call_string, "String button")
self.box_right.pack_start(self.button_string, expand, fill, padding)
self.button_string.show()
self.button_value = gtk.Button("Value")
self.button_value.set_tooltip_text("Filter by value")
self.button_value.connect("clicked", self.call_value, "Value button")
self.box_right.pack_start(self.button_value, expand, fill, padding)
self.button_value.show()
self.button_uid = gtk.Button("UID")
self.button_uid.set_tooltip_text("Exclude particular transactions")
self.button_uid.connect("clicked", self.call_uid, "UID button")
self.box_right.pack_start(self.button_uid, expand, fill, padding)
self.button_uid.show()
# Bottom buttons
self.button_quit = gtk.Button("Quit")
self.button_quit.set_tooltip_text("Save and quit")
self.button_quit.connect("clicked", self.delete_event, None)
self.box_right.pack_end(self.button_quit, expand, fill, padding)
self.button_quit.show()
self.separator2 = gtk.HSeparator()
self.box_right.pack_end(self.separator2, expand, fill, 5)
self.separator2.show()
self.button_sort = gtk.Button("Sort")
self.button_sort.set_tooltip_text("Sort transactions by date")
self.button_sort.connect("clicked", self.call_sort, "Sort button")
self.box_right.pack_end(self.button_sort, expand, fill, padding)
self.button_sort.show()
self.button_delete = gtk.Button("Delete")
self.button_delete.set_tooltip_text("Delete transaction")
self.button_delete.connect("clicked", self.call_delete, "Delete button")
self.box_right.pack_end(self.button_delete, expand, fill, padding)
self.button_delete.show()
self.button_new = gtk.Button("New")
self.button_new.set_tooltip_text("New transaction")
self.button_new.connect("clicked", self.call_new, "New button")
self.box_right.pack_end(self.button_new, expand, fill, padding)
self.button_new.show()
self.button_plot = gtk.Button("Plot")
self.button_plot.set_tooltip_text("Plot current transactions")
self.button_plot.connect("clicked", self.call_plot, "Plot button")
self.box_right.pack_end(self.button_plot, expand, fill, padding)
self.button_plot.show()
# TreeView for showing transactions
self.treeview = gtk.TreeView()
#! Need to make columns flexible for different view modes
column_names = self.database.headings()
self.tvcolumn = [None] * len(column_names)
for index, column_name in enumerate(column_names):
cell = gtk.CellRendererText()
self.tvcolumn[index] = gtk.TreeViewColumn(column_name, cell)
if index > 3:
cell.set_property('xalign', 1.0)
self.tvcolumn[index].set_cell_data_func(cell, self.cell_value, index)
self.tvcolumn[index].set_resizable(True)
if index < 4 and index > 0:
self.tvcolumn[index].set_expand(True)
self.treeview.append_column(self.tvcolumn[index])
self.treeview.connect('row-activated', self.row_doubleclick)
# Left box
self.scrollbox = gtk.ScrolledWindow()
self.scrollbox.add(self.treeview)
self.scrollbox.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
# Allow scrollbox to expand to fill the space
self.box_left.pack_start(self.scrollbox, True, True, 0)
self.treeview.show()
self.scrollbox.show()
# Populate the table
self.write_table()
# Display the UI
self.box_left.show()
self.box_right.show()
self.box_vmain.show()
self.box_main.show()
#self.window.maximize()
self.window.show()
def start(self):
gtk.main()
return 0
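# Launch sketch (hedged): this module appears to be imported by a separate
# entry point, but the GUI could be started directly along these lines:
#
#     if __name__ == '__main__':
#         gui = SnidgetGUI()
#         sys.exit(gui.start())
#
# The guard is an assumption; nothing in this file shows how Snidget is
# actually launched.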
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._server_dns_aliases_operations import build_acquire_request_initial, build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_server_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServerDnsAliasesOperations:
"""ServerDnsAliasesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
server_name: str,
dns_alias_name: str,
**kwargs: Any
) -> "_models.ServerDnsAlias":
"""Gets a server DNS alias.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server that the alias is pointing to.
:type server_name: str
:param dns_alias_name: The name of the server dns alias.
:type dns_alias_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServerDnsAlias, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ServerDnsAlias
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerDnsAlias"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
dns_alias_name=dns_alias_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServerDnsAlias', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/dnsAliases/{dnsAliasName}'} # type: ignore
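# Usage sketch (hedged): callers normally reach this operation group through a
# generated management client rather than instantiating it directly; assuming
# such a client instance, the call above is awaited as:
#
#     alias = await client.server_dns_aliases.get(resource_group_name,
#                                                 server_name, dns_alias_name)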
async def _create_or_update_initial(
self,
resource_group_name: str,
server_name: str,
dns_alias_name: str,
**kwargs: Any
) -> Optional["_models.ServerDnsAlias"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServerDnsAlias"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
dns_alias_name=dns_alias_name,
subscription_id=self._config.subscription_id,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServerDnsAlias', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServerDnsAlias', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/dnsAliases/{dnsAliasName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
dns_alias_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ServerDnsAlias"]:
"""Creates a server DNS alias.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server that the alias is pointing to.
:type server_name: str
:param dns_alias_name: The name of the server dns alias.
:type dns_alias_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServerDnsAlias or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.ServerDnsAlias]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerDnsAlias"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
dns_alias_name=dns_alias_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ServerDnsAlias', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/dnsAliases/{dnsAliasName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
server_name: str,
dns_alias_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
dns_alias_name=dns_alias_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/dnsAliases/{dnsAliasName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
server_name: str,
dns_alias_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the server DNS alias with the given name.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server that the alias is pointing to.
:type server_name: str
:param dns_alias_name: The name of the server dns alias.
:type dns_alias_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
server_name=server_name,
dns_alias_name=dns_alias_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/dnsAliases/{dnsAliasName}'} # type: ignore
@distributed_trace
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ServerDnsAliasListResult"]:
"""Gets a list of server DNS aliases for a server.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server that the alias is pointing to.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServerDnsAliasListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.ServerDnsAliasListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerDnsAliasListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_server.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ServerDnsAliasListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/dnsAliases'} # type: ignore
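# Paging sketch (hedged): the AsyncItemPaged returned above fetches further
# pages lazily, so callers simply iterate; with an assumed client instance:
#
#     async for alias in client.server_dns_aliases.list_by_server(
#             resource_group_name, server_name):
#         print(alias.name)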
async def _acquire_initial(
self,
resource_group_name: str,
server_name: str,
dns_alias_name: str,
parameters: "_models.ServerDnsAliasAcquisition",
**kwargs: Any
) -> Optional["_models.ServerDnsAlias"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServerDnsAlias"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ServerDnsAliasAcquisition')
request = build_acquire_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
dns_alias_name=dns_alias_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._acquire_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServerDnsAlias', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_acquire_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/dnsAliases/{dnsAliasName}/acquire'} # type: ignore
@distributed_trace_async
async def begin_acquire(
self,
resource_group_name: str,
server_name: str,
dns_alias_name: str,
parameters: "_models.ServerDnsAliasAcquisition",
**kwargs: Any
) -> AsyncLROPoller["_models.ServerDnsAlias"]:
"""Acquires server DNS alias from another server.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server that the alias is pointing to.
:type server_name: str
:param dns_alias_name: The name of the server dns alias.
:type dns_alias_name: str
:param parameters:
:type parameters: ~azure.mgmt.sql.models.ServerDnsAliasAcquisition
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServerDnsAlias or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.ServerDnsAlias]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerDnsAlias"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._acquire_initial(
resource_group_name=resource_group_name,
server_name=server_name,
dns_alias_name=dns_alias_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ServerDnsAlias', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_acquire.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/dnsAliases/{dnsAliasName}/acquire'} # type: ignore
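# LRO sketch (hedged): the ``begin_*`` methods above return AsyncLROPoller
# objects; a typical call awaits the poller and then its result, e.g. with an
# assumed azure.mgmt.sql.aio.SqlManagementClient instance:
#
#     poller = await client.server_dns_aliases.begin_acquire(
#         resource_group_name, server_name, dns_alias_name, parameters)
#     alias = await poller.result()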
|
|
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import Mock, call
from kubernetes import client
from .watch import Watch
class WatchTests(unittest.TestCase):
def setUp(self):
# counter for a test that needs test global state
self.callcount = 0
def test_watch_with_decode(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'{"type": "ADDED", "object": {"metadata": {"name": "test1",'
'"resourceVersion": "1"}, "spec": {}, "status": {}}}\n',
'{"type": "ADDED", "object": {"metadata": {"name": "test2",'
'"resourceVersion": "2"}, "spec": {}, "sta',
'tus": {}}}\n'
'{"type": "ADDED", "object": {"metadata": {"name": "test3",'
'"resourceVersion": "3"}, "spec": {}, "status": {}}}\n',
'should_not_happened\n'])
fake_api = Mock()
fake_api.get_namespaces = Mock(return_value=fake_resp)
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
w = Watch()
count = 1
for e in w.stream(fake_api.get_namespaces):
self.assertEqual("ADDED", e['type'])
# make sure decoder worked and we got a model with the right name
self.assertEqual("test%d" % count, e['object'].metadata.name)
# make sure decoder worked and updated Watch.resource_version
self.assertEqual(
"%d" % count, e['object'].metadata.resource_version)
self.assertEqual("%d" % count, w.resource_version)
count += 1
# make sure we can stop the watch so that the last event won't be
# returned
if count == 4:
w.stop()
fake_api.get_namespaces.assert_called_once_with(
_preload_content=False, watch=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
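# Illustration (hedged): outside of these mocked tests, the same generator is
# usually driven by a real API object, e.g.:
#
#     v1 = client.CoreV1Api()
#     w = Watch()
#     for event in w.stream(v1.list_namespace, timeout_seconds=10):
#         print(event['type'], event['object'].metadata.name)
#
# ``CoreV1Api.list_namespace`` is standard kubernetes-client API; the snippet
# is not part of this test suite.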
def test_watch_for_follow(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'log_line_1\n',
'log_line_2\n'])
fake_api = Mock()
fake_api.read_namespaced_pod_log = Mock(return_value=fake_resp)
fake_api.read_namespaced_pod_log.__doc__ = ':param bool follow:\n:return: str'
w = Watch()
count = 1
for e in w.stream(fake_api.read_namespaced_pod_log):
self.assertEqual("log_line_1", e)
count += 1
# make sure we can stop the watch so that the last event won't be
# returned
if count == 2:
w.stop()
fake_api.read_namespaced_pod_log.assert_called_once_with(
_preload_content=False, follow=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
def test_watch_resource_version_set(self):
# https://github.com/kubernetes-client/python/issues/700
# ensure watching from a resource version does not reset to resource
# version 0 after k8s resets the watch connection
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
values = [
'{"type": "ADDED", "object": {"metadata": {"name": "test1",'
'"resourceVersion": "1"}, "spec": {}, "status": {}}}\n',
'{"type": "ADDED", "object": {"metadata": {"name": "test2",'
'"resourceVersion": "2"}, "spec": {}, "sta',
'tus": {}}}\n'
'{"type": "ADDED", "object": {"metadata": {"name": "test3",'
'"resourceVersion": "3"}, "spec": {}, "status": {}}}\n'
]
# return nothing on the first call and values on the second
# this emulates a watch from a rv that returns nothing in the first k8s
# watch reset and values later
def get_values(*args, **kwargs):
self.callcount += 1
if self.callcount == 1:
return []
else:
return values
fake_resp.stream = Mock(
side_effect=get_values)
fake_api = Mock()
fake_api.get_namespaces = Mock(return_value=fake_resp)
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
w = Watch()
# ensure we keep our requested resource version, or the latest returned
# version when the existing versions are older than the requested one;
# needed for the "list existing objects, then watch from there" use case
calls = []
iterations = 2
# first two calls must use the passed rv, the first call is a
# "reset" and does not actually return anything
# the second call must use the same rv but will return values
# (with a wrong rv but a real cluster would behave correctly)
# calls following that will use the rv from those returned values
calls.append(call(_preload_content=False, watch=True,
resource_version="5"))
calls.append(call(_preload_content=False, watch=True,
resource_version="5"))
for i in range(iterations):
# ideally we would want 5 here, but since the rv must be treated as an
# opaque value we cannot interpret or order it, so we rely on k8s
# returning the events completely and in order
calls.append(call(_preload_content=False, watch=True,
resource_version="3"))
for c, e in enumerate(w.stream(fake_api.get_namespaces,
resource_version="5")):
if c == len(values) * iterations:
w.stop()
# check calls are in the list, gives good error output
fake_api.get_namespaces.assert_has_calls(calls)
# more strict test with worse error message
self.assertEqual(fake_api.get_namespaces.mock_calls, calls)
def test_watch_stream_twice(self):
w = Watch(float)
for step in ['first', 'second']:
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=['{"type": "ADDED", "object": 1}\n'] * 4)
fake_api = Mock()
fake_api.get_namespaces = Mock(return_value=fake_resp)
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
count = 1
for e in w.stream(fake_api.get_namespaces):
count += 1
if count == 3:
w.stop()
self.assertEqual(count, 3)
fake_api.get_namespaces.assert_called_once_with(
_preload_content=False, watch=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
def test_watch_stream_loop(self):
w = Watch(float)
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=['{"type": "ADDED", "object": 1}\n'])
fake_api = Mock()
fake_api.get_namespaces = Mock(return_value=fake_resp)
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
count = 0
# when timeout_seconds is set, the stream exits automatically once the timeout is reached
for e in w.stream(fake_api.get_namespaces, timeout_seconds=1):
count = count + 1
self.assertEqual(count, 1)
# with no timeout_seconds, the stream only exits when w.stop() is called
for e in w.stream(fake_api.get_namespaces):
count = count + 1
if count == 2:
w.stop()
self.assertEqual(count, 2)
self.assertEqual(fake_api.get_namespaces.call_count, 2)
self.assertEqual(fake_resp.stream.call_count, 2)
self.assertEqual(fake_resp.close.call_count, 2)
self.assertEqual(fake_resp.release_conn.call_count, 2)
def test_unmarshal_with_float_object(self):
w = Watch()
event = w.unmarshal_event('{"type": "ADDED", "object": 1}', 'float')
self.assertEqual("ADDED", event['type'])
self.assertEqual(1.0, event['object'])
self.assertTrue(isinstance(event['object'], float))
self.assertEqual(1, event['raw_object'])
def test_unmarshal_with_no_return_type(self):
w = Watch()
event = w.unmarshal_event('{"type": "ADDED", "object": ["test1"]}',
None)
self.assertEqual("ADDED", event['type'])
self.assertEqual(["test1"], event['object'])
self.assertEqual(["test1"], event['raw_object'])
def test_unmarshal_with_custom_object(self):
w = Watch()
event = w.unmarshal_event('{"type": "ADDED", "object": {"apiVersion":'
'"test.com/v1beta1","kind":"foo","metadata":'
'{"name": "bar", "resourceVersion": "1"}}}',
'object')
self.assertEqual("ADDED", event['type'])
# make sure decoder deserialized json into dictionary and updated
# Watch.resource_version
self.assertTrue(isinstance(event['object'], dict))
self.assertEqual("1", event['object']['metadata']['resourceVersion'])
self.assertEqual("1", w.resource_version)
def test_unmarshal_with_bookmark(self):
w = Watch()
event = w.unmarshal_event(
'{"type":"BOOKMARK","object":{"kind":"Job","apiVersion":"batch/v1"'
',"metadata":{"resourceVersion":"1"},"spec":{"template":{'
'"metadata":{},"spec":{"containers":null}}},"status":{}}}',
'V1Job')
self.assertEqual("BOOKMARK", event['type'])
        # Watch.resource_version is *not* updated: BOOKMARK is treated the
        # same as ERROR as a quick fix for a decoding exception, so the
        # resource_version in the BOOKMARK event is not used at all.
self.assertEqual(None, w.resource_version)
def test_watch_with_exception(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(side_effect=KeyError('expected'))
fake_api = Mock()
fake_api.get_thing = Mock(return_value=fake_resp)
w = Watch()
try:
for _ in w.stream(fake_api.get_thing):
self.fail(self, "Should fail on exception.")
except KeyError:
pass
# expected
fake_api.get_thing.assert_called_once_with(
_preload_content=False, watch=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
def test_watch_with_error_event(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'{"type": "ERROR", "object": {"code": 410, '
'"reason": "Gone", "message": "error message"}}\n'])
fake_api = Mock()
fake_api.get_thing = Mock(return_value=fake_resp)
w = Watch()
# No events are generated when no initial resourceVersion is passed
# No retry is attempted either, preventing an ApiException
assert not list(w.stream(fake_api.get_thing))
fake_api.get_thing.assert_called_once_with(
_preload_content=False, watch=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
def test_watch_retries_on_error_event(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'{"type": "ERROR", "object": {"code": 410, '
'"reason": "Gone", "message": "error message"}}\n'])
fake_api = Mock()
fake_api.get_thing = Mock(return_value=fake_resp)
w = Watch()
try:
for _ in w.stream(fake_api.get_thing, resource_version=0):
self.fail(self, "Should fail with ApiException.")
except client.rest.ApiException:
pass
# Two calls should be expected during a retry
fake_api.get_thing.assert_has_calls(
[call(resource_version=0, _preload_content=False, watch=True)] * 2)
fake_resp.stream.assert_has_calls(
[call(amt=None, decode_content=False)] * 2)
assert fake_resp.close.call_count == 2
assert fake_resp.release_conn.call_count == 2
def test_watch_with_error_event_and_timeout_param(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'{"type": "ERROR", "object": {"code": 410, '
'"reason": "Gone", "message": "error message"}}\n'])
fake_api = Mock()
fake_api.get_thing = Mock(return_value=fake_resp)
w = Watch()
try:
for _ in w.stream(fake_api.get_thing, timeout_seconds=10):
self.fail(self, "Should fail with ApiException.")
except client.rest.ApiException:
pass
fake_api.get_thing.assert_called_once_with(
_preload_content=False, watch=True, timeout_seconds=10)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
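# A minimal sketch (not part of these tests) of how Watch.stream() is typically used
# against a real cluster; the kubeconfig and CoreV1Api setup below are assumptions
# about the caller's environment, not something the tests above require.
def _example_stream_namespaces():
    from kubernetes import client, config, watch
    config.load_kube_config()  # assumes a local kubeconfig is available
    v1 = client.CoreV1Api()
    w = watch.Watch()
    # stream() yields decoded events until stop() is called or the timeout expires
    for event in w.stream(v1.list_namespace, timeout_seconds=10):
        print("%s %s" % (event['type'], event['object'].metadata.name))
    w.stop()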
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import absolute_import
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from .models import Book
class ArchiveIndexViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def _make_books(self, n, base_date):
for i in range(n):
b = Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100+i,
                pubdate=base_date - datetime.timedelta(days=i))
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_context_object_name(self):
res = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertFalse('latest' in res.context)
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_archive_view_invalid(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/dates/books/invalid/')
def test_paginated_archive_view(self):
self._make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'], Book.objects.dates('pubdate', 'year')[::-1])
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
self._make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist
with self.assertNumQueries(3):
self.client.get('/dates/books/paginated/')
class YearArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.datetime(2008, 10, 1)])
self.assertEqual(res.context['year'], '2008')
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_make_object_list(self):
res = self.client.get('/dates/books/2006/make_object_list/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.datetime(2006, 5, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_empty(self):
res = self.client.get('/dates/books/1999/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/1999/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
def test_year_view_allow_future(self):
# Create a new book in the future
year = datetime.date.today().year + 1
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=datetime.date(year, 1, 1))
res = self.client.get('/dates/books/%s/' % year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/allow_empty/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
res = self.client.get('/dates/books/%s/allow_future/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.datetime(year, 1, 1)])
def test_year_view_paginated(self):
res = self.client.get('/dates/books/2006/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_invalid_pattern(self):
res = self.client.get('/dates/books/no_year/')
self.assertEqual(res.status_code, 404)
class MonthArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_month_view(self):
res = self.client.get('/dates/books/2008/oct/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
self.assertEqual(list(res.context['date_list']), [datetime.datetime(2008, 10, 1)])
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev months must be valid (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
# Since it's allow empty, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], None)
def test_month_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
urlbit = future.strftime('%Y/%b').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'][0].date(), b.pubdate)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['month'], future)
# Since it's allow_future but not allow_empty, next/prev are not
# allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month. So next
# should be in the future (yup, #7164, again)
res = self.client.get('/dates/books/2008/oct/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], future)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/')
self.assertEqual(res.status_code, 200)
def test_month_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/no_month/')
self.assertEqual(res.status_code, 404)
def test_previous_month_without_content(self):
"Content can exist on any day of the previous month. Refs #14711"
self.pubdate_list = [
datetime.date(2010, month, day)
for month,day in ((9,1), (10,2), (11,3))
]
for pubdate in self.pubdate_list:
name = str(pubdate)
Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
res = self.client.get('/dates/books/2010/nov/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
# The following test demonstrates the bug
res = self.client.get('/dates/books/2010/nov/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
# The bug does not occur here because a Book with pubdate of Sep 1 exists
res = self.client.get('/dates/books/2010/oct/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,9,1))
class WeekArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_week_view(self):
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
def test_week_view_allow_empty(self):
res = self.client.get('/dates/books/2008/week/12/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/2008/week/12/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
def test_week_view_allow_future(self):
# January 7th always falls in week 1, given Python's definition of week numbers
future = datetime.date(datetime.date.today().year + 1, 1, 7)
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/week/1/' % future.year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
def test_week_view_paginated(self):
week_start = datetime.date(2008, 9, 28)
week_end = week_start + datetime.timedelta(days=7)
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
def test_week_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/week/no_week/')
self.assertEqual(res.status_code, 404)
def test_week_start_Monday(self):
# Regression for #14752
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
res = self.client.get('/dates/books/2008/week/39/monday/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
class DayArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_day_view(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev days must be valid.
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/1/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
# Since it's allow empty, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
        # allow_empty but not allow_future: next_day should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], None)
def test_day_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['day'], future)
# allow_future but not allow_empty, next/prev must be valid
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month.
res = self.client.get('/dates/books/2008/oct/01/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], future)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
def test_next_prev_context(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.content, "Archive for Oct. 1, 2008. Previous day is May 1, 2006")
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/')
self.assertEqual(res.status_code, 200)
def test_day_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/oct/no_day/')
self.assertEqual(res.status_code, 404)
def test_today_view(self):
res = self.client.get('/dates/books/today/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/today/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['day'], datetime.date.today())
class DateDetailViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_date_detail_by_pk(self):
res = self.client.get('/dates/books/2008/oct/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=1))
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_date_detail_by_slug(self):
res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))
def test_date_detail_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
def test_date_detail_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", slug="new-new", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/new-new/' % urlbit)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], b)
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, "/dates/books/2008/oct/01/nopk/")
def test_get_object_custom_queryset(self):
"""
Ensure that custom querysets are used when provided to
BaseDateDetailView.get_object()
Refs #16918.
"""
res = self.client.get(
'/dates/books/get_object_custom_queryset/2006/may/01/2/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=2))
self.assertEqual(res.context['book'], Book.objects.get(pk=2))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
res = self.client.get(
'/dates/books/get_object_custom_queryset/2008/oct/01/1/')
self.assertEqual(res.status_code, 404)
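# A minimal sketch (not the project's actual URLconf) of the kind of date-based generic
# view configuration these tests exercise; the names below are illustrative assumptions.
from django.views.generic.dates import ArchiveIndexView, YearArchiveView
example_book_archive = ArchiveIndexView.as_view(model=Book, date_field='pubdate')
example_book_year_archive = YearArchiveView.as_view(model=Book, date_field='pubdate',
                                                    make_object_list=True)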
|
|
# PSU Capstone Intel Mood Light 2015-2016
# This program is the user interface that lets the user set profiles on the Arduino.
# It connects to the Arduino over a serial COM port to send and receive profile information.
#
# Adrian Steele ([email protected])
# Bander Alenezi ([email protected])
# Dusan Micic ([email protected])
# Waleed Alhaddad ([email protected])
# Dependencies:
# windows pySerial library
# Tested on Windows 7, python 2.7.6, pyserial 2.7
import argparse
import errno
import os
import sys
import signal
import serial
import socket
import subprocess
import threading
import time
def main():
"""
    Main function; the user should specify the COM port that the Arduino is connected to when starting the program.
"""
parser = argparse.ArgumentParser()
parser.add_argument('comPort', action='store', help="The serial com port that the arduino is connected to")
args = parser.parse_args()
#bind a serial connection to a computer communication port
ser = serial.Serial(args.comPort, 9600, timeout=1)
    #check that the bluetooth connection is open
    if ser.isOpen():
print "Connected to bluetooth module"
else:
print "Could not connect to bluetooth module"
sys.exit()
#get command from user and send to arduino over serial connection
while True:
print "Main Menu"
print "1) Add profile"
print "2) Edit existing profile"
print "3) Remove profile"
print "4) Quit"
cmd = int(raw_input("Enter the number of the command to execute: "))
if cmd == 1:
addProfile(ser)
elif cmd == 2:
editProfile(ser)
elif cmd == 3:
removeProfile(ser)
elif cmd == 4:
break
def addProfile(ser):
"""
Add a new profile to the arduino
Args:
- ser: serial connection object to the arduino
Returns:
None
"""
username = raw_input("Enter the profile name: ")
print "Color options:"
print "1) Green"
print "2) Red"
print "3) Blue"
print "4) Yellow"
print "5) White"
print "6) Purple"
print "7) Orange"
color = int(raw_input("Enter the number of the color to set for the profile: "))
print "Step on the scale to calibrate the user's weight."
ser.write("menu=1,")
ser.write("name=" + username + ",")
ser.write("color=" + str(color) + ",")
ser.write("weight=1,")
while True:
resp = ser.readline()
if resp != '':
break
try:
print "Weight calibrated to: %d" % int(resp.split('=')[1].strip())
except IndexError:
print "Bluetooth communication error with Arduino, please try again.\n"
def getProfiles(ser):
"""
Gets a list of profiles currently stored in the Arduino
Args:
- ser: serial connection object to the arduino
Returns:
A tuple containing:
- Integer representing number of profiles
- List object containing dictionaries of the profiles
"""
ser.write("menu=2,")
while True:
line = ser.readline()
if line != '':
break
try:
# first thing returned from arduino should be the number of profiles it has
numProfiles = int(line.split('=')[1].strip())
except IndexError:
print "Bluetooth communication error with Arduino, please try again.\n"
return (0, [])
profiles = []
for i in range(0, numProfiles):
        # each profile is read in as one line (ending with a newline)
line = ser.readline()
try:
# each item in the profile will be separated by a comma
for item in line.split(','):
#key value pair will be separated by an '='
if "user=" in item:
user = item.split('=')[1].strip()
elif "weight=" in item:
weight = int(item.split('=')[1].strip())
elif "color=" in item:
color = int(item.split('=')[1].strip())
newProfile = {"user": user, "weight": weight, "color": mapVal2Color(color)}
profiles.append(newProfile)
except IndexError:
print "Bluetooth communication error with Arduino, please try again.\n"
return (0, [])
return (numProfiles, profiles)
def mapVal2Color(colorInt):
"""
Maps an integer to a color
Args:
- colorInt: the integer value of the color to map
Returns:
returns a string of the mapped color value
"""
colorDict = {1: "Green",
2: "Red",
3: "Blue",
4: "Yellow",
5: "White",
6: "Purple",
7: "Orange",
}
return colorDict[colorInt]
def editProfile(ser):
"""
Edit an existing profile on the arduino
Args:
- ser: serial connection object to the arduino
Returns:
None
"""
(numProfiles, profiles) = getProfiles(ser)
if numProfiles == 0:
print "No existing profiles found on the Arduino.\n"
return
print "There are currently %d profiles stored on the Arduino." % numProfiles
for i in range(0, len(profiles)):
print "Profile %d" % (i+1)
print " User: %s" % profiles[i]["user"]
print " Weight: %d" % profiles[i]["weight"]
print " Color: %s\n" % profiles[i]["color"]
editIndex = int(raw_input("Enter the number of the profile you want to make edits to: ")) - 1
if editIndex < 0 or editIndex >= len(profiles):
print "Error: Specified profile number out of range."
return
print "Profile %d" % (editIndex+1)
print " 1) User: %s" % profiles[editIndex]["user"]
print " 2) Color: %s" % profiles[editIndex]["color"]
print " 3) Weight: %d\n" % profiles[editIndex]["weight"]
item = int(raw_input("Enter the number of the item you want to modify: "))
if item < 1 or item > 3:
print "Error: specified item to edit out of range (1-3)"
return
if item == 1:
username = raw_input("Enter the profile name: ")
ser.write("menu=3,")
ser.write("profile=" + str(editIndex) + ",");
ser.write("name=" + username + ",")
elif item == 2:
print "Color options:"
print "1) Green"
print "2) Red"
print "3) Blue"
print "4) Yellow"
print "5) White"
print "6) Purple"
print "7) Orange"
color = int(raw_input("Enter the number of the color to set for the profile: "))
if color < 1 or color > 7:
print "Error: specified color out of range (1-7)."
return
ser.write("menu=3,")
ser.write("profile=" + str(editIndex) + ",")
ser.write("color=" + str(color) + ",")
elif item == 3:
ser.write("menu=3,")
ser.write("profile=" + str(editIndex) + ",")
ser.write("weight=1,");
print "Step on the scale to calibrate the user's weight."
        while True:
            resp = ser.readline()
            if resp != '':
                break
        try:
            print "Weight calibrated to: %d" % int(resp.split('=')[1].strip())
        except IndexError:
            print "Bluetooth communication error with Arduino, please try again.\n"
def removeProfile(ser):
"""
Remove a profile from the arduino
Args:
- ser: serial connection object to the arduino
Return:
None
"""
(numProfiles, profiles) = getProfiles(ser)
if numProfiles == 0:
print "No existing profiles found on the Arduino.\n"
return
for i in range(0, len(profiles)):
print "Profile %d" % (i+1)
print " User: %s" % profiles[i]["user"]
print " Weight: %d" % profiles[i]["weight"]
print " Color: %s\n" % profiles[i]["color"]
removeIndex = int(raw_input("Enter the number of the profile you want to remove: ")) - 1
if removeIndex < 0 or removeIndex >= len(profiles):
print "Error: Specified profile number out of range."
return
ser.write("menu=4,")
ser.write("profile=" + str(removeIndex) + ",")
if __name__ == '__main__':
main()
|
|
import os
from CPAC.pipeline import nipype_pipeline_engine as pe
from ..output_func_to_standard import ants_apply_warps_func_mni
from .mocks import configuration_strategy_mock
import CPAC.utils.test_init as test_utils
import nibabel as nb
import pytest
@pytest.mark.skip(reason="no way of currently testing this")
def test_ants_apply_warp_func_mni():
test_name = 'test_ants_apply_warps_func_mni'
# get the config and strat for the mock
c, strat = configuration_strategy_mock()
num_strat = 0
node, out = strat['mean_functional']
mean_functional = node.inputs.file
# build the workflow
workflow = pe.Workflow(name='test_ants_apply_warps_func_mni')
workflow.base_dir = c.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(c.crashLogDirectory)
}
workflow = ants_apply_warps_func_mni(workflow,
'mean_functional_to_standard',
'mean_functional',
'template_brain_for_func_preproc',
num_strat, strat,
interpolation_method = c.funcRegANTSinterpolation,
distcor=False,
map_node=False,
inverse=False,
input_image_type=0,
num_ants_cores=1)
workflow = ants_apply_warps_func_mni(workflow,
'mean_functional_standard_to_original',
'mean_functional_to_standard',
'mean_functional',
num_strat, strat,
interpolation_method = c.funcRegANTSinterpolation,
distcor=False,
map_node=False,
inverse=True,
input_image_type=0,
num_ants_cores=1)
workflow.run()
mean_functional_after_transform=os.path.join(c.workingDirectory, test_name,
'apply_ants_warp_mean_functional_standard_to_original_inverse_0',
'sub-M10978008_ses-NFB3_task-test_bold_calc_tshift_resample_volreg_calc_tstat_antswarp_antswarp.nii.gz')
assert(test_utils.pearson_correlation(mean_functional, mean_functional_after_transform) > .99)
@pytest.mark.skip(reason="no way of currently testing this")
def test_ants_apply_warps_func_mni_mapnode():
test_name = 'test_ants_apply_warps_func_mni_mapnode'
# get the config and strat for the mock
c, strat = configuration_strategy_mock()
num_strat = 0
node, out = strat['dr_tempreg_maps_files']
dr_spatmaps = node.inputs.file
# build the workflow
workflow = pe.Workflow(name='test_ants_apply_warps_func_mni_mapnode')
workflow.base_dir = c.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(c.crashLogDirectory)
}
workflow = ants_apply_warps_func_mni(workflow,
'dr_tempreg_maps_to_standard',
'dr_tempreg_maps_files',
'template_brain_for_func_preproc',
num_strat, strat,
interpolation_method = c.funcRegANTSinterpolation,
distcor=False,
map_node=True,
inverse=False,
input_image_type=0,
num_ants_cores=1)
workflow = ants_apply_warps_func_mni(workflow,
'dr_tempreg_maps_standard_to_original',
'dr_tempreg_maps_to_standard',
'mean_functional',
num_strat, strat,
interpolation_method = c.funcRegANTSinterpolation,
distcor=False,
map_node=True,
inverse=True,
input_image_type=0,
num_ants_cores=8)
workflow.run()
dr_spatmaps_after_transform=[os.path.join(c.workingDirectory, test_name,
'apply_ants_warp_dr_tempreg_maps_standard_to_original_mapnode_inverse_0',
'mapflow', '_apply_ants_warp_dr_tempreg_maps_standard_to_original_mapnode_inverse_0{0}'.format(n),
'temp_reg_map_000{0}_antswarp_antswarp.nii.gz'.format(n))
for n in range(0,10)]
test_results = [ test_utils.pearson_correlation(orig_file, xformed_file) > 0.99 \
for orig_file, xformed_file in zip(dr_spatmaps, dr_spatmaps_after_transform)]
assert all(test_results)
@pytest.mark.skip(reason='needs refactoring')
def test_ants_apply_warp_func_mni_symm():
test_name = 'test_ants_apply_warps_func_mni_symm'
# get the config and strat for the mock
c, strat = configuration_strategy_mock()
num_strat = 0
node, out = strat['mean_functional']
mean_functional = node.inputs.file
# build the workflow
workflow = pe.Workflow(name=test_name)
workflow.base_dir = c.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(c.crashLogDirectory)
}
workflow = ants_apply_warps_func_mni(workflow,
'mean_functional_to_standard_symm',
'mean_functional',
'template_brain_for_func_preproc',
num_strat, strat,
interpolation_method = c.funcRegANTSinterpolation,
distcor=False,
map_node=False,
inverse=False,
symmetry='symmetric',
input_image_type=0,
num_ants_cores=8)
workflow = ants_apply_warps_func_mni(workflow,
'mean_functional_standard_to_original_symm',
'mean_functional_to_standard_symm',
'mean_functional',
num_strat, strat,
interpolation_method = c.funcRegANTSinterpolation,
distcor=False,
map_node=False,
inverse=True,
symmetry='symmetric',
input_image_type=0,
num_ants_cores=1)
retval = workflow.run()
mean_functional_after_transform=os.path.join(c.workingDirectory, test_name,
'apply_ants_warp_mean_functional_standard_to_original_symm_inverse_0',
'sub-M10978008_ses-NFB3_task-test_bold_calc_tshift_resample_volreg_calc_tstat_antswarp_antswarp.nii.gz')
assert(test_utils.pearson_correlation(mean_functional, mean_functional_after_transform) > .93)
@pytest.mark.skip(reason='needs refactoring')
def test_ants_apply_warps_func_mni_mapnode_symm():
test_name = 'test_ants_apply_warps_func_mni_mapnode_symm'
# get the config and strat for the mock
c, strat = configuration_strategy_mock()
num_strat = 0
node, out = strat['dr_tempreg_maps_files']
dr_spatmaps = node.inputs.file
# build the workflow
workflow = pe.Workflow(name=test_name)
workflow.base_dir = c.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(c.crashLogDirectory)
}
workflow = ants_apply_warps_func_mni(workflow,
'dr_tempreg_maps_to_standard_symm',
'dr_tempreg_maps_files',
'template_brain_for_func_preproc',
num_strat, strat,
interpolation_method = c.funcRegANTSinterpolation,
distcor=False,
map_node=True,
inverse=False,
symmetry='symmetric',
input_image_type=0,
num_ants_cores=8)
workflow = ants_apply_warps_func_mni(workflow,
'dr_tempreg_maps_standard_symm_to_original',
'dr_tempreg_maps_to_standard_symm',
'mean_functional',
num_strat, strat,
interpolation_method = c.funcRegANTSinterpolation,
distcor=False,
map_node=True,
inverse=True,
symmetry='symmetric',
input_image_type=0,
num_ants_cores=8)
workflow.run()
dr_spatmaps_after_transform=[os.path.join(c.workingDirectory, test_name,
'apply_ants_warp_dr_tempreg_maps_standard_symm_to_original_mapnode_inverse_0',
'mapflow', '_apply_ants_warp_dr_tempreg_maps_standard_symm_to_original_mapnode_inverse_0{0}'.format(n),
'temp_reg_map_000{0}_antswarp_antswarp.nii.gz'.format(n))
for n in range(0,10)]
r = [test_utils.pearson_correlation(orig_file, xformed_file) \
for orig_file, xformed_file in zip(dr_spatmaps, dr_spatmaps_after_transform)]
print(r)
test_results = [ r_value > 0.93 for r_value in r ]
assert all(test_results)
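# A minimal sketch (an assumption about, not a copy of, CPAC's test_utils.pearson_correlation)
# of the voxelwise similarity check used in the assertions above: load both NIfTI images,
# flatten the voxel data, and take the Pearson correlation coefficient.
def _pearson_correlation_sketch(nifti_file_a, nifti_file_b):
    import numpy as np
    # get_fdata() assumes a reasonably recent nibabel
    a = nb.load(nifti_file_a).get_fdata().ravel()
    b = nb.load(nifti_file_b).get_fdata().ravel()
    return np.corrcoef(a, b)[0, 1]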
|
|
"""Docutils CommonMark parser"""
import sys
from os.path import splitext
from docutils import parsers, nodes
from sphinx import addnodes
from commonmark import Parser
from warnings import warn
if sys.version_info < (3, 0):
from urlparse import urlparse, unquote
else:
from urllib.parse import urlparse, unquote
__all__ = ['CommonMarkParser']
class CommonMarkParser(parsers.Parser):
"""Docutils parser for CommonMark"""
supported = ('md', 'markdown')
translate_section_name = None
default_config = {
'known_url_schemes': None,
}
def __init__(self):
self._level_to_elem = {}
def parse(self, inputstring, document):
self.document = document
self.current_node = document
self.config = self.default_config.copy()
try:
new_cfg = self.document.settings.env.config.recommonmark_config
self.config.update(new_cfg)
except AttributeError:
pass
self.setup_parse(inputstring, document)
self.setup_sections()
parser = Parser()
ast = parser.parse(inputstring + '\n')
self.convert_ast(ast)
self.finish_parse()
def convert_ast(self, ast):
for (node, entering) in ast.walker():
fn_prefix = "visit" if entering else "depart"
fn_name = "{0}_{1}".format(fn_prefix, node.t.lower())
fn_default = "default_{0}".format(fn_prefix)
fn = getattr(self, fn_name, None)
if fn is None:
fn = getattr(self, fn_default)
fn(node)
# Node type enter/exit handlers
def default_visit(self, mdnode):
pass
def default_depart(self, mdnode):
"""Default node depart handler
If there is a matching ``visit_<type>`` method for a container node,
        then we should make sure to back up to its parent element when the node
is exited.
"""
if mdnode.is_container():
fn_name = 'visit_{0}'.format(mdnode.t)
if not hasattr(self, fn_name):
warn("Container node skipped: type={0}".format(mdnode.t))
else:
self.current_node = self.current_node.parent
def visit_heading(self, mdnode):
# Test if we're replacing a section level first
if isinstance(self.current_node, nodes.section):
if self.is_section_level(mdnode.level, self.current_node):
self.current_node = self.current_node.parent
title_node = nodes.title()
title_node.line = mdnode.sourcepos[0][0]
new_section = nodes.section()
new_section.line = mdnode.sourcepos[0][0]
new_section.append(title_node)
self.add_section(new_section, mdnode.level)
# Set the current node to the title node to accumulate text children/etc
# for heading.
self.current_node = title_node
def depart_heading(self, _):
"""Finish establishing section
Wrap up title node, but stick in the section node. Add the section names
based on all the text nodes added to the title.
"""
assert isinstance(self.current_node, nodes.title)
# The title node has a tree of text nodes, use the whole thing to
# determine the section id and names
text = self.current_node.astext()
if self.translate_section_name:
text = self.translate_section_name(text)
name = nodes.fully_normalize_name(text)
section = self.current_node.parent
section['names'].append(name)
self.document.note_implicit_target(section, section)
self.current_node = section
def visit_text(self, mdnode):
self.current_node.append(nodes.Text(mdnode.literal, mdnode.literal))
def visit_softbreak(self, _):
self.current_node.append(nodes.Text('\n'))
def visit_linebreak(self, _):
self.current_node.append(nodes.raw('', '<br />', format='html'))
def visit_paragraph(self, mdnode):
p = nodes.paragraph(mdnode.literal)
p.line = mdnode.sourcepos[0][0]
self.current_node.append(p)
self.current_node = p
def visit_emph(self, _):
n = nodes.emphasis()
self.current_node.append(n)
self.current_node = n
def visit_strong(self, _):
n = nodes.strong()
self.current_node.append(n)
self.current_node = n
def visit_code(self, mdnode):
n = nodes.literal(mdnode.literal, mdnode.literal)
self.current_node.append(n)
def visit_link(self, mdnode):
ref_node = nodes.reference()
# Check destination is supported for cross-linking and remove extension
destination = mdnode.destination
_, ext = splitext(destination)
# Check if the destination starts with a url scheme, since internal and
# external links need to be handled differently.
url_check = urlparse(destination)
known_url_schemes = self.config.get('known_url_schemes')
if known_url_schemes:
scheme_known = url_check.scheme in known_url_schemes
else:
scheme_known = bool(url_check.scheme)
# TODO check for other supported extensions, such as those specified in
# the Sphinx conf.py file but how to access this information?
if not scheme_known and ext.replace('.', '') in self.supported:
destination = destination.replace(ext, '')
ref_node['refuri'] = destination
        # TODO okay, so this is actually not always the right line number, but
# these mdnodes won't have sourcepos on them for whatever reason. This
# is better than 0 though.
ref_node.line = self._get_line(mdnode)
if mdnode.title:
ref_node['title'] = mdnode.title
next_node = ref_node
# If there's not a url scheme (e.g. 'https' for 'https:...' links),
# or there is a scheme but it's not in the list of known_url_schemes,
# then assume it's a cross-reference and pass it to Sphinx as an `:any:` ref.
if not url_check.fragment and not scheme_known:
wrap_node = addnodes.pending_xref(
reftarget=unquote(destination),
reftype='any',
refdomain=None, # Added to enable cross-linking
refexplicit=True,
refwarn=True
)
# TODO also not correct sourcepos
wrap_node.line = self._get_line(mdnode)
if mdnode.title:
wrap_node['title'] = mdnode.title
wrap_node.append(ref_node)
next_node = wrap_node
self.current_node.append(next_node)
self.current_node = ref_node
def depart_link(self, mdnode):
if isinstance(self.current_node.parent, addnodes.pending_xref):
self.current_node = self.current_node.parent.parent
else:
self.current_node = self.current_node.parent
def visit_image(self, mdnode):
img_node = nodes.image()
img_node['uri'] = mdnode.destination
if mdnode.first_child and mdnode.first_child.literal:
content = [mdnode.first_child.literal]
n = mdnode.first_child
mdnode.first_child.literal = ''
mdnode.first_child = mdnode.last_child = None
while getattr(n, 'nxt'):
n.nxt, n = None, n.nxt
content.append(n.literal)
img_node['alt'] = ''.join(content)
self.current_node.append(img_node)
self.current_node = img_node
def visit_list(self, mdnode):
list_node = None
if (mdnode.list_data['type'] == "bullet"):
list_node_cls = nodes.bullet_list
else:
list_node_cls = nodes.enumerated_list
list_node = list_node_cls()
list_node.line = mdnode.sourcepos[0][0]
self.current_node.append(list_node)
self.current_node = list_node
def visit_item(self, mdnode):
node = nodes.list_item()
node.line = mdnode.sourcepos[0][0]
self.current_node.append(node)
self.current_node = node
def visit_code_block(self, mdnode):
kwargs = {}
if mdnode.is_fenced and mdnode.info:
kwargs['language'] = mdnode.info
text = ''.join(mdnode.literal)
if text.endswith('\n'):
text = text[:-1]
node = nodes.literal_block(text, text, **kwargs)
self.current_node.append(node)
def visit_block_quote(self, mdnode):
q = nodes.block_quote()
q.line = mdnode.sourcepos[0][0]
self.current_node.append(q)
self.current_node = q
def visit_html(self, mdnode):
raw_node = nodes.raw(mdnode.literal,
mdnode.literal, format='html')
if mdnode.sourcepos is not None:
raw_node.line = mdnode.sourcepos[0][0]
self.current_node.append(raw_node)
def visit_html_inline(self, mdnode):
self.visit_html(mdnode)
def visit_html_block(self, mdnode):
self.visit_html(mdnode)
def visit_thematic_break(self, _):
self.current_node.append(nodes.transition())
# Section handling
def setup_sections(self):
self._level_to_elem = {0: self.document}
def add_section(self, section, level):
parent_level = max(
section_level for section_level in self._level_to_elem
if level > section_level
)
parent = self._level_to_elem[parent_level]
parent.append(section)
self._level_to_elem[level] = section
# Prune level to limit
self._level_to_elem = dict(
(section_level, section)
for section_level, section in self._level_to_elem.items()
if section_level <= level
)
def is_section_level(self, level, section):
return self._level_to_elem.get(level, None) == section
def _get_line(self, mdnode):
while mdnode:
if mdnode.sourcepos:
return mdnode.sourcepos[0][0]
mdnode = mdnode.parent
return 0
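# A minimal sketch (not part of the parser) of the commonmark AST walk that convert_ast()
# above dispatches on: walker() yields each node twice, once entering and once leaving,
# and node.t supplies the visit_*/depart_* method name.
if __name__ == '__main__':
    example_md = "# Title\n\nSome *emphasis* and a [link](page.md).\n"
    for example_node, entering in Parser().parse(example_md).walker():
        print("{0}_{1}".format("visit" if entering else "depart", example_node.t.lower()))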
|
|
import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if len(path_transforms) == 0:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
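# A minimal sketch (not part of mplexporter) of the subclassing pattern the docstrings
# above describe: a renderer overrides the open_/close_ hooks and the draw_* primitives
# it cares about; anything left out (e.g. draw_image) keeps the base-class behaviour.
class PrintingRenderer(Renderer):
    """Toy renderer that prints each drawing command instead of producing output."""
    def open_figure(self, fig, props):
        print("open figure")
    def close_figure(self, fig):
        print("close figure")
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        print("path: {0} vertices in {1} coordinates".format(len(data), coordinates))
    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        print("text: {0!r} at {1}".format(text, position))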
|
|
"""
Commands that are available from the connect screen.
"""
import re
import traceback
from django.conf import settings
from src.players.models import PlayerDB
from src.objects.models import ObjectDB
from src.server.models import ServerConfig
from src.comms.models import Channel
from src.utils import create, logger, utils, ansi
from src.commands.default.muxcommand import MuxCommand
from src.commands.cmdhandler import CMD_LOGINSTART
# limit symbol import for API
__all__ = ("CmdUnconnectedConnect", "CmdUnconnectedCreate", "CmdUnconnectedQuit", "CmdUnconnectedLook", "CmdUnconnectedHelp", "Magic")
CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
CONNECTION_SCREEN = ""
try:
CONNECTION_SCREEN = ansi.parse_ansi(utils.string_from_module(CONNECTION_SCREEN_MODULE))
except Exception:
pass
if not CONNECTION_SCREEN:
CONNECTION_SCREEN = "\nEvennia: Error in CONNECTION_SCREEN MODULE (randomly picked connection screen variable is not a string). \nEnter 'help' for aid."
class Magic(MuxCommand):
"""
Hidden command for the web client's magic cookie authenticator.
"""
key = "magic"
def func(self):
session = self.caller
player = PlayerDB.objects.player_search(self.lhs)
if len(player) != 1:
player = None
else:
player = player[0]
if player.name.lower() != self.lhs.lower():
player=None
pswd = None
if player:
pswd = self.rhs == player.db.magic_cookie
if not (player and pswd):
# No playername or password match
session.msg("Could not verify Magic Cookie. Please email the server administrator for assistance.")
return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0]==player.name for tup in bans)
or
any(tup[2].match(session.address[0]) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "{rYou have been banned and cannot continue from here."
string += "\nIf you feel this ban is in error, please email an admin.{x"
session.msg(string)
session.execute_cmd("quit")
return
session.sessionhandler.login(session, player)
class Connect(MuxCommand):
"""
Connect to the game.
Usage (at login screen):
connect playername password
connect "player name" "pass word"
Use the create command to first create an account before logging in.
If you have spaces in your name, enclose it in quotes.
"""
key = "connect"
aliases = ["conn", "con", "co"]
locks = "cmd:all()" # not really needed
def func(self):
"""
Uses the Django admin api. Note that unlogged-in commands
have a unique position in that their func() receives
a session object instead of a source_object like all
other types of logged-in commands (this is because
there is no object yet before the player has logged in)
"""
session = self.caller
args = self.args
# extract quoted parts
parts = [part.strip() for part in re.split(r"\"|\'", args) if part.strip()]
if len(parts) == 1:
# this was (hopefully) due to no quotes being found
parts = parts[0].split(None, 1)
if len(parts) != 2:
session.msg("\n\r Usage (without <>): connect <name> <password>")
return
playername, password = parts
# Match account name and check password
player = PlayerDB.objects.player_search(playername)
if len(player) != 1:
player = None
else:
player = player[0]
if player.name.lower() != playername.lower():
player=None
pswd = None
if player:
pswd = player.check_password(password)
if not (player and pswd):
# No playername or password match
string = "Wrong login information given.\nIf you have spaces in your name or "
string += "password, don't forget to enclose it in quotes. Also capitalization matters."
string += "\nIf you are new you should first create a new account "
string += "using the 'create' command."
session.msg(string)
return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0]==player.name for tup in bans)
or
any(tup[2].match(session.address[0]) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "{rYou have been banned and cannot continue from here."
string += "\nIf you feel this ban is in error, please email an admin.{x"
session.msg(string)
session.execute_cmd("quit")
return
# actually do the login. This will call all other hooks:
# session.at_init()
# if character:
# at_first_login() # only once
# at_pre_login()
# player.at_post_login() - calls look if no character is set
# character.at_post_login() - this calls look command by default
session.sessionhandler.login(session, player)
class Create(MuxCommand):
"""
Create a new account.
Usage (at login screen):
create <playername> <password>
create "player name" "pass word"
This creates a new player account.
If you have spaces in your name, enclose it in quotes.
"""
key = "create"
aliases = ["cre", "cr"]
locks = "cmd:all()"
def func(self):
"Do checks and create account"
session = self.caller
args = self.args.strip()
# extract quoted parts
parts = [part.strip() for part in re.split(r"\"|\'", args) if part.strip()]
if len(parts) == 1:
# this was (hopefully) due to no quotes being found
parts = parts[0].split(None, 1)
if len(parts) != 2:
string = "\n Usage (without <>): create <name> <password>"
string += "\nIf <name> or <password> contains spaces, enclose it in quotes."
session.msg(string)
return
playername, password = parts
print "playername '%s', password: '%s'" % (playername, password)
# sanity checks
if not re.findall('^[\w. @+-]+$', playername) or not (0 < len(playername) <= 30):
            # this echoes the restrictions made by django's auth module, plus
            # allowing spaces (quoted names with spaces are supported above).
            string = "\n\r Playername must be 30 characters or fewer. Letters, spaces, digits and @/./+/-/_ only."
session.msg(string)
return
# strip excessive spaces in playername
playername = re.sub(r"\s+", " ", playername).strip()
if PlayerDB.objects.filter(user__username__iexact=playername) or PlayerDB.objects.filter(username__iexact=playername):
# player already exists (we also ignore capitalization here)
session.msg("Sorry, there is already a player with the name '%s'." % playername)
return
if not re.findall('^[\w. @+-]+$', password) or not (3 < len(password)):
string = "\n\r Password should be longer than 3 characers. Letters, spaces, digits and @\.\+\-\_ only."
string += "\nFor best security, make it longer than 8 characters. You can also use a phrase of"
string += "\nmany words if you enclose the password in quotes."
session.msg(string)
return
# everything's ok. Create the new player account.
try:
default_home = ObjectDB.objects.get_id(settings.CHARACTER_DEFAULT_HOME)
typeclass = settings.BASE_CHARACTER_TYPECLASS
permissions = settings.PERMISSION_PLAYER_DEFAULT
try:
new_character = create.create_player(playername, None, password,
permissions=permissions,
character_typeclass=typeclass,
character_location=default_home,
character_home=default_home)
            except Exception as e:
                session.msg("There was an error creating the default Character/Player:\n%s\nIf this problem persists, contact an admin." % e)
return
new_player = new_character.player
# This needs to be called so the engine knows this player is logging in for the first time.
# (so it knows to call the right hooks during login later)
utils.init_new_player(new_player)
# join the new player to the public channel
pchanneldef = settings.CHANNEL_PUBLIC
if pchanneldef:
pchannel = Channel.objects.get_channel(pchanneldef[0])
if not pchannel.connect_to(new_player):
string = "New player '%s' could not connect to public channel!" % new_player.key
logger.log_errmsg(string)
# allow only the character itself and the player to puppet this character (and Immortals).
new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)" %
(new_character.id, new_player.id))
# If no description is set, set a default description
if not new_character.db.desc:
new_character.db.desc = "This is a Player."
# tell the caller everything went well.
string = "A new account '%s' was created. Welcome!"
if " " in playername:
string += "\n\nYou can now log in with the command 'connect \"%s\" <your password>'."
else:
string += "\n\nYou can now log with the command 'connect %s <your password>'."
session.msg(string % (playername, playername))
except Exception:
# We are in the middle between logged in and -not, so we have to handle tracebacks
# ourselves at this point. If we don't, we won't see any errors at all.
string = "%s\nThis is a bug. Please e-mail an admin if the problem persists."
session.msg(string % (traceback.format_exc()))
logger.log_errmsg(traceback.format_exc())
class CmdUnconnectedQuit(MuxCommand):
"""
We maintain a different version of the quit command
here for unconnected players for the sake of simplicity. The logged in
version is a bit more complicated.
"""
key = "quit"
aliases = ["q", "qu"]
locks = "cmd:all()"
def func(self):
"Simply close the connection."
session = self.caller
session.msg("Good bye! Disconnecting ...")
session.session_disconnect()
class CmdUnconnectedLook(MuxCommand):
"""
This is an unconnected version of the look command for simplicity.
This is called by the server and kicks everything in gear.
All it does is display the connect screen.
"""
key = CMD_LOGINSTART
aliases = ["look", "l"]
locks = "cmd:all()"
def func(self):
"Show the connect screen."
self.caller.msg(CONNECTION_SCREEN)
class CmdUnconnectedHelp(MuxCommand):
"""
This is an unconnected version of the help command,
for simplicity. It shows a pane of info.
"""
key = "help"
aliases = ["h", "?"]
locks = "cmd:all()"
def func(self):
"Shows help"
string = \
"""
You are not yet logged into the game. Commands available at this point:
{wcreate, connect, look, help, quit{n
To login to the system, you need to do one of the following:
{w1){n If you have no previous account, you need to use the 'create'
command.
{wcreate Anna c67jHL8p{n
 Note that if you use spaces in your name, you have to enclose it in quotes.
{wcreate "Anna the Barbarian" c67jHL8p{n
It's always a good idea (not only here, but everywhere on the net)
to not use a regular word for your password. Make it longer than
6 characters or write a passphrase.
{w2){n If you have an account already, either because you just created
one in {w1){n above or you are returning, use the 'connect' command:
{wconnect Anna c67jHL8p{n
(Again, if there are spaces in the name you have to enclose it in quotes).
This should log you in. Run {whelp{n again once you're logged in
to get more aid. Hope you enjoy your stay!
You can use the {wlook{n command if you want to see the connect screen again.
"""
self.caller.msg(string)
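# Illustrative sketch (not part of the command module): the quote-aware argument
# parsing used by both Connect.func() and Create.func(), shown in isolation.
def _split_name_and_password(args):
    """Return [name, password] parsed from the raw argument string, or None."""
    parts = [part.strip() for part in re.split(r"\"|\'", args) if part.strip()]
    if len(parts) == 1:
        # no quotes were found; fall back to splitting on the first whitespace run
        parts = parts[0].split(None, 1)
    return parts if len(parts) == 2 else None
# _split_name_and_password('Anna c67jHL8p')                     -> ['Anna', 'c67jHL8p']
# _split_name_and_password('"Anna the Barbarian" "pass word"')  -> ['Anna the Barbarian', 'pass word']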
|
|
import paramiko
from paramiko import AuthenticationException, SSHException, ChannelException
from enum import Enum
from concurrent.futures import ThreadPoolExecutor
from socket import error as SocketError
from margaritashotgun.auth import AuthMethods
from margaritashotgun.exceptions import *
import logging
logger = logging.getLogger(__name__)
class Commands(Enum):
mem_size = "cat /proc/meminfo | grep MemTotal | awk '{ print $2 }'"
kernel_version = "uname -r"
lime_pattern = "{0}:{1}"
lime_check = "cat /proc/net/tcp"
load_lime = 'sudo insmod {0} "path=tcp:{1}" format={2}'
unload_lime = "sudo pkill insmod; sudo rmmod lime"
class RemoteShell():
def __init__(self, max_async_threads=2):
"""
        :type max_async_threads: int
        :param max_async_threads: maximum number of async command executors
"""
self.jump_host_ssh = None
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.executor = ThreadPoolExecutor(max_workers=max_async_threads)
self.futures = []
def connect(self, auth, address, port, jump_host, jump_auth):
"""
Creates an ssh session to a remote host
:type auth: :py:class:`margaritashotgun.auth.AuthMethods`
:param auth: Authentication object
:type address: str
:param address: remote server address
:type port: int
:param port: remote server port
"""
try:
self.target_address = address
sock = None
if jump_host is not None:
self.jump_host_ssh = paramiko.SSHClient()
self.jump_host_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect_with_auth(self.jump_host_ssh, jump_auth,
jump_host['addr'], jump_host['port'], sock)
transport = self.jump_host_ssh.get_transport()
dest_addr = (address, port)
jump_addr = (jump_host['addr'], jump_host['port'])
channel = transport.open_channel('direct-tcpip', dest_addr,
jump_addr)
self.connect_with_auth(self.ssh, auth, address, port, channel)
else:
self.connect_with_auth(self.ssh, auth, address, port, sock)
except (AuthenticationException, SSHException,
ChannelException, SocketError) as ex:
raise SSHConnectionError("{0}:{1}".format(address, port), ex)
def connect_with_auth(self, ssh, auth, address, port, sock):
"""
"""
logger.debug(("{0}: paramiko client connecting to "
"{0}:{1} with {2}".format(address,
port,
auth.method)))
if auth.method == AuthMethods.key:
self.connect_with_key(ssh, auth.username, auth.key, address,
port, sock)
elif auth.method == AuthMethods.password:
self.connect_with_password(ssh, auth.username, auth.password,
address, port, sock)
else:
raise AuthenticationMethodMissingError()
logger.debug(("{0}: paramiko client connected to "
"{0}:{1}".format(address, port)))
def connect_with_password(self, ssh, username, password, address, port, sock,
timeout=20):
"""
Create an ssh session to a remote host with a username and password
:type username: str
:param username: username used for ssh authentication
:type password: str
:param password: password used for ssh authentication
:type address: str
:param address: remote server address
:type port: int
:param port: remote server port
"""
ssh.connect(username=username,
password=password,
hostname=address,
port=port,
sock=sock,
timeout=timeout)
def connect_with_key(self, ssh, username, key, address, port, sock,
timeout=20):
"""
Create an ssh session to a remote host with a username and rsa key
:type username: str
:param username: username used for ssh authentication
:type key: :py:class:`paramiko.key.RSAKey`
:param key: paramiko rsa key used for ssh authentication
:type address: str
:param address: remote server address
:type port: int
:param port: remote server port
"""
ssh.connect(hostname=address,
port=port,
username=username,
pkey=key,
sock=sock,
timeout=timeout)
def transport(self):
transport = self.ssh.get_transport()
transport.use_compression(True)
transport.window_size = 2147483647
transport.packetizer.REKEY_BYTES = pow(2, 40)
transport.packetizer.REKEY_PACKETS = pow(2, 40)
return self.ssh.get_transport()
def execute(self, command):
"""
Executes command on remote hosts
:type command: str
:param command: command to be run on remote host
"""
try:
if self.ssh.get_transport() is not None:
logger.debug('{0}: executing "{1}"'.format(self.target_address,
command))
stdin, stdout, stderr = self.ssh.exec_command(command)
return dict(zip(['stdin', 'stdout', 'stderr'],
[stdin, stdout, stderr]))
else:
raise SSHConnectionError(self.target_address,
"ssh transport is closed")
except (AuthenticationException, SSHException,
ChannelException, SocketError) as ex:
logger.critical(("{0} execution failed on {1} with exception:"
"{2}".format(command, self.target_address,
ex)))
raise SSHCommandError(self.target_address, command, ex)
def execute_async(self, command, callback=None):
"""
Executes command on remote hosts without blocking
:type command: str
:param command: command to be run on remote host
:type callback: function
:param callback: function to call when execution completes
"""
try:
logger.debug(('{0}: execute async "{1}"'
'with callback {2}'.format(self.target_address,
command,
callback)))
future = self.executor.submit(self.execute, command)
if callback is not None:
future.add_done_callback(callback)
return future
except (AuthenticationException, SSHException,
ChannelException, SocketError) as ex:
logger.critical(("{0} execution failed on {1} with exception:"
"{2}".format(command, self.target_address,
ex)))
raise SSHCommandError(self.target_address, command, ex)
def decode(self, stream, encoding='utf-8'):
"""
Convert paramiko stream into a string
        :type stream: file-like channel stream (as returned by exec_command)
:param stream: stream to convert
:type encoding: str
:param encoding: stream encoding
"""
data = stream.read().decode(encoding).strip("\n")
if data != "":
logger.debug(('{0}: decoded "{1}" with encoding '
'{2}'.format(self.target_address, data, encoding)))
return data
def upload_file(self, local_path, remote_path):
"""
Upload a file from the local filesystem to the remote host
:type local_path: str
:param local_path: path of local file to upload
:type remote_path: str
:param remote_path: destination path of upload on remote host
"""
logger.debug("{0}: uploading {1} to {0}:{2}".format(self.target_address,
local_path,
remote_path))
try:
sftp = paramiko.SFTPClient.from_transport(self.transport())
sftp.put(local_path, remote_path)
sftp.close()
except SSHException as ex:
logger.warn(("{0}: LiME module upload failed with exception:"
"{1}".format(self.target_address, ex)))
def cleanup(self):
"""
Release resources used during shell execution
"""
for future in self.futures:
future.cancel()
        self.executor.shutdown(wait=True)
        if self.ssh.get_transport() is not None:
self.ssh.close()
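# Illustrative usage sketch (not part of the module). How the `auth` object is
# constructed is left out here (see margaritashotgun.auth); only methods defined
# above are exercised.
def _example_remote_shell(auth, address, port=22):
    shell = RemoteShell(max_async_threads=2)
    shell.connect(auth, address, port, jump_host=None, jump_auth=None)
    try:
        result = shell.execute(Commands.kernel_version.value)
        kernel = shell.decode(result['stdout'])
        logger.info("%s runs kernel %s", address, kernel)
    finally:
        shell.cleanup()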
|
|
# manage.py
import datetime
import os
import unittest
import coverage
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from project import app, db
from project.models import (
User,
EducationalInstitutionType,
Gender,
PatentOffice,
PatentStatus,
PublicationCategory,
PresentationRole,
ResearchRole
)
app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
# migrations
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Runs the unit tests without coverage."""
tests = unittest.TestLoader().discover('tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
else:
return 1
@manager.command
def cov():
"""Runs the unit tests with coverage."""
cov = coverage.coverage(branch=True, include='project/*')
cov.start()
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
cov.stop()
cov.save()
print('Coverage Summary:')
cov.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
cov.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
cov.erase()
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
@manager.command
def create_admin():
"""Creates the admin user."""
db.session.add(
User(
email="[email protected]",
password="admin",
admin=True,
confirmed=True,
confirmed_on=datetime.datetime.now()
)
)
db.session.commit()
@manager.command
def insert_lookup_data():
# EducationalInstitutionType
db.session.add(EducationalInstitutionType(description='High School'))
db.session.add(EducationalInstitutionType(description='University'))
db.session.add(EducationalInstitutionType(
description='Technikons / Universities of Technology'))
db.session.commit()
# Gender
db.session.add(Gender(gender='Female'))
db.session.add(Gender(gender='Male'))
db.session.commit()
# PublicationCategory
db.session.add(PublicationCategory(category='Peer-reviewed publications',
description='reports of original '
'investigations; clinical '
'reports; letters to the '
'editor'))
    db.session.add(PublicationCategory(category='Books - authored',
                                       description='books authored/written'))
db.session.add(PublicationCategory(category='Books - edited',
description='books edited'))
    db.session.add(PublicationCategory(category='Monographs - authored',
                                       description='Monographs authored'))
db.session.add(PublicationCategory(category='Monographs - edited',
description='Monographs edited'))
db.session.add(PublicationCategory(category='Works in progress',
description='Complete articles published '
'in conference proceedings, '
'chapters in books; review '
'articles; editorials.'))
db.session.add(PublicationCategory(category='Development of educational materials',
description='e.g. teaching cases'))
db.session.add(PublicationCategory(category='Development of publication materials',
description='e.g. teaching cases'))
db.session.add(PublicationCategory(category='Non-print materials',
description='film strips, films, videotapes and '
'computer software relevant to '
'academic field'))
db.session.commit()
# PatentOffice
db.session.add(PatentOffice(name='Albania'))
db.session.add(PatentOffice(name='Algeria'))
db.session.add(PatentOffice(name='Andorra'))
db.session.add(PatentOffice(name='Angola'))
db.session.add(PatentOffice(name='Antigua and Barbuda'))
db.session.add(PatentOffice(name='Argentina'))
db.session.add(PatentOffice(name='Armenia'))
db.session.add(PatentOffice(name='Australia'))
db.session.add(PatentOffice(name='Austria'))
db.session.add(PatentOffice(name='Azerbaijan'))
db.session.add(PatentOffice(name='Bahamas'))
db.session.add(PatentOffice(name='Bahrain'))
db.session.add(PatentOffice(name='Bangladesh'))
db.session.add(PatentOffice(name='Barbados'))
db.session.add(PatentOffice(name='Belarus'))
db.session.add(PatentOffice(name='Belgium'))
db.session.add(PatentOffice(name='Belize'))
db.session.add(PatentOffice(name='Benin'))
db.session.add(PatentOffice(name='Bhutan'))
db.session.add(PatentOffice(name='Bolivia (Plurinational State of)'))
db.session.add(PatentOffice(name='Bosnia and Herzegovina'))
db.session.add(PatentOffice(name='Botswana'))
db.session.add(PatentOffice(name='Brazil'))
db.session.add(PatentOffice(name='Brunei Darussalam'))
db.session.add(PatentOffice(name='Bulgaria'))
db.session.add(PatentOffice(name='Burkina Faso'))
db.session.add(PatentOffice(name='Burundi'))
db.session.add(PatentOffice(name='Cabo Verde'))
db.session.add(PatentOffice(name='Cambodia'))
db.session.add(PatentOffice(name='Cameroon'))
db.session.add(PatentOffice(name='Canada'))
db.session.add(PatentOffice(name='Central African Republic'))
db.session.add(PatentOffice(name='Chad'))
db.session.add(PatentOffice(name='Chile'))
db.session.add(PatentOffice(name='China'))
db.session.add(PatentOffice(name='Colombia'))
db.session.add(PatentOffice(name='Comoros'))
db.session.add(PatentOffice(name='Congo'))
db.session.add(PatentOffice(name='Costa Rica'))
db.session.add(PatentOffice(name="Cote d'Ivoire"))
db.session.add(PatentOffice(name='Croatia'))
db.session.add(PatentOffice(name='Cuba'))
db.session.add(PatentOffice(name='Cyprus'))
db.session.add(PatentOffice(name='Czech Republic'))
db.session.add(PatentOffice(name="Democratic People's Republic of Korea"))
db.session.add(PatentOffice(name='Democratic Republic of the Congo'))
db.session.add(PatentOffice(name='Denmark'))
db.session.add(PatentOffice(name='Djibouti'))
db.session.add(PatentOffice(name='Dominica'))
db.session.add(PatentOffice(name='Dominican Republic'))
db.session.add(PatentOffice(name='Ecuador'))
db.session.add(PatentOffice(name='Egypt'))
db.session.add(PatentOffice(name='El Salvador'))
db.session.add(PatentOffice(name='Equatorial Guinea'))
db.session.add(PatentOffice(name='Eritrea'))
db.session.add(PatentOffice(name='Estonia'))
db.session.add(PatentOffice(name='Ethiopia'))
db.session.add(PatentOffice(name='Fiji'))
db.session.add(PatentOffice(name='Finland'))
db.session.add(PatentOffice(name='France'))
db.session.add(PatentOffice(name='Gabon'))
db.session.add(PatentOffice(name='Gambia'))
db.session.add(PatentOffice(name='Georgia'))
db.session.add(PatentOffice(name='Germany'))
db.session.add(PatentOffice(name='Ghana'))
db.session.add(PatentOffice(name='Greece'))
db.session.add(PatentOffice(name='Grenada'))
db.session.add(PatentOffice(name='Guatemala'))
db.session.add(PatentOffice(name='Guinea'))
db.session.add(PatentOffice(name='Guinea-Bissau'))
db.session.add(PatentOffice(name='Guyana'))
db.session.add(PatentOffice(name='Haiti'))
db.session.add(PatentOffice(name='Holy See'))
db.session.add(PatentOffice(name='Honduras'))
db.session.add(PatentOffice(name='Hungary'))
db.session.add(PatentOffice(name='Iceland'))
db.session.add(PatentOffice(name='India'))
db.session.add(PatentOffice(name='Indonesia'))
db.session.add(PatentOffice(name='Iran (Islamic Republic of)'))
db.session.add(PatentOffice(name='Iraq'))
db.session.add(PatentOffice(name='Ireland'))
db.session.add(PatentOffice(name='Israel'))
db.session.add(PatentOffice(name='Italy'))
db.session.add(PatentOffice(name='Jamaica'))
db.session.add(PatentOffice(name='Japan'))
db.session.add(PatentOffice(name='Jordan'))
db.session.add(PatentOffice(name='Kazakhstan'))
db.session.add(PatentOffice(name='Kenya'))
db.session.add(PatentOffice(name='Kiribati'))
db.session.add(PatentOffice(name='Kuwait'))
db.session.add(PatentOffice(name='Kyrgyzstan'))
db.session.add(PatentOffice(name='Lao People\'s Democratic Republic'))
db.session.add(PatentOffice(name='Latvia'))
db.session.add(PatentOffice(name='Lebanon'))
db.session.add(PatentOffice(name='Lesotho'))
db.session.add(PatentOffice(name='Liberia'))
db.session.add(PatentOffice(name='Libya'))
db.session.add(PatentOffice(name='Liechtenstein'))
db.session.add(PatentOffice(name='Lithuania'))
db.session.add(PatentOffice(name='Luxembourg'))
db.session.add(PatentOffice(name='Madagascar'))
db.session.add(PatentOffice(name='Malawi'))
db.session.add(PatentOffice(name='Malaysia'))
db.session.add(PatentOffice(name='Maldives'))
db.session.add(PatentOffice(name='Mali'))
db.session.add(PatentOffice(name='Malta'))
db.session.add(PatentOffice(name='Mauritania'))
db.session.add(PatentOffice(name='Mauritius'))
db.session.add(PatentOffice(name='Mexico'))
db.session.add(PatentOffice(name='Monaco'))
db.session.add(PatentOffice(name='Mongolia'))
db.session.add(PatentOffice(name='Montenegro'))
db.session.add(PatentOffice(name='Morocco'))
db.session.add(PatentOffice(name='Mozambique'))
db.session.add(PatentOffice(name='Myanmar'))
db.session.add(PatentOffice(name='Namibia'))
db.session.add(PatentOffice(name='Nauru'))
db.session.add(PatentOffice(name='Nepal'))
db.session.add(PatentOffice(name='Netherlands'))
db.session.add(PatentOffice(name='New Zealand'))
db.session.add(PatentOffice(name='Nicaragua'))
db.session.add(PatentOffice(name='Niger'))
db.session.add(PatentOffice(name='Nigeria'))
db.session.add(PatentOffice(name='Niue'))
db.session.add(PatentOffice(name='Norway'))
db.session.add(PatentOffice(name='Oman'))
db.session.add(PatentOffice(name='Pakistan'))
db.session.add(PatentOffice(name='Palau'))
db.session.add(PatentOffice(name='Panama'))
db.session.add(PatentOffice(name='Papua New Guinea'))
db.session.add(PatentOffice(name='Paraguay'))
db.session.add(PatentOffice(name='Peru'))
db.session.add(PatentOffice(name='Philippines'))
db.session.add(PatentOffice(name='Poland'))
db.session.add(PatentOffice(name='Portugal'))
db.session.add(PatentOffice(name='Qatar'))
db.session.add(PatentOffice(name='Republic of Korea'))
db.session.add(PatentOffice(name='Republic of Moldova'))
db.session.add(PatentOffice(name='Romania'))
db.session.add(PatentOffice(name='Russian Federation'))
db.session.add(PatentOffice(name='Rwanda'))
db.session.add(PatentOffice(name='Saint Kitts and Nevis'))
db.session.add(PatentOffice(name='Saint Lucia'))
db.session.add(PatentOffice(name='Saint Vincent and the Grenadines'))
db.session.add(PatentOffice(name='Samoa'))
db.session.add(PatentOffice(name='San Marino'))
db.session.add(PatentOffice(name='Sao Tome and Principe'))
db.session.add(PatentOffice(name='Saudi Arabia'))
db.session.add(PatentOffice(name='Senegal'))
db.session.add(PatentOffice(name='Serbia'))
db.session.add(PatentOffice(name='Seychelles'))
db.session.add(PatentOffice(name='Sierra Leone'))
db.session.add(PatentOffice(name='Singapore'))
db.session.add(PatentOffice(name='Slovakia'))
db.session.add(PatentOffice(name='Slovenia'))
db.session.add(PatentOffice(name='Solomon Islands'))
db.session.add(PatentOffice(name='Somalia'))
db.session.add(PatentOffice(name='South Africa'))
db.session.add(PatentOffice(name='Spain'))
db.session.add(PatentOffice(name='Sri Lanka'))
db.session.add(PatentOffice(name='Sudan'))
db.session.add(PatentOffice(name='Suriname'))
db.session.add(PatentOffice(name='Swaziland'))
db.session.add(PatentOffice(name='Sweden'))
db.session.add(PatentOffice(name='Switzerland'))
db.session.add(PatentOffice(name='Syrian Arab Republic'))
db.session.add(PatentOffice(name='Tajikistan'))
db.session.add(PatentOffice(name='Thailand'))
db.session.add(PatentOffice(name='the former Yugoslav Republic of Macedonia'))
db.session.add(PatentOffice(name='Timor-Leste'))
db.session.add(PatentOffice(name='Togo'))
db.session.add(PatentOffice(name='Tonga'))
db.session.add(PatentOffice(name='Trinidad and Tobago'))
db.session.add(PatentOffice(name='Tunisia'))
db.session.add(PatentOffice(name='Turkey'))
db.session.add(PatentOffice(name='Turkmenistan'))
db.session.add(PatentOffice(name='Tuvalu'))
db.session.add(PatentOffice(name='Uganda'))
db.session.add(PatentOffice(name='Ukraine'))
db.session.add(PatentOffice(name='United Arab Emirates'))
db.session.add(PatentOffice(name='United Kingdom'))
db.session.add(PatentOffice(name='United Republic of Tanzania'))
db.session.add(PatentOffice(name='United States of America'))
db.session.add(PatentOffice(name='Uruguay'))
db.session.add(PatentOffice(name='Uzbekistan'))
db.session.add(PatentOffice(name='Vanuatu'))
db.session.add(PatentOffice(name='Venezuela (Bolivarian Republic of)'))
db.session.add(PatentOffice(name='Viet Nam'))
db.session.add(PatentOffice(name='Yemen'))
db.session.add(PatentOffice(name='Zambia'))
db.session.add(PatentOffice(name='Zimbabwe'))
db.session.add(PatentOffice(name='OAPI'))
db.session.add(PatentOffice(name='ARIPO'))
db.session.add(PatentOffice(name='ASBU'))
db.session.add(PatentOffice(name='BOIP'))
db.session.add(PatentOffice(name='EAPO'))
db.session.add(PatentOffice(name='EPO'))
db.session.add(PatentOffice(name='EUIPO'))
db.session.add(PatentOffice(name='UPOV'))
db.session.add(PatentOffice(name='ICPIP'))
db.session.add(PatentOffice(name='GCC Patent Office'))
db.session.commit()
# PatentStatus
db.session.add(PatentStatus(status='Patent Issued'))
db.session.add(PatentStatus(status='Patent Pending'))
db.session.commit()
db.session.add(PresentationRole(description='Presenter'))
db.session.add(PresentationRole(description='Panelist'))
db.session.add(PresentationRole(description='Keynote Speaker'))
db.session.commit()
db.session.add(ResearchRole(description='Head Researcher'))
db.session.add(ResearchRole(description='Research Assistant'))
db.session.add(ResearchRole(description='Data Collection'))
db.session.add(ResearchRole(description='Experimentation'))
db.session.commit()
if __name__ == '__main__':
manager.run()
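# Typical invocations of the commands defined above (Flask-Script dispatches on the
# function name; `db` is the registered MigrateCommand):
#   python manage.py create_db
#   python manage.py create_admin
#   python manage.py insert_lookup_data
#   python manage.py test
#   python manage.py cov
#   python manage.py db migrate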
|
|
import logging
import threading
import time
from hazelcast.errors import TransactionError, IllegalStateError
from hazelcast.future import make_blocking
from hazelcast.invocation import Invocation
from hazelcast.protocol.codec import (
transaction_create_codec,
transaction_commit_codec,
transaction_rollback_codec,
)
from hazelcast.proxy.transactional_list import TransactionalList
from hazelcast.proxy.transactional_map import TransactionalMap
from hazelcast.proxy.transactional_multi_map import TransactionalMultiMap
from hazelcast.proxy.transactional_queue import TransactionalQueue
from hazelcast.proxy.transactional_set import TransactionalSet
from hazelcast.util import thread_id
_logger = logging.getLogger(__name__)
_STATE_ACTIVE = "active"
_STATE_NOT_STARTED = "not_started"
_STATE_COMMITTED = "committed"
_STATE_ROLLED_BACK = "rolled_back"
_STATE_PARTIAL_COMMIT = "rolling_back"
TWO_PHASE = 1
"""
The two phase commit is separated in 2 parts. First it tries to execute the prepare; if there are any conflicts,
the prepare will fail. Once the prepare has succeeded, the commit (writing the changes) can be executed.
Hazelcast also provides three phase transaction by automatically copying the backlog to another member so that in case
of failure during a commit, another member can continue the commit from backup.
"""
ONE_PHASE = 2
"""
The one phase transaction executes a transaction using a single step at the end; committing the changes. There
is no prepare of the transactions, so conflicts are not detected. If there is a conflict, then when the transaction
commits the changes, some of the changes are written and others are not; leaving the system in a potentially permanent
inconsistent state.
"""
RETRY_COUNT = 20
class TransactionManager:
"""Manages the execution of client transactions and provides Transaction objects."""
def __init__(self, context):
self._context = context
def _connect(self):
connection_manager = self._context.connection_manager
for count in range(0, RETRY_COUNT):
connection = connection_manager.get_random_connection()
if connection:
return connection
_logger.debug(
"Could not get a connection for the transaction. Attempt %d of %d",
count,
RETRY_COUNT,
exc_info=True,
)
if count + 1 == RETRY_COUNT:
raise IllegalStateError("No active connection is found")
def new_transaction(self, timeout, durability, transaction_type):
"""Creates a Transaction object with given timeout, durability and transaction type.
Args:
timeout (int): The timeout in seconds determines the maximum lifespan of a transaction.
durability (int): The durability is the number of machines that can take over if a member fails during a
transaction commit or rollback
transaction_type (int): the transaction type which can be ``hazelcast.transaction.TWO_PHASE``
or ``hazelcast.transaction.ONE_PHASE``
Returns:
hazelcast.transaction.Transaction: New created Transaction.
"""
connection = self._connect()
return Transaction(self._context, connection, timeout, durability, transaction_type)
class Transaction:
"""Provides transactional operations: beginning/committing transactions, but also retrieving
transactional data-structures like the TransactionalMap.
"""
state = _STATE_NOT_STARTED
id = None
start_time = None
_locals = threading.local()
thread_id = None
def __init__(self, context, connection, timeout, durability, transaction_type):
self._context = context
self.connection = connection
self.timeout = timeout
self.durability = durability
self.transaction_type = transaction_type
self._objects = {}
def begin(self):
"""Begins this transaction."""
if hasattr(self._locals, "transaction_exists") and self._locals.transaction_exists:
raise TransactionError("Nested transactions are not allowed.")
if self.state != _STATE_NOT_STARTED:
raise TransactionError("Transaction has already been started.")
self._locals.transaction_exists = True
self.start_time = time.time()
self.thread_id = thread_id()
try:
request = transaction_create_codec.encode_request(
timeout=int(self.timeout * 1000),
durability=self.durability,
transaction_type=self.transaction_type,
thread_id=self.thread_id,
)
invocation = Invocation(
request, connection=self.connection, response_handler=lambda m: m
)
invocation_service = self._context.invocation_service
invocation_service.invoke(invocation)
response = invocation.future.result()
self.id = transaction_create_codec.decode_response(response)
self.state = _STATE_ACTIVE
except:
self._locals.transaction_exists = False
raise
def commit(self):
"""Commits this transaction."""
self._check_thread()
if self.state != _STATE_ACTIVE:
raise TransactionError("Transaction is not active.")
try:
self._check_timeout()
request = transaction_commit_codec.encode_request(self.id, self.thread_id)
invocation = Invocation(request, connection=self.connection)
invocation_service = self._context.invocation_service
invocation_service.invoke(invocation)
invocation.future.result()
self.state = _STATE_COMMITTED
except:
self.state = _STATE_PARTIAL_COMMIT
raise
finally:
self._locals.transaction_exists = False
def rollback(self):
"""Rollback of this current transaction."""
self._check_thread()
if self.state not in (_STATE_ACTIVE, _STATE_PARTIAL_COMMIT):
raise TransactionError("Transaction is not active.")
try:
if self.state != _STATE_PARTIAL_COMMIT:
request = transaction_rollback_codec.encode_request(self.id, self.thread_id)
invocation = Invocation(request, connection=self.connection)
invocation_service = self._context.invocation_service
invocation_service.invoke(invocation)
invocation.future.result()
self.state = _STATE_ROLLED_BACK
finally:
self._locals.transaction_exists = False
def get_list(self, name):
"""Returns the transactional list instance with the specified name.
Args:
name (str): The specified name.
Returns:
            hazelcast.proxy.transactional_list.TransactionalList: The instance of Transactional List
with the specified name.
"""
return self._get_or_create_object(name, TransactionalList)
def get_map(self, name):
"""Returns the transactional map instance with the specified name.
Args:
name (str): The specified name.
Returns:
hazelcast.proxy.transactional_map.TransactionalMap: The instance of Transactional Map
with the specified name.
"""
return self._get_or_create_object(name, TransactionalMap)
def get_multi_map(self, name):
"""Returns the transactional multimap instance with the specified name.
Args:
name (str): The specified name.
Returns:
hazelcast.proxy.transactional_multi_map.TransactionalMultiMap: The instance of Transactional MultiMap
with the specified name.
"""
return self._get_or_create_object(name, TransactionalMultiMap)
def get_queue(self, name):
"""Returns the transactional queue instance with the specified name.
Args:
name (str): The specified name.
Returns:
hazelcast.proxy.transactional_queue.TransactionalQueue: The instance of Transactional Queue
with the specified name.
"""
return self._get_or_create_object(name, TransactionalQueue)
def get_set(self, name):
"""Returns the transactional set instance with the specified name.
Args:
name (str): The specified name.
Returns:
hazelcast.proxy.transactional_set.TransactionalSet: The instance of Transactional Set
with the specified name.
"""
return self._get_or_create_object(name, TransactionalSet)
def _get_or_create_object(self, name, proxy_type):
if self.state != _STATE_ACTIVE:
raise TransactionError("Transaction is not in active state.")
self._check_thread()
key = (proxy_type, name)
try:
return self._objects[key]
except KeyError:
            proxy = make_blocking(proxy_type(name, self, self._context))
            self._objects[key] = proxy
            return proxy
def _check_thread(self):
        if thread_id() != self.thread_id:
raise TransactionError("Transaction cannot span multiple threads.")
def _check_timeout(self):
if time.time() > self.timeout + self.start_time:
raise TransactionError("Transaction has timed out.")
def __enter__(self):
self.begin()
return self
def __exit__(self, type, value, traceback):
if not type and not value and self.state == _STATE_ACTIVE:
self.commit()
elif self.state in (_STATE_PARTIAL_COMMIT, _STATE_ACTIVE):
self.rollback()
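# Illustrative usage sketch (not part of the module). How `client` exposes
# new_transaction() is an assumption for demonstration; the control flow
# (context manager -> begin, then commit or rollback) follows the Transaction class above.
def _example_transaction(client):
    transaction = client.new_transaction(120, 1, TWO_PHASE)  # timeout (s), durability, type
    with transaction:  # __enter__() calls begin(); __exit__() commits or rolls back
        txn_map = transaction.get_map("transactional-map")
        txn_map.put("key", "value")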
|
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""D4PG agent implementation."""
import copy
import dataclasses
from typing import Iterator, List, Optional, Tuple
from acme import adders
from acme import core
from acme import datasets
from acme import specs
from acme import types
from acme.adders import reverb as reverb_adders
from acme.agents import agent
from acme.agents.tf import actors
from acme.agents.tf.d4pg import learning
from acme.tf import networks as network_utils
from acme.tf import utils
from acme.tf import variable_utils
from acme.utils import counting
from acme.utils import loggers
import reverb
import sonnet as snt
import tensorflow as tf
@dataclasses.dataclass
class D4PGConfig:
"""Configuration options for the D4PG agent."""
discount: float = 0.99
batch_size: int = 256
prefetch_size: int = 4
target_update_period: int = 100
policy_optimizer: Optional[snt.Optimizer] = None
critic_optimizer: Optional[snt.Optimizer] = None
min_replay_size: int = 1000
max_replay_size: int = 1000000
samples_per_insert: Optional[float] = 32.0
n_step: int = 5
sigma: float = 0.3
clipping: bool = True
replay_table_name: str = reverb_adders.DEFAULT_PRIORITY_TABLE
@dataclasses.dataclass
class D4PGNetworks:
"""Structure containing the networks for D4PG."""
policy_network: snt.Module
critic_network: snt.Module
observation_network: snt.Module
def __init__(
self,
policy_network: snt.Module,
critic_network: snt.Module,
observation_network: types.TensorTransformation,
):
# This method is implemented (rather than added by the dataclass decorator)
# in order to allow observation network to be passed as an arbitrary tensor
# transformation rather than as a snt Module.
# TODO(mwhoffman): use Protocol rather than Module/TensorTransformation.
self.policy_network = policy_network
self.critic_network = critic_network
self.observation_network = utils.to_sonnet_module(observation_network)
def init(self, environment_spec: specs.EnvironmentSpec):
"""Initialize the networks given an environment spec."""
# Get observation and action specs.
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
# Create variables for the observation net and, as a side-effect, get a
# spec describing the embedding space.
emb_spec = utils.create_variables(self.observation_network, [obs_spec])
# Create variables for the policy and critic nets.
_ = utils.create_variables(self.policy_network, [emb_spec])
_ = utils.create_variables(self.critic_network, [emb_spec, act_spec])
def make_policy(
self,
environment_spec: specs.EnvironmentSpec,
sigma: float = 0.0,
) -> snt.Module:
"""Create a single network which evaluates the policy."""
# Stack the observation and policy networks.
stack = [
self.observation_network,
self.policy_network,
]
# If a stochastic/non-greedy policy is requested, add Gaussian noise on
# top to enable a simple form of exploration.
# TODO(mwhoffman): Refactor this to remove it from the class.
if sigma > 0.0:
stack += [
network_utils.ClippedGaussian(sigma),
network_utils.ClipToSpec(environment_spec.actions),
]
# Return a network which sequentially evaluates everything in the stack.
return snt.Sequential(stack)
class D4PGBuilder:
"""Builder for D4PG which constructs individual components of the agent."""
def __init__(self, config: D4PGConfig):
self._config = config
def make_replay_tables(
self,
environment_spec: specs.EnvironmentSpec,
) -> List[reverb.Table]:
"""Create tables to insert data into."""
if self._config.samples_per_insert is None:
      # A samples_per_insert ratio of None means no insert/sample rate limit is
      # enforced; only the minimum-size limit below applies.
limiter = reverb.rate_limiters.MinSize(self._config.min_replay_size)
else:
# Create enough of an error buffer to give a 10% tolerance in rate.
samples_per_insert_tolerance = 0.1 * self._config.samples_per_insert
error_buffer = self._config.min_replay_size * samples_per_insert_tolerance
limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=self._config.min_replay_size,
samples_per_insert=self._config.samples_per_insert,
error_buffer=error_buffer)
replay_table = reverb.Table(
name=self._config.replay_table_name,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
max_size=self._config.max_replay_size,
rate_limiter=limiter,
signature=reverb_adders.NStepTransitionAdder.signature(
environment_spec))
return [replay_table]
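  # Worked example of the rate-limiter arithmetic in make_replay_tables() with the
  # default config: min_replay_size=1000 and samples_per_insert=32.0 give a
  # tolerance of 0.1 * 32.0 = 3.2 and an error_buffer of 1000 * 3.2 = 3200 samples.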
def make_dataset_iterator(
self,
reverb_client: reverb.Client,
) -> Iterator[reverb.ReplaySample]:
"""Create a dataset iterator to use for learning/updating the agent."""
# The dataset provides an interface to sample from replay.
dataset = datasets.make_reverb_dataset(
table=self._config.replay_table_name,
server_address=reverb_client.server_address,
batch_size=self._config.batch_size,
prefetch_size=self._config.prefetch_size)
# TODO(b/155086959): Fix type stubs and remove.
return iter(dataset) # pytype: disable=wrong-arg-types
def make_adder(
self,
replay_client: reverb.Client,
) -> adders.Adder:
"""Create an adder which records data generated by the actor/environment."""
return reverb_adders.NStepTransitionAdder(
priority_fns={self._config.replay_table_name: lambda x: 1.},
client=replay_client,
n_step=self._config.n_step,
discount=self._config.discount)
def make_actor(
self,
policy_network: snt.Module,
adder: Optional[adders.Adder] = None,
variable_source: Optional[core.VariableSource] = None,
):
"""Create an actor instance."""
if variable_source:
# Create the variable client responsible for keeping the actor up-to-date.
variable_client = variable_utils.VariableClient(
client=variable_source,
variables={'policy': policy_network.variables},
update_period=1000,
)
# Make sure not to use a random policy after checkpoint restoration by
# assigning variables before running the environment loop.
variable_client.update_and_wait()
else:
variable_client = None
# Create the actor which defines how we take actions.
return actors.FeedForwardActor(
policy_network=policy_network,
adder=adder,
variable_client=variable_client,
)
def make_learner(
self,
networks: Tuple[D4PGNetworks, D4PGNetworks],
dataset: Iterator[reverb.ReplaySample],
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = False,
):
"""Creates an instance of the learner."""
online_networks, target_networks = networks
# The learner updates the parameters (and initializes them).
return learning.D4PGLearner(
policy_network=online_networks.policy_network,
critic_network=online_networks.critic_network,
observation_network=online_networks.observation_network,
target_policy_network=target_networks.policy_network,
target_critic_network=target_networks.critic_network,
target_observation_network=target_networks.observation_network,
policy_optimizer=self._config.policy_optimizer,
critic_optimizer=self._config.critic_optimizer,
clipping=self._config.clipping,
discount=self._config.discount,
target_update_period=self._config.target_update_period,
dataset_iterator=dataset,
counter=counter,
logger=logger,
checkpoint=checkpoint,
)
class D4PG(agent.Agent):
"""D4PG Agent.
This implements a single-process D4PG agent. This is an actor-critic algorithm
that generates data via a behavior policy, inserts N-step transitions into
a replay buffer, and periodically updates the policy (and as a result the
behavior) by sampling uniformly from this buffer.
"""
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
policy_network: snt.Module,
critic_network: snt.Module,
observation_network: types.TensorTransformation = tf.identity,
discount: float = 0.99,
batch_size: int = 256,
prefetch_size: int = 4,
target_update_period: int = 100,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: float = 32.0,
n_step: int = 5,
sigma: float = 0.3,
clipping: bool = True,
replay_table_name: str = reverb_adders.DEFAULT_PRIORITY_TABLE,
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
checkpoint: bool = True,
):
"""Initialize the agent.
Args:
environment_spec: description of the actions, observations, etc.
policy_network: the online (optimized) policy.
critic_network: the online critic.
observation_network: optional network to transform the observations before
they are fed into any network.
discount: discount to use for TD updates.
batch_size: batch size for updates.
prefetch_size: size to prefetch from replay.
target_update_period: number of learner steps to perform before updating
the target networks.
policy_optimizer: optimizer for the policy network updates.
critic_optimizer: optimizer for the critic network updates.
min_replay_size: minimum replay size before updating.
max_replay_size: maximum replay size.
samples_per_insert: number of samples to take from replay for every insert
that is made.
n_step: number of steps to squash into a single transition.
sigma: standard deviation of zero-mean, Gaussian exploration noise.
clipping: whether to clip gradients by global norm.
replay_table_name: string indicating what name to give the replay table.
counter: counter object used to keep track of steps.
logger: logger object to be used by learner.
checkpoint: boolean indicating whether to checkpoint the learner.
"""
# Create the Builder object which will internally create agent components.
builder = D4PGBuilder(
# TODO(mwhoffman): pass the config dataclass in directly.
# TODO(mwhoffman): use the limiter rather than the workaround below.
# Right now this modifies min_replay_size and samples_per_insert so that
# they are not controlled by a limiter and are instead handled by the
# Agent base class (the above TODO directly references this behavior).
D4PGConfig(
discount=discount,
batch_size=batch_size,
prefetch_size=prefetch_size,
target_update_period=target_update_period,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
min_replay_size=1, # Let the Agent class handle this.
max_replay_size=max_replay_size,
samples_per_insert=None, # Let the Agent class handle this.
n_step=n_step,
sigma=sigma,
clipping=clipping,
replay_table_name=replay_table_name,
))
# TODO(mwhoffman): pass the network dataclass in directly.
online_networks = D4PGNetworks(policy_network=policy_network,
critic_network=critic_network,
observation_network=observation_network)
# Target networks are just a copy of the online networks.
target_networks = copy.deepcopy(online_networks)
# Initialize the networks.
online_networks.init(environment_spec)
target_networks.init(environment_spec)
# TODO(mwhoffman): either make this Dataclass or pass only one struct.
# The network struct passed to make_learner is just a tuple for the
# time-being (for backwards compatibility).
networks = (online_networks, target_networks)
# Create the behavior policy.
policy_network = online_networks.make_policy(environment_spec, sigma)
# Create the replay server and grab its address.
replay_tables = builder.make_replay_tables(environment_spec)
replay_server = reverb.Server(replay_tables, port=None)
replay_client = reverb.Client(f'localhost:{replay_server.port}')
# Create actor, dataset, and learner for generating, storing, and consuming
# data respectively.
adder = builder.make_adder(replay_client)
actor = builder.make_actor(policy_network, adder)
dataset = builder.make_dataset_iterator(replay_client)
learner = builder.make_learner(networks, dataset, counter, logger,
checkpoint)
super().__init__(
actor=actor,
learner=learner,
min_observations=max(batch_size, min_replay_size),
observations_per_step=float(batch_size) / samples_per_insert)
# Save the replay so we don't garbage collect it.
self._replay_server = replay_server
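# Illustrative sketch (not part of the module): wiring the agent into an environment
# loop. `environment` (a dm_env.Environment) and `make_networks` (returning policy,
# critic and observation modules suitable for D4PG) are placeholders assumed to
# exist elsewhere.
def _example_train(environment, make_networks, num_episodes=100):
  import acme  # for acme.EnvironmentLoop
  environment_spec = specs.make_environment_spec(environment)
  policy_network, critic_network, observation_network = make_networks(environment_spec)
  agent = D4PG(
      environment_spec=environment_spec,
      policy_network=policy_network,
      critic_network=critic_network,
      observation_network=observation_network,
  )
  loop = acme.EnvironmentLoop(environment, agent)
  loop.run(num_episodes=num_episodes)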
|
|
"""Demonstrating how to convert a flowpipe graph to a render farm job.
This guide expects that your render farm can handle dependencies between tasks.
"""
import json
import logging
import os
from tempfile import gettempdir
from flowpipe import Graph, INode, Node
# -----------------------------------------------------------------------------
#
# Necessary utilities
#
# -----------------------------------------------------------------------------
class JsonDatabase:
"""The Database stores the JSON-serialized nodes.
    The storage can also be handled via a database; this is just the easiest
    way for demonstration purposes. In production, a file-based storage also
has advantages for debugging and allows for easy hacking by just altering
the JSON files directly.
"""
PATH = os.path.join(gettempdir(), "json-database", "{identifier}.json")
@staticmethod
def set(node):
"""Store the node under it's identifier."""
serialized_json = JsonDatabase.PATH.format(identifier=node.identifier)
if not os.path.exists(os.path.dirname(serialized_json)):
os.makedirs(os.path.dirname(serialized_json))
with open(serialized_json, "w") as f:
json.dump(node.serialize(), f, indent=2)
return serialized_json
@staticmethod
def get(identifier):
"""Retrieve the node behind the given identifier."""
serialized_json = JsonDatabase.PATH.format(identifier=identifier)
with open(serialized_json, "r") as f:
data = json.load(f)
return INode.deserialize(data)
# Command templates to execute a flowpipe node in the terminal.
# Uses different python interpreters and commands based on the host application
# The template just needs the path to the serialized json file and optionally
# a range of frames passed to the node for the implicit batch conversion.
COMMANDS = {
"python": (
"python -c '"
"from my_farm import conversion;"
'conversion.evaluate_on_farm("{serialized_json}", {frames})\''
),
"maya": (
"mayapy -c '"
"import maya.standalone;"
'maya.standalone.initialize(name="python");'
"from my_farm import conversion;"
'conversion.evaluate_on_farm("{serialized_json}", {frames})\''
),
}
def convert_graph_to_job(graph):
"""Convert the graph to a dict representing a typical render farm job."""
job = {"name": graph.name, "tasks": []}
# Turn every node into a farm task
tasks = {}
for node in graph.nodes:
serialized_json = JsonDatabase.set(node)
tasks[node.name] = []
# IMPLICIT BATCHING:
# Create individual tasks for each batch if the batch size is defined
# Feed the calculated frame range to each batch
if node.metadata.get("batch_size") is not None:
batch_size = node.metadata["batch_size"]
frames = node.inputs["frames"].value
i = 0
            while i < len(frames):
                end = i + batch_size
                if end > len(frames):
                    end = len(frames)
f = frames[i:end]
task = {"name": "{0}-{1}".format(node.name, i / batch_size)}
command = COMMANDS.get(
node.metadata.get("interpreter", "python"), None
)
task["command"] = command.format(
serialized_json=serialized_json, frames=f
)
job["tasks"].append(task)
tasks[node.name].append(task)
i += batch_size
else:
task = {"name": node.name}
command = COMMANDS.get(
node.metadata.get("interpreter", "python"), None
)
task["command"] = command.format(
serialized_json=serialized_json, frames=None
)
job["tasks"].append(task)
tasks[node.name].append(task)
# The dependencies between the tasks based on the connections of the Nodes
for node_name in tasks:
for task in tasks[node_name]:
node = graph[node_name]
task["dependencies"] = []
for upstream in [n.name for n in node.upstream_nodes]:
task["dependencies"] += [t["name"] for t in tasks[upstream]]
return job
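# The resulting job dict has roughly this shape (values shortened for illustration):
# {
#     "name": "Rendering",
#     "tasks": [
#         {"name": "MayaRender-0", "command": "mayapy -c '...'", "dependencies": []},
#         {"name": "UpdateDatabase", "command": "python -c '...'",
#          "dependencies": ["MayaRender-0", "MayaRender-1", "MayaRender-2"]}
#     ]
# }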
def evaluate_on_farm(serialized_json, frames=None):
"""Evaluate the node behind the given json file.
1. Deserialize the node
2. Collect any input values from any upstream dependencies
For implicit batching, the given frames are assigned to the node,
        overriding whatever might be stored in the json file, because all
batches share the same json file.
3. Evaluate the node
4. Serialize the node back into its original file
For implicit farm conversion, the serialization only happens once,
for the 'last' batch, knowing that the last batch in numbers might
not be the 'last' batch actually executed.
"""
# Debug logs might be useful on the farm
    logging.basicConfig(level=logging.DEBUG)
# Deserialize the node from the serialized json
with open(serialized_json, "r") as f:
data = json.load(f)
node = INode.deserialize(data)
# Retrieve the upstream output data
for name, input_plug in data["inputs"].items():
for identifier, output_plug in input_plug["connections"].items():
upstream_node = JsonDatabase.get(identifier)
node.inputs[name].value = upstream_node.outputs[output_plug].value
# Specifically assign the batch frames here if applicable
if frames is not None:
all_frames = node.inputs["frames"]
node.inputs["frames"] = frames
    # Actually evaluate the node
node.evaluate()
# Store the result back into the same file ONLY once
# ALL batch processes access the same json file so the result is only stored
# for the last batch, knowing that the last batch in numbers might not be
# the last batch actually executed
if frames is not None and frames[-1] != all_frames[-1]:
return
with open(serialized_json, "w") as f:
json.dump(node.serialize(), f, indent=2)
# -----------------------------------------------------------------------------
#
# Examples
#
# -----------------------------------------------------------------------------
@Node(outputs=["renderings"], metadata={"interpreter": "maya"})
def MayaRender(frames, scene_file):
"""Render the given frames from the given scene.."""
return {"renderings": "/renderings/file.%04d.exr"}
@Node(outputs=["status"])
def UpdateDatabase(id_, images):
"""Update the database entries of the given asset with the given data."""
return {"status": True}
def implicit_batching(frames, batch_size):
"""Batches are created during the farm conversion."""
graph = Graph(name="Rendering")
render = MayaRender(
graph=graph,
frames=list(range(frames)),
scene_file="/scene/for/rendering.ma",
metadata={"batch_size": batch_size},
)
update = UpdateDatabase(graph=graph, id_=123456)
render.outputs["renderings"].connect(update.inputs["images"])
print(graph)
print(json.dumps(convert_graph_to_job(graph), indent=2))
def explicit_batching(frames, batch_size):
"""Batches are already part of the graph."""
graph = Graph(name="Rendering")
update_database = UpdateDatabase(graph=graph, id_=123456)
for i in range(0, frames, batch_size):
maya_render = MayaRender(
name="MayaRender{0}-{1}".format(i, i + batch_size),
graph=graph,
frames=list(range(i, i + batch_size)),
scene_file="/scene/for/rendering.ma",
)
maya_render.outputs["renderings"].connect(
update_database.inputs["images"][str(i)]
)
print(graph)
print(json.dumps(convert_graph_to_job(graph), indent=2))
if __name__ == "__main__":
implicit_batching(30, 10)
explicit_batching(30, 10)
|
|
# pointsToSpheres.py copyright (c) 2005 by Charl P. Botha <[email protected]>
# $Id$
# see module documentation
import gen_utils
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import wx
import vtk
class pointsToSpheres(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self._inputPoints = None
self._internalPoints = None
self._config.radius = 5
self._config.thetaResolution = 8 # minimum 3
self._config.phiResolution = 8 # minimum 3
self._config.numInternalSpheres = 3
configList = [('Sphere radius:', 'radius', 'base:float', 'text',
'The radius of the spheres that will be created '
'in world coordinate units.'),
('Theta resolution:', 'thetaResolution', 'base:int',
'text',
'Number of points in the longitudinal direction.'),
('Phi resolution:', 'phiResolution', 'base:int',
'text',
'Number of points in the latitudinal direction.'),
('Number of internal spheres:', 'numInternalSpheres',
'base:int', 'text',
'Number of spheres to create in the interior.')]
self._appendPolyData = vtk.vtkAppendPolyData()
if False:
# checked on 20090314: dummy input is very definitely
# required
# we do need a dummy sphere, else the appender complains
dummySphere = vtk.vtkSphereSource()
dummySphere.SetRadius(0.0)
# and a dummy calc, with -1 index
# if we don't add the VolumeIndex array here as well, the append
# polydata discards all the others
calc = vtk.vtkArrayCalculator()
calc.SetAttributeModeToUsePointData()
calc.SetFunction('-1')
calc.SetResultArrayName('VolumeIndex')
calc.SetInput(dummySphere.GetOutput())
self._appendPolyData.AddInput(calc.GetOutput())
else:
self._appendPolyData.AddInput(vtk.vtkPolyData())
# this will be a list of lists containing tuples
# (vtkArrayCalculator, vtkSphereSource)
self._sphereSources = []
# this will hold our shallow-copied output
self._output = vtk.vtkPolyData()
ScriptedConfigModuleMixin.__init__(
self, configList,
{'Module (self)' : self,
'vtkAppendPolyData' : self._appendPolyData})
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
# get rid of our reference
del self._appendPolyData
del self._sphereSources
del self._output
def get_input_descriptions(self):
return ('Selected points',)
def set_input(self, idx, inputStream):
if inputStream is not self._inputPoints:
            if inputStream is None:
self._inputPoints = None
elif hasattr(inputStream, 'devideType') and \
inputStream.devideType == 'namedPoints':
# correct type
self._inputPoints = inputStream
else:
raise TypeError, 'This input requires a named points type.'
def get_output_descriptions(self):
return ('PolyData spheres',)
def get_output(self, idx):
#return self._appendPolyData.GetOutput()
return self._output
def logic_to_config(self):
pass
def config_to_logic(self):
# if any of the parameters have changed, this affects all the spheres
# fortunately VTK caches this type of parameter so we don't have to
# check
# some sanity checking
if self._config.radius < 0:
self._config.radius = 0
if self._config.thetaResolution < 3:
self._config.thetaResolution = 3
if self._config.phiResolution < 3:
self._config.phiResolution = 3
if self._config.numInternalSpheres < 0:
self._config.numInternalSpheres = 0
# if the number of internal spheres has changed, we have to start over
haveToCreate = False
if len(self._sphereSources) > 0:
# this means we HAVE spheres already
currentNum = len(self._sphereSources[0]) - 1
if currentNum != self._config.numInternalSpheres:
haveToCreate = True
if haveToCreate:
self._createSpheres()
else:
radiusStep = self._getRadiusStep()
for spheres in self._sphereSources:
for i in range(len(spheres)):
# each element of spheres is a (calc, sphere) tuple
sphere = spheres[i][1]
sphere.SetRadius(self._config.radius - radiusStep * i)
sphere.SetThetaResolution(self._config.thetaResolution)
sphere.SetPhiResolution(self._config.phiResolution)
def execute_module(self):
# synchronise our balls on the input points (they might have changed)
self._syncOnInputPoints()
# run the whole pipeline
self._appendPolyData.Update()
# shallow copy the polydata
self._output.ShallowCopy(self._appendPolyData.GetOutput())
# indicate that the output has been modified
self._output.Modified()
def _syncOnInputPoints(self):
# extract a list from the input points
tempList = []
if self._inputPoints:
for i in self._inputPoints:
tempList.append(i['world'])
if tempList != self._internalPoints:
# store the new points
self._internalPoints = tempList
if len(self._internalPoints) == len(self._sphereSources):
# if the number of points has not changed, we only have to
# move points
for i in range(len(self._internalPoints)):
pt = self._internalPoints[i]
for calc,sphere in self._sphereSources[i]:
# set new centre
sphere.SetCenter(pt)
# set new index!
calc.SetFunction('%d' % (i,))
else:
# if the number of points HAS changed, we have to redo
# everything (we could try figuring out how things have
# changed, but we won't)
self._createSpheres()
def _destroySpheres(self):
# first remove all inputs from the appender
for spheres in self._sphereSources:
for calc, sphere in spheres:
self._appendPolyData.RemoveInput(calc.GetOutput())
# now actually nuke our references
del self._sphereSources[:]
def _createSpheres(self):
"""Create all spheres according to self._internalPoints.
"""
# make sure we're all empty
self._destroySpheres()
for ptIdx in range(len(self._internalPoints)):
pt = self._internalPoints[ptIdx]
# each point gets potentially more than one sphere
spheres = []
# then create and add the internal spheres
radiusStep = self._getRadiusStep()
# we do the mainSphere and the internal spheres in one go
for i in range(self._config.numInternalSpheres + 1):
sphere = vtk.vtkSphereSource()
sphere.SetCenter(pt)
sphere.SetRadius(self._config.radius - radiusStep * i)
sphere.SetThetaResolution(self._config.thetaResolution)
sphere.SetPhiResolution(self._config.phiResolution)
# use calculator to add array with VolumeIndex
calc = vtk.vtkArrayCalculator()
calc.SetAttributeModeToUsePointData()
calc.SetFunction('%d' % (ptIdx,))
calc.SetResultArrayName('VolumeIndex')
calc.SetInput(sphere.GetOutput())
self._appendPolyData.AddInput(calc.GetOutput())
spheres.append((calc,sphere))
self._sphereSources.append(spheres)
def _getRadiusStep(self):
radiusStep = self._config.radius / \
float(self._config.numInternalSpheres + 1)
return radiusStep
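# Worked example (illustrative note, not part of the original module): with the
# default config of radius 5 and numInternalSpheres 3, _getRadiusStep() returns
# 5 / (3 + 1) = 1.25, so _createSpheres() builds, for every input point, spheres
# with radii 5.0, 3.75, 2.5 and 1.25 (the main sphere plus three internal ones).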
|
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from blockstore import BlockStore, TxStore
from util import p2p_port
'''
This is a tool for comparing two or more bitcoinds to each other
using a provided test script.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
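# A minimal sketch (not part of the original tool) of the test generator shape
# described above: get_tests() is a generator yielding TestInstance objects.
# The class name and the empty block list are placeholders; a real test would
# build CBlock/CTransaction objects and state the expected outcome for each.
class ExampleCompareTest(object):
    def get_tests(self):
        blocks = []  # populate with CBlock objects built for the test
        # Each entry is [obj, expected_outcome]: True = should be accepted,
        # False = should be rejected, None = just compare node behavior.
        yield TestInstance([[b, True] for b in blocks], sync_every_block=True)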
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.create_callback_map()
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of [obj, True/False/None]:
# - obj is either a CBlock or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
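# Construction sketch (illustrative only), matching the format described in the
# comment above; block_a and tx_a stand in for objects built by a test:
#
#     test = TestInstance([[block_a, True], [tx_a, False]],
#                         sync_every_block=True, sync_every_tx=False)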
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
self.connections.append(NodeConn('127.0.0.1', p2p_port(i),
nodes[i], TestNode(self.block_store, self.tx_store)))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
self.connections[-1].cb.add_connection(self.connections[-1])
def wait_for_verack(self):
sleep_time = 0.05
max_tries = 10 / sleep_time # Wait at most 10 seconds
while max_tries > 0:
done = True
with mininode_lock:
for c in self.connections:
if c.cb.verack_received is False:
done = False
break
            if done:
                break
            time.sleep(sleep_time)
            max_tries -= 1
def wait_for_pings(self, counter):
received_pongs = False
while received_pongs is not True:
time.sleep(0.05)
received_pongs = True
with mininode_lock:
for c in self.connections:
if c.cb.received_ping_response(counter) is not True:
received_pongs = False
break
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
# Wait for nodes to request block (50ms sleep * 20 tries * num_blocks)
max_tries = 20*num_blocks
while max_tries > 0:
with mininode_lock:
results = [ blockhash in c.cb.block_request_map and
c.cb.block_request_map[blockhash] for c in self.connections ]
if False not in results:
break
time.sleep(0.05)
max_tries -= 1
# --> error if not requested
if max_tries == 0:
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# --> Answer request (we did this inline!)
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
    # Analogous to sync_blocks (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
max_tries = 20*num_events
while max_tries > 0:
with mininode_lock:
results = [ txhash in c.cb.tx_request_map and
c.cb.tx_request_map[txhash] for c in self.connections ]
if False not in results:
break
time.sleep(0.05)
max_tries -= 1
# --> error if not requested
if max_tries == 0:
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# --> Answer request (we did this inline!)
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome ] = [ None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for b_or_t, outcome in test_instance.blocks_and_transactions:
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
# Add to shared block_store, set as current block
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(block.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256,
len(test_instance.blocks_and_transactions))
if (not self.check_results(block.sha256, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
test_number += 1
self.block_store.close()
self.tx_store.close()
[ c.disconnect_node() for c in self.connections ]
|
|
"""ZFS based backup workflows."""
import datetime
import shlex
import gflags
import lvm
import workflow
FLAGS = gflags.FLAGS
gflags.DEFINE_string('rsync_options',
'--archive --acls --numeric-ids --delete --inplace',
'rsync command options')
gflags.DEFINE_string('rsync_path', '/usr/bin/rsync', 'path to rsync binary')
gflags.DEFINE_string('zfs_snapshot_prefix', 'ari-backup-',
'prefix for historical ZFS snapshots')
gflags.DEFINE_string('zfs_snapshot_timestamp_format', '%Y-%m-%d--%H%M',
'strftime() formatted timestamp used when naming new ZFS snapshots')
class ZFSLVMBackup(lvm.LVMSourceMixIn, workflow.BaseWorkflow):
"""Workflow for backing up a logical volume to a ZFS dataset.
    Data is copied from an LVM snapshot to a ZFS dataset using rsync and then
ZFS commands are issued to create historical snapshots. The ZFS snapshot
lifecycle is also managed by this class. When a backup completes, snapshots
older than snapshot_expiration_days are destroyed.
This approach has some benefits over rdiff-backup in that all backup
datapoints are easily browseable and replication of the backup data using
ZFS streams is generally less resource intensive than using something like
rsync to mirror the files created by rdiff-backup.
One downside is that it's easier to store all file metadata using
rdiff-backup. Rsync can only store metadata for files that the destination
file system can also store. For example, if extended file system
attributes are used on the source file system, but aren't available on the
destination, rdiff-backup will still record those attributes in its own
files. If faced with that same scenario, rsync would lose those attributes.
Furthermore, rsync must have root privilege to write arbitrary file
metadata.
New post-job hooks are added for creating ZFS snapshots and trimming old
ones.
"""
def __init__(self, label, source_hostname, rsync_dst, zfs_hostname,
dataset_name, snapshot_expiration_days, **kwargs):
"""Configure a ZFSLVMBackup object.
Args:
label: str, label for the backup job (e.g. database-server1).
source_hostname: str, the name of the host with the source data to
backup.
rsync_dst: str, the destination argument for the rsync command line
(e.g. backupbox:/backup-store/database-server1).
zfs_hostname: str, the name of the backup destination host where we will
be managing the ZFS snapshots.
dataset_name: str, the full ZFS path (not file system path) to the
dataset holding the backups for this job
(e.g. tank/backup-store/database-server1).
      snapshot_expiration_days: int, the maximum age of a ZFS snapshot in days.
Pro tip: It's a good practice to reuse the label argument as the last
path component in the rsync_dst and dataset_name arguments.
"""
# Call our super class's constructor to enable LVM snapshot management
super(ZFSLVMBackup, self).__init__(label, **kwargs)
# Assign instance vars specific to this class.
self.source_hostname = source_hostname
self.rsync_dst = rsync_dst
self.zfs_hostname = zfs_hostname
self.dataset_name = dataset_name
# Assign flags to instance vars so they might be easily overridden in
# workflow configs.
self.rsync_options = FLAGS.rsync_options
self.rsync_path = FLAGS.rsync_path
self.zfs_snapshot_prefix = FLAGS.zfs_snapshot_prefix
self.zfs_snapshot_timestamp_format = FLAGS.zfs_snapshot_timestamp_format
self.add_post_hook(self._create_zfs_snapshot)
self.add_post_hook(self._destroy_expired_zfs_snapshots,
{'days': snapshot_expiration_days})
def _get_current_datetime(self):
"""Returns datetime object with the current date and time.
This method is mostly useful for testing purposes.
"""
return datetime.datetime.now()
def _run_custom_workflow(self):
"""Run rsync backup of LVM snapshot to ZFS dataset."""
# TODO(jpwoodbu) Consider throwing an exception if we see things in the
# include or exclude lists since we don't use them in this class.
self.logger.debug('ZFSLVMBackup._run_custom_workflow started.')
# Since we're dealing with ZFS datasets, let's always exclude the .zfs
# directory in our rsync options.
rsync_options = shlex.split(self.rsync_options) + ['--exclude', '/.zfs']
# We add a trailing slash to the src path otherwise rsync will make a
# subdirectory at the destination, even if the destination is already a
# directory.
rsync_src = self._snapshot_mount_point_base_path + '/'
command = [self.rsync_path] + rsync_options + [rsync_src, self.rsync_dst]
self.run_command(command, self.source_hostname)
self.logger.debug('ZFSLVMBackup._run_custom_workflow completed.')
def _create_zfs_snapshot(self, error_case):
"""Creates a new ZFS snapshot of our destination dataset.
The name of the snapshot will include the zfs_snapshot_prefix provided by
    FLAGS and a timestamp. The zfs_snapshot_prefix is used by
    _find_snapshots_older_than() when deciding which snapshots to
    destroy. The timestamp encoded in a snapshot name is only for end-user
convenience. The creation metadata on the ZFS snapshot is what is used to
determine a snapshot's age.
This method does nothing if error_case is True.
Args:
error_case: bool, whether an error has occurred during the backup.
"""
if not error_case:
self.logger.info('Creating ZFS snapshot...')
timestamp = self._get_current_datetime().strftime(
self.zfs_snapshot_timestamp_format)
snapshot_name = self.zfs_snapshot_prefix + timestamp
snapshot_path = '{dataset_name}@{snapshot_name}'.format(
dataset_name=self.dataset_name, snapshot_name=snapshot_name)
command = ['zfs', 'snapshot', snapshot_path]
self.run_command(command, self.zfs_hostname)
def _find_snapshots_older_than(self, days):
"""Returns snapshots older than the given number of days.
Only snapshots that meet the following criteria are returned:
1. They were created at least "days" ago.
2. Their name is prefixed with FLAGS.zfs_snapshot_prefix.
Args:
days: int, the minimum age of the snapshots in days.
Returns:
A list of filtered snapshots.
"""
expiration = self._get_current_datetime() - datetime.timedelta(days=days)
# Let's find all the snapshots for this dataset.
command = ['zfs', 'get', '-rH', '-o', 'name,value', 'type',
self.dataset_name]
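    # With -H the output is header-less and tab-separated; each line looks
    # roughly like (dataset name illustrative):
    #   tank/backup-store/database-server1@ari-backup-2015-01-01--0130    snapshot
    # Filesystems and volumes report their own type, so only the 'snapshot'
    # rows are kept below.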
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
snapshots = list()
# Sometimes we get extra lines which are empty, so we'll strip the lines.
for line in stdout.strip().splitlines():
name, dataset_type = line.split('\t')
if dataset_type == 'snapshot':
# Let's try to only consider destroying snapshots made by us ;)
if name.split('@')[1].startswith(self.zfs_snapshot_prefix):
snapshots.append(name)
expired_snapshots = list()
for snapshot in snapshots:
creation_time = self._get_snapshot_creation_time(snapshot)
if creation_time <= expiration:
expired_snapshots.append(snapshot)
return expired_snapshots
def _get_snapshot_creation_time(self, snapshot):
"""Gets the creation time of a snapshot as a Python datetime object
Args:
snapshot: str, the fule ZFS path to the snapshot.
Returns:
A datetime object representing the creation time of the snapshot.
"""
command = ['zfs', 'get', '-H', '-o', 'value', 'creation', snapshot]
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
return datetime.datetime.strptime(stdout.strip(), '%a %b %d %H:%M %Y')
def _destroy_expired_zfs_snapshots(self, days, error_case):
"""Destroy snapshots older than the given numnber of days.
Any snapshots in the target dataset with a name that starts with
FLAGS.zfs_snapshot_prefix and a creation date older than days will be
destroyed. Depending on the size of the snapshots and the performance of
the disk subsystem, this operation could take a while.
This method does nothing if error_case is True.
Args:
days: int, the max age of a snapshot in days.
error_case: bool, whether an error has occurred during the backup.
"""
if not error_case:
self.logger.info('Looking for expired ZFS snapshots...')
snapshots = self._find_snapshots_older_than(days)
# Sentinel value used to log if we destroyed no snapshots.
snapshots_destroyed = False
# Destroy expired snapshots.
for snapshot in snapshots:
command = ['zfs', 'destroy', snapshot]
self.run_command(command, self.zfs_hostname)
snapshots_destroyed = True
self.logger.info('{snapshot} destroyed.'.format(snapshot=snapshot))
if not snapshots_destroyed:
self.logger.info('Found no expired ZFS snapshots.')
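# A minimal usage sketch (not part of this module): an ari-backup job config
# would typically instantiate the workflow with arguments like those documented
# in __init__ above and then invoke the workflow's run method (hostnames,
# dataset, and paths below are placeholders):
#
#     backup = ZFSLVMBackup(
#         label='database-server1',
#         source_hostname='database-server1',
#         rsync_dst='backupbox:/backup-store/database-server1',
#         zfs_hostname='backupbox',
#         dataset_name='tank/backup-store/database-server1',
#         snapshot_expiration_days=30)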
|
|
from unittest import mock
import kafka.codec
from kafka.errors import UnsupportedCodecError
import pytest
from aiokafka.record.default_records import (
DefaultRecordBatch, DefaultRecordBatchBuilder
)
@pytest.mark.parametrize("compression_type,crc", [
(DefaultRecordBatch.CODEC_NONE, 3950153926),
# Gzip header includes timestamp, so checksum varies
(DefaultRecordBatch.CODEC_GZIP, None),
(DefaultRecordBatch.CODEC_SNAPPY, 2171068483),
(DefaultRecordBatch.CODEC_LZ4, 462121143),
(DefaultRecordBatch.CODEC_ZSTD, 1679657554),
])
def test_read_write_serde_v2(compression_type, crc):
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=compression_type, is_transactional=1,
producer_id=123456, producer_epoch=123, base_sequence=9999,
batch_size=999999)
headers = [("header1", b"aaa"), ("header2", b"bbb")]
for offset in range(10):
builder.append(
offset, timestamp=9999999 + offset, key=b"test", value=b"Super",
headers=headers)
buffer = builder.build()
reader = DefaultRecordBatch(bytes(buffer))
assert reader.validate_crc()
msgs = list(reader)
assert reader.is_transactional is True
assert reader.is_control_batch is False
assert reader.compression_type == compression_type
assert reader.magic == 2
assert reader.timestamp_type == 0
assert reader.base_offset == 0
assert reader.last_offset_delta == 9
assert reader.next_offset == 10
assert reader.first_timestamp == 9999999
assert reader.max_timestamp == 10000008
if crc is not None:
assert reader.crc == crc
for offset, msg in enumerate(msgs):
assert msg.offset == offset
assert msg.timestamp == 9999999 + offset
assert msg.key == b"test"
assert msg.value == b"Super"
assert msg.headers == headers
def test_written_bytes_equals_size_in_bytes_v2():
key = b"test"
value = b"Super"
headers = [("header1", b"aaa"), ("header2", b"bbb"), ("xx", None)]
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=0, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=999999)
size_in_bytes = builder.size_in_bytes(
0, timestamp=9999999, key=key, value=value, headers=headers)
pos = builder.size()
meta = builder.append(
0, timestamp=9999999, key=key, value=value, headers=headers)
assert builder.size() - pos == size_in_bytes
assert meta.size == size_in_bytes
def test_estimate_size_in_bytes_bigger_than_batch_v2():
key = b"Super Key"
value = b"1" * 100
headers = [("header1", b"aaa"), ("header2", b"bbb")]
estimate_size = DefaultRecordBatchBuilder.estimate_size_in_bytes(
key, value, headers)
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=0, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=999999)
builder.append(
0, timestamp=9999999, key=key, value=value, headers=headers)
buf = builder.build()
assert len(buf) <= estimate_size, \
"Estimate should always be upper bound"
def test_default_batch_builder_validates_arguments():
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=0, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=999999)
# Key should not be str
with pytest.raises(TypeError):
builder.append(
0, timestamp=9999999, key="some string", value=None, headers=[])
# Value should not be str
with pytest.raises(TypeError):
builder.append(
0, timestamp=9999999, key=None, value="some string", headers=[])
# Timestamp should be of proper type
with pytest.raises(TypeError):
builder.append(
0, timestamp="1243812793", key=None, value=b"some string",
headers=[])
# Offset of invalid type
with pytest.raises(TypeError):
builder.append(
"0", timestamp=9999999, key=None, value=b"some string", headers=[])
# Ok to pass value as None
builder.append(
0, timestamp=9999999, key=b"123", value=None, headers=[])
# Timestamp can be None
builder.append(
1, timestamp=None, key=None, value=b"some string", headers=[])
    # Ok to pass offsets in non-incremental order. This should not happen though
builder.append(
5, timestamp=9999999, key=b"123", value=None, headers=[])
# in case error handling code fails to fix inner buffer in builder
assert len(builder.build()) == 104
def test_default_correct_metadata_response():
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=0, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=1024 * 1024)
meta = builder.append(
0, timestamp=9999999, key=b"test", value=b"Super", headers=[])
assert meta.offset == 0
assert meta.timestamp == 9999999
assert meta.crc is None
assert meta.size == 16
assert repr(meta) == (
"DefaultRecordMetadata(offset=0, size={}, timestamp={})"
.format(meta.size, meta.timestamp)
)
def test_default_batch_size_limit():
# First message can be added even if it's too big
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=0, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=1024)
meta = builder.append(
0, timestamp=None, key=None, value=b"M" * 2000, headers=[])
assert meta.size > 0
assert meta.crc is None
assert meta.offset == 0
assert meta.timestamp is not None
assert len(builder.build()) > 2000
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=0, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=1024)
meta = builder.append(
0, timestamp=None, key=None, value=b"M" * 700, headers=[])
assert meta is not None
meta = builder.append(
1, timestamp=None, key=None, value=b"M" * 700, headers=[])
assert meta is None
meta = builder.append(
2, timestamp=None, key=None, value=b"M" * 700, headers=[])
assert meta is None
assert len(builder.build()) < 1000
@pytest.mark.parametrize("compression_type,name,checker_name", [
(DefaultRecordBatch.CODEC_GZIP, "gzip", "has_gzip"),
(DefaultRecordBatch.CODEC_SNAPPY, "snappy", "has_snappy"),
(DefaultRecordBatch.CODEC_LZ4, "lz4", "has_lz4"),
(DefaultRecordBatch.CODEC_ZSTD, "zstd", "has_zstd"),
])
def test_unavailable_codec(compression_type, name, checker_name):
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=compression_type, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=1024)
builder.append(0, timestamp=None, key=None, value=b"M" * 2000, headers=[])
correct_buffer = builder.build()
with mock.patch.object(kafka.codec, checker_name, return_value=False):
# Check that builder raises error
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=compression_type, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=1024)
error_msg = "Libraries for {} compression codec not found".format(name)
with pytest.raises(UnsupportedCodecError, match=error_msg):
builder.append(0, timestamp=None, key=None, value=b"M", headers=[])
builder.build()
# Check that reader raises same error
batch = DefaultRecordBatch(bytes(correct_buffer))
with pytest.raises(UnsupportedCodecError, match=error_msg):
list(batch)
def test_unsupported_yet_codec():
compression_type = DefaultRecordBatch.CODEC_MASK # It doesn't exist
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=compression_type, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=1024)
with pytest.raises(UnsupportedCodecError):
builder.append(0, timestamp=None, key=None, value=b"M", headers=[])
builder.build()
def test_build_without_append():
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=0, is_transactional=1,
producer_id=123456, producer_epoch=123, base_sequence=9999,
batch_size=999999)
buffer = builder.build()
reader = DefaultRecordBatch(bytes(buffer))
msgs = list(reader)
assert not msgs
def test_set_producer_state():
builder = DefaultRecordBatchBuilder(
magic=2, compression_type=0, is_transactional=0,
producer_id=-1, producer_epoch=-1, base_sequence=-1,
batch_size=999999)
builder.set_producer_state(
producer_id=700,
producer_epoch=5,
base_sequence=17)
assert builder.producer_id == 700
buffer = builder.build()
reader = DefaultRecordBatch(bytes(buffer))
assert reader.producer_id == 700
assert reader.producer_epoch == 5
assert reader.base_sequence == 17
|
|
import os
import sys
import subprocess
from six import print_
from ccmlib import common
from ccmlib.cmds.command import Cmd
from ccmlib.node import NodeError
def node_cmds():
return [
"show",
"remove",
"showlog",
"setlog",
"start",
"stop",
"ring",
"flush",
"compact",
"drain",
"cleanup",
"repair",
"scrub",
"verify",
"shuffle",
"sstablesplit",
"getsstables",
"decommission",
"json",
"updateconf",
"updatelog4j",
"stress",
"cli",
"cqlsh",
"scrub",
"verify",
"status",
"setdir",
"bulkload",
"version",
"nodetool",
"dsetool",
"setworkload",
"dse",
"hadoop",
"hive",
"pig",
"sqoop",
"spark",
"pause",
"resume",
"jconsole",
"versionfrombuild"
]
class NodeShowCmd(Cmd):
def description(self):
return "Display information on a node"
def get_parser(self):
usage = "usage: ccm node_name show [options]"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
self.node.show()
class NodeRemoveCmd(Cmd):
def description(self):
return "Remove a node (stopping it if necessary and deleting all its data)"
def get_parser(self):
usage = "usage: ccm node_name remove [options]"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
self.cluster.remove(self.node)
class NodeShowlogCmd(Cmd):
def description(self):
return "Show the log of node name (runs your $PAGER on its system.log)"
def get_parser(self):
usage = "usage: ccm node_name showlog [options]"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
log = self.node.logfilename()
pager = os.environ.get('PAGER', common.platform_pager())
os.execvp(pager, (pager, log))
class NodeSetlogCmd(Cmd):
def description(self):
return "Set node name log level (INFO, DEBUG, ...) with/without Java class - require a node restart"
def get_parser(self):
usage = "usage: ccm node_name setlog [options] level"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-c', '--class', type="string", dest="class_name", default=None,
help="Optional java class/package. Logging will be set for only this class/package if set")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
if len(args) == 1:
            print_('Missing log level', file=sys.stderr)
            parser.print_help()
            exit(1)
        self.level = args[1]
try:
self.class_name = options.class_name
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
def run(self):
try:
self.node.set_log_level(self.level, self.class_name)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class NodeClearCmd(Cmd):
def description(self):
return "Clear the node data & logs (and stop the node)"
def get_parser(self):
usage = "usage: ccm node_name_clear [options]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-a', '--all', action="store_true", dest="all",
help="Also clear the saved cache and node log files", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
self.node.stop()
self.node.clear(self.options.all)
class NodeStartCmd(Cmd):
def description(self):
return "Start a node"
def get_parser(self):
usage = "usage: ccm node start [options] name"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="Print standard output of cassandra process", default=False)
parser.add_option('--no-wait', action="store_true", dest="no_wait",
help="Do not wait for cassandra node to be ready", default=False)
parser.add_option('--wait-other-notice', action="store_true", dest="wait_other_notice",
help="Wait until all other live node of the cluster have marked this node UP", default=False)
parser.add_option('--wait-for-binary-proto', action="store_true", dest="wait_for_binary_proto",
help="Wait for the binary protocol to start", default=False)
parser.add_option('-j', '--dont-join-ring', action="store_true", dest="no_join_ring",
help="Launch the instance without joining the ring", default=False)
parser.add_option('--replace-address', type="string", dest="replace_address", default=None,
help="Replace a node in the ring through the cassandra.replace_address option")
parser.add_option('--jvm_arg', action="append", dest="jvm_args",
help="Specify a JVM argument", default=[])
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
try:
self.node.start(not self.options.no_join_ring,
no_wait=self.options.no_wait,
wait_other_notice=self.options.wait_other_notice,
wait_for_binary_proto=self.options.wait_for_binary_proto,
verbose=self.options.verbose,
replace_address=self.options.replace_address,
jvm_args=self.options.jvm_args)
except NodeError as e:
print_(str(e), file=sys.stderr)
print_("Standard error output is:", file=sys.stderr)
for line in e.process.stderr:
print_(line.rstrip('\n'), file=sys.stderr)
exit(1)
class NodeStopCmd(Cmd):
def description(self):
return "Stop a node"
def get_parser(self):
usage = "usage: ccm node stop [options] name"
parser = self._get_default_parser(usage, self.description())
parser.add_option('--no-wait', action="store_true", dest="no_wait",
help="Do not wait for the node to be stopped", default=False)
parser.add_option('-g', '--gently', action="store_true", dest="gently",
help="Shut down gently (default)", default=True)
parser.add_option('--not-gently', action="store_false", dest="gently",
help="Shut down immediately (kill -9)", default=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
try:
if not self.node.stop(not self.options.no_wait, gently=self.options.gently):
print_("%s is not running" % self.name, file=sys.stderr)
exit(1)
except NodeError as e:
print_(str(e), file=sys.stderr)
exit(1)
class _NodeToolCmd(Cmd):
def get_parser(self):
parser = self._get_default_parser(self.usage, self.description())
return parser
def description(self):
return self.descr_text
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
stdout, stderr = self.node.nodetool(self.nodetool_cmd + " " + " ".join((self.args[1:])))
print_(stderr)
print_(stdout)
class NodeNodetoolCmd(_NodeToolCmd):
usage = "usage: ccm node_name nodetool [options]"
descr_text = "Run nodetool (connecting to node name)"
def run(self):
stdout, stderr = self.node.nodetool(" ".join(self.args[1:]))
print_(stderr)
print_(stdout)
class NodeRingCmd(_NodeToolCmd):
usage = "usage: ccm node_name ring [options]"
nodetool_cmd = 'ring'
descr_text = "Print ring (connecting to node name)"
class NodeStatusCmd(_NodeToolCmd):
usage = "usage: ccm node_name status [options]"
nodetool_cmd = 'status'
descr_text = "Print status (connecting to node name)"
class NodeFlushCmd(_NodeToolCmd):
usage = "usage: ccm node_name flush [options]"
nodetool_cmd = 'flush'
descr_text = "Flush node name"
class NodeCompactCmd(_NodeToolCmd):
usage = "usage: ccm node_name compact [options]"
nodetool_cmd = 'compact'
descr_text = "Compact node name"
class NodeDrainCmd(_NodeToolCmd):
usage = "usage: ccm node_name drain [options]"
nodetool_cmd = 'drain'
descr_text = "Drain node name"
class NodeCleanupCmd(_NodeToolCmd):
usage = "usage: ccm node_name cleanup [options]"
nodetool_cmd = 'cleanup'
descr_text = "Run cleanup on node name"
class NodeRepairCmd(_NodeToolCmd):
usage = "usage: ccm node_name repair [options]"
nodetool_cmd = 'repair'
descr_text = "Run repair on node name"
class NodeVersionCmd(_NodeToolCmd):
usage = "usage: ccm node_name version"
nodetool_cmd = 'version'
descr_text = "Get the cassandra version of node"
class NodeDecommissionCmd(_NodeToolCmd):
usage = "usage: ccm node_name decommission [options]"
nodetool_cmd = 'decommission'
descr_text = "Run decommission on node name"
def run(self):
self.node.decommission()
class _DseToolCmd(Cmd):
def get_parser(self):
parser = self._get_default_parser(self.usage, self.description())
return parser
def description(self):
return self.descr_text
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
self.node.dsetool(self.dsetool_cmd)
class NodeDsetoolCmd(_DseToolCmd):
usage = "usage: ccm node_name dsetool [options]"
descr_text = "Run dsetool (connecting to node name)"
def run(self):
self.node.dsetool(" ".join(self.args[1:]))
class NodeCliCmd(Cmd):
def description(self):
return "Launch a cassandra cli connected to this node"
def get_parser(self):
usage = "usage: ccm node_name cli [options] [cli_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
parser.add_option('-x', '--exec', type="string", dest="cmds", default=None,
help="Execute the specified commands and exit")
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="With --exec, show cli output after completion", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.cli_options = parser.get_ignored() + args[1:]
def run(self):
self.node.run_cli(self.options.cmds, self.options.verbose, self.cli_options)
class NodeCqlshCmd(Cmd):
def description(self):
return "Launch a cqlsh session connected to this node"
def get_parser(self):
usage = "usage: ccm node_name cqlsh [options] [cli_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
parser.add_option('-x', '--exec', type="string", dest="cmds", default=None,
help="Execute the specified commands and exit")
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="With --exec, show cli output after completion", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.cqlsh_options = parser.get_ignored() + args[1:]
def run(self):
self.node.run_cqlsh(self.options.cmds, self.options.verbose, self.cqlsh_options)
class NodeBulkloadCmd(Cmd):
def description(self):
return "Bulkload files into the cluster by connecting to this node"
def get_parser(self):
usage = "usage: ccm node_name bulkload [options] [sstable_dir]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.loader_options = parser.get_ignored() + args[1:]
def run(self):
self.node.bulkload(self.loader_options)
class NodeScrubCmd(Cmd):
def description(self):
return "Scrub files"
def get_parser(self):
usage = "usage: ccm node_name scrub [options] <keyspace> <cf>"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.scrub_options = parser.get_ignored() + args[1:]
def run(self):
self.node.scrub(self.scrub_options)
class NodeVerifyCmd(Cmd):
def description(self):
return "Verify files"
def get_parser(self):
usage = "usage: ccm node_name verify [options] <keyspace> <cf>"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.verify_options = parser.get_ignored() + args[1:]
def run(self):
self.node.verify(self.verify_options)
class NodeJsonCmd(Cmd):
def description(self):
return "Call sstable2json on the sstables of this node"
def get_parser(self):
usage = "usage: ccm node_name json [options] [file]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-k', '--keyspace', type="string", dest="keyspace", default=None,
help="The keyspace to use [use all keyspaces by default]")
parser.add_option('-c', '--column-families', type="string", dest="cfs", default=None,
help="Comma separated list of column families to use (requires -k to be set)")
parser.add_option('-e', '--enumerate-keys', action="store_true", dest="enumerate_keys",
help="Only enumerate keys (i.e, call sstable2keys)", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.keyspace = options.keyspace
if len(args) < 2:
print_("You must specify an output file.")
parser.print_help()
exit(1)
if self.keyspace is None:
print_("You must specify a keyspace.")
parser.print_help()
exit(1)
self.outfile = args[-1]
self.column_families = options.cfs.split(',') if options.cfs else None
def run(self):
try:
with open(self.outfile, 'w') as f:
self.node.run_sstable2json(keyspace=self.keyspace,
out_file=f,
column_families=self.column_families,
enumerate_keys=self.options.enumerate_keys)
except common.ArgumentError as e:
print_(e, file=sys.stderr)
class NodeSstablesplitCmd(Cmd):
def description(self):
return "Run sstablesplit on the sstables of this node"
def get_parser(self):
usage = "usage: ccm node_name sstablesplit [options] [file]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-k', '--keyspace', type="string", dest="keyspace", default=None,
help="The keyspace to use [use all keyspaces by default]")
parser.add_option('-c', '--column-families', type="string", dest='cfs', default=None,
help="Comma separated list of column families to use (requires -k to be set)")
parser.add_option('-s', '--size', type='int', dest="size", default=None,
help="Maximum size in MB for the output sstables (default: 50 MB)")
parser.add_option('--no-snapshot', action='store_true', dest="no_snapshot", default=False,
help="Don't snapshot the sstables before splitting")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.keyspace = options.keyspace
self.size = options.size
self.no_snapshot = options.no_snapshot
self.column_families = None
self.datafiles = None
if options.cfs is not None:
if self.keyspace is None:
print_("You need a keyspace (option -k) if you specify column families", file=sys.stderr)
exit(1)
self.column_families = options.cfs.split(',')
if len(args) > 1:
if self.column_families is None:
print_("You need a column family (option -c) if you specify datafiles", file=sys.stderr)
exit(1)
self.datafiles = args[1:]
def run(self):
self.node.run_sstablesplit(datafiles=self.datafiles, keyspace=self.keyspace,
column_families=self.column_families, size=self.size,
no_snapshot=self.no_snapshot)
class NodeGetsstablesCmd(Cmd):
def description(self):
return "Run getsstables to get absolute path of sstables in this node"
def get_parser(self):
usage = "usage: ccm node_name getsstables [options] [file]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-k', '--keyspace', type="string", dest="keyspace", default=None,
help="The keyspace to use [use all keyspaces by default]")
parser.add_option('-t', '--tables', type="string", dest='tables', default=None,
help="Comma separated list of tables to use (requires -k to be set)")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.keyspace = options.keyspace
self.tables = None
self.datafiles = None
if options.tables is not None:
if self.keyspace is None:
print_("You need a keyspace (option -k) if you specify tables", file=sys.stderr)
exit(1)
self.tables = options.tables.split(',')
if len(args) > 1:
if self.tables is None:
print_("You need a tables (option -t) if you specify datafiles", file=sys.stderr)
exit(1)
self.datafiles = args[1:]
def run(self):
sstablefiles = self.node.get_sstablespath(datafiles=self.datafiles, keyspace=self.keyspace,
tables=self.tables)
print_('\n'.join(sstablefiles))
class NodeUpdateconfCmd(Cmd):
def description(self):
return "Update the cassandra config files for this node (useful when updating cassandra)"
def get_parser(self):
usage = "usage: ccm node_name updateconf [options] [ new_setting | ... ], where new_setting should be a string of the form 'compaction_throughput_mb_per_sec: 32'"
parser = self._get_default_parser(usage, self.description())
parser.add_option('--no-hh', '--no-hinted-handoff', action="store_false",
dest="hinted_handoff", default=True, help="Disable hinted handoff")
parser.add_option('--batch-cl', '--batch-commit-log', action="store_true",
dest="cl_batch", default=False, help="Set commit log to batch mode")
parser.add_option('--rt', '--rpc-timeout', action="store", type='int',
dest="rpc_timeout", help="Set rpc timeout")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
args = args[1:]
try:
self.setting = common.parse_settings(args)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
def run(self):
self.setting['hinted_handoff_enabled'] = self.options.hinted_handoff
if self.options.rpc_timeout is not None:
if self.node.cluster.cassandra_version() < "1.2":
self.setting['rpc_timeout_in_ms'] = self.options.rpc_timeout
else:
self.setting['read_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['range_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['write_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['truncate_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['request_timeout_in_ms'] = self.options.rpc_timeout
self.node.set_configuration_options(values=self.setting, batch_commitlog=self.options.cl_batch)
#
# Class implements the functionality of updating log4j-server.properties
# on the given node by copying the given config into
# ~/.ccm/name-of-cluster/nodeX/conf/log4j-server.properties
#
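# Example invocation (cluster and node names are illustrative):
#   ccm node1 updatelog4j -p /path/to/log4j-server.properties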
class NodeUpdatelog4jCmd(Cmd):
def description(self):
return "Update the Cassandra log4j-server.properties configuration file under given node"
def get_parser(self):
usage = "usage: ccm node_name updatelog4j -p <log4j config>"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-p', '--path', type="string", dest="log4jpath",
help="Path to new Cassandra log4j configuration file")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
try:
self.log4jpath = options.log4jpath
if self.log4jpath is None:
raise KeyError("[Errno] -p or --path <path of new log4j configuration file> is not provided")
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
except KeyError as e:
print_(str(e), file=sys.stderr)
exit(1)
def run(self):
try:
self.node.update_log4j(self.log4jpath)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class NodeStressCmd(Cmd):
def description(self):
return "Run stress on a node"
def get_parser(self):
usage = "usage: ccm node_name stress [options] [stress_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.stress_options = args[1:] + parser.get_ignored()
def run(self):
try:
self.node.stress(self.stress_options)
except OSError:
print_("Could not find stress binary (you may need to build it)", file=sys.stderr)
class NodeShuffleCmd(Cmd):
def description(self):
return "Run shuffle on a node"
def get_parser(self):
usage = "usage: ccm node_name shuffle [options] [shuffle_cmds]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.shuffle_cmd = args[1]
def run(self):
self.node.shuffle(self.shuffle_cmd)
class NodeSetdirCmd(Cmd):
def description(self):
return "Set the cassandra directory to use for the node"
def get_parser(self):
usage = "usage: ccm node_name setdir [options]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', "--version", type="string", dest="version",
help="Download and use provided cassandra or dse version. If version is of the form 'git:<branch name>', then the specified branch will be downloaded from the git repo and compiled. (takes precedence over --install-dir)", default=None)
parser.add_option("--install-dir", type="string", dest="install_dir",
help="Path to the cassandra or dse directory to use [default %default]", default="./")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
try:
self.node.set_install_dir(install_dir=self.options.install_dir, version=self.options.version, verbose=True)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class NodeSetworkloadCmd(Cmd):
def description(self):
return "Sets the workload for a DSE node"
def get_parser(self):
usage = "usage: ccm node_name setworkload [cassandra|solr|hadoop|spark|cfs]"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.workload = args[1]
workloads = ['cassandra', 'solr', 'hadoop', 'spark', 'cfs']
        if self.workload not in workloads:
            print_(self.workload + ' is not a valid workload')
exit(1)
def run(self):
try:
self.node.set_workload(workload=self.workload)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class NodeDseCmd(Cmd):
def description(self):
return "Launch a dse client application connected to this node"
def get_parser(self):
usage = "usage: ccm node_name dse [dse_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.dse_options = args[1:] + parser.get_ignored()
def run(self):
self.node.dse(self.dse_options)
class NodeHadoopCmd(Cmd):
def description(self):
return "Launch a hadoop session connected to this node"
def get_parser(self):
usage = "usage: ccm node_name hadoop [options] [hadoop_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.hadoop_options = args[1:] + parser.get_ignored()
def run(self):
self.node.hadoop(self.hadoop_options)
class NodeHiveCmd(Cmd):
def description(self):
return "Launch a hive session connected to this node"
def get_parser(self):
usage = "usage: ccm node_name hive [options] [hive_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.hive_options = args[1:] + parser.get_ignored()
def run(self):
self.node.hive(self.hive_options)
class NodePigCmd(Cmd):
def description(self):
return "Launch a pig session connected to this node"
def get_parser(self):
usage = "usage: ccm node_name pig [options] [pig_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
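        # Note: unlike the other client commands, the ignored parser options are placed before the positional args.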
self.pig_options = parser.get_ignored() + args[1:]
def run(self):
self.node.pig(self.pig_options)
class NodeSqoopCmd(Cmd):
def description(self):
return "Launch a sqoop session connected to this node"
def get_parser(self):
usage = "usage: ccm node_name sqoop [options] [sqoop_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.sqoop_options = args[1:] + parser.get_ignored()
def run(self):
self.node.sqoop(self.sqoop_options)
class NodeSparkCmd(Cmd):
def description(self):
return "Launch a spark session connected to this node"
def get_parser(self):
usage = "usage: ccm node_name spark [options] [spark_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
self.spark_options = args[1:] + parser.get_ignored()
def run(self):
self.node.spark(self.spark_options)
class NodePauseCmd(Cmd):
def description(self):
return "Send a SIGSTOP to this node"
def get_parser(self):
usage = "usage: ccm node_name pause"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
self.node.pause()
class NodeResumeCmd(Cmd):
def description(self):
return "Send a SIGCONT to this node"
def get_parser(self):
usage = "usage: ccm node_name resume"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
self.node.resume()
class NodeJconsoleCmd(Cmd):
def description(self):
return "Opens jconsole client and connect to running node"
def get_parser(self):
usage = "usage: ccm node_name jconsole"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
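        # Point jconsole at the node's JMX port on localhost.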
cmds = ["jconsole", "localhost:%s" % self.node.jmx_port]
try:
subprocess.call(cmds)
        except OSError:
            print_("Could not start jconsole. Please make sure jconsole can be found in your $PATH.", file=sys.stderr)
exit(1)
class NodeVersionfrombuildCmd(Cmd):
def description(self):
return "Print the node's version as grepped from build.xml. Can be used when the node isn't running."
def get_parser(self):
usage = "usage: ccm node_name versionfrombuild"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True)
def run(self):
        version_from_build = common.get_version_from_build(self.node.get_install_dir())
        try:
            # Cross-check against the running node's nodetool, but tolerate a node
            # that isn't running -- this command is documented to work either way.
            version_from_nodetool = self.node.nodetool('version')[0].strip()
            if version_from_nodetool and (version_from_nodetool != version_from_build):
                print_('nodetool reports Cassandra version {ntv}; '
                       'version from build.xml is {bv}'.format(ntv=version_from_nodetool,
                                                               bv=version_from_build),
                       file=sys.stderr)
        except Exception:
            pass
        print_(version_from_build)