# Copyright (c) 2010, individual contributors (see AUTHORS file)
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import datetime
from dashboard.forms import BlogPostForm, BlogForm
from dashboard.models import BlogPost, Blog, Project
from dashboard.util import url_pathify, force_url_paths
from dashboard.util import avoid_duplicate_queries
from observatory.dashboard.views import projects
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from lib.markdown import markdown
# the number of posts per page
POSTS_PER_PAGE = 5
# alias for the first page of blog posts
def posts(request):
return posts_page(request, 1)
# shows a page of blog posts, the number of posts is set by POSTS_PER_PAGE
def posts_page(request, page_num):
paginator = Paginator(BlogPost.objects.order_by('date').reverse(),
POSTS_PER_PAGE)
# if the page requested does not exist, 404
if int(page_num) not in paginator.page_range:
raise Http404
page = paginator.page(page_num)
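# Note: avoid_duplicate_queries (from dashboard.util) appears to batch the
# author/project lookups for the posts on this page; the author mapping passes
# the already-loaded request.user so it can be reused instead of re-queried.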
avoid_duplicate_queries(page.object_list, "author", "project",
author = { request.user.id: request.user }
if request.user.is_authenticated() else {})
# otherwise, render
return render_to_response('blogs/posts.html', {
'page': page,
'disable_content': True
}, context_instance = RequestContext(request))
# shows a project's internally hosted blog, or redirects to an external one
def show_blog(request, project_url_path):
resp = force_url_paths(show_blog, project_url_path)
if resp: return resp
project = get_object_or_404(Project, url_path = project_url_path)
if project.blog.from_feed:
return HttpResponseRedirect(project.blog.url)
else:
return render_to_response('blogs/show-blog.html', {
'project': project,
'posts': project.blog.blogpost_set.all(),
'disable_content': True
}, context_instance = RequestContext(request))
# shows all blog posts by a specific user (personal blogs, mostly)
def show_user_blog(request, user_id):
user = get_object_or_404(User, id = user_id)
return render_to_response('blogs/show-user.html', {
'posts': BlogPost.objects.filter(author = user),
'user': user,
'disable_content': True
}, context_instance = RequestContext(request))
# shows a specific blog post
def show_post(request, project_url_path, post_url_path):
resp = force_url_paths(show_post, project_url_path, post_url_path)
if resp: return resp
return show_post_real(request, post_url_path)
# show a post with a user-based url (personal posts)
def show_user_post(request, post_url_path):
resp = force_url_paths(show_user_post, post_url_path)
if resp: return resp
return show_post_real(request, post_url_path)
# actually does the template/redirect for showing posts
def show_post_real(request, post_url_path):
post = get_object_or_404(BlogPost, url_path = post_url_path)
if post.from_feed:
return HttpResponseRedirect(post.external_link)
else:
return render_to_response('blogs/show-post.html', {
'post': post
}, context_instance = RequestContext(request))
# write a new post
@login_required
def write_post(request, project_id):
project = get_object_or_404(Project, id = int(project_id))
if request.user not in project.authors.all():
return HttpResponseRedirect(reverse(projects.show,
args = (project.url_path,)))
return render_to_response('blogs/edit.html', {
'project': project,
'form': BlogPostForm()
}, context_instance = RequestContext(request))
# edit an existing post
@login_required
def edit_post(request, project_url_path, post_url_path):
# redirect if the url path is not in the correct format
resp = force_url_paths(edit_post, project_url_path, post_url_path)
if resp: return resp
post = get_object_or_404(BlogPost, url_path = post_url_path)
if request.user not in post.blog.project.authors.all():
return HttpResponseRedirect(reverse(projects.show,
args = (post.blog.project.url_path,)))
return render_to_response('blogs/edit.html', {
'project': post.blog.project,
'post': post,
'form': BlogPostForm(instance = post)
}, context_instance = RequestContext(request))
# creates a new post
@login_required
def create_post(request, project_id):
form = BlogPostForm(request.POST)
project = get_object_or_404(Project, id = int(project_id))
if request.user not in project.authors.all():
return HttpResponseRedirect(reverse(projects.show,
args = (project.url_path,)))
# validate the form
if form.is_valid():
date = datetime.datetime.utcnow()
html = markdown(request.POST['markdown'], safe_mode = True)
post = BlogPost(title = request.POST['title'],
markdown = request.POST['markdown'],
summary = html,
content = html,
from_feed = False,
author = request.user,
project = project,
date = date)
post.blog = project.blog
post.save()
project.blog.most_recent_date = date
project.blog.save()
project.calculate_score()
return HttpResponseRedirect(reverse(show_post,
args = (post.blog.project.url_path,
post.url_path,)))
else:
return render_to_response('blogs/edit.html', {
'project': project,
'form': form
}, context_instance = RequestContext(request))
# updates a previously posted post, and redirects to the management page
@login_required
def update_post(request, project_url_path, post_url_path):
form = BlogPostForm(request.POST)
post = get_object_or_404(BlogPost, url_path = post_url_path)
# validate the form
if form.is_valid():
# update the post
html = markdown(request.POST['markdown'], safe_mode = True)
post.title = request.POST['title']
post.markdown = request.POST['markdown']
post.summary = html
post.content = html
post.save()
return HttpResponseRedirect(reverse(show_post,
args = (post.blog.project.url_path,
post.url_path,)))
else:
return render_to_response('blogs/edit.html', {
'project': post.blog.project,
'form': form
}, context_instance = RequestContext(request))
# deletes a post
@login_required
def delete_post(request, project_url_path, post_url_path):
post = get_object_or_404(BlogPost, url_path = post_url_path)
project = post.project
if request.user not in post.blog.project.authors.all():
return HttpResponseRedirect(reverse(projects.show,
args = (project.url_path,)))
post.delete()
return HttpResponseRedirect(reverse(projects.modify,
args = (project.url_path, 2)))
@login_required
def remove_personal_blog(request, user_id):
if request.user.id != int(user_id):
raise Http404
try: #remove the blog and all related posts, if they have one
blog = Blog.objects.get(user = request.user)
BlogPost.objects.filter(blog = blog).delete()
blog.delete()
except Blog.DoesNotExist:
pass #No need to delete anything
from observatory.dashboard.views import users
return HttpResponseRedirect(reverse(users.profile,
args = (request.user.id,)))
@login_required
def edit_personal_blog(request, user_id):
# users can only edit their own blogs, of course
if request.user.id != int(user_id):
raise Http404
# user is saving the form
if request.POST:
form = BlogForm(request.POST)
if form.is_valid():
try:
blog = Blog.objects.get(user = request.user)
blog.url = form.cleaned_data['url']
blog.rss = form.cleaned_data['rss']
except Blog.DoesNotExist:
blog = Blog(user = request.user,
url = form.cleaned_data['url'],
rss = form.cleaned_data['rss'],
from_feed = True)
blog.save()
# prevent form resubmission on refresh by redirecting
from observatory.dashboard.views import users
return HttpResponseRedirect(reverse(users.profile,
args = (request.user.id,)))
# displaying the initial form, or a form from an already created blog
else:
try:
form = BlogForm(instance = Blog.objects.get(user = request.user))
except Blog.DoesNotExist:
form = BlogForm()
return render_to_response("blogs/edit-personal-blog.html", {
"form": form,
"user": request.user,
}, context_instance = RequestContext(request))
#!/usr/bin/env python
import sys, urllib, urllib2, re, logging, json, uuid, ast, datetime, os, requests, time, collections
from redten.shellprinting import lg, good, boom, mark, anmt, info
from redten.redten_client import RedTenClient, ppj
"""
More documentation and samples:
- Forecast: https://github.com/jay-johnson/sci-pype/blob/master/red10/Red10-SPY-Multi-Model-Price-Forecast.ipynb
- Predictions with the IRIS dataset: https://github.com/jay-johnson/sci-pype/blob/master/red10/Red10-IRIS-Predictions.ipynb
"""
# Login to red10
rt = RedTenClient()
csv_file = ""
rloc = ""
sloc = ""
ds_name = "SPY"
if len(sys.argv) > 1:
ds_name = str(sys.argv[1]).upper()
# Which column do you want to predict?
target_column_name = "FClose"
# possible values in the Target Column
target_column_values = [ "GoodBuys", "BadBuys", "Not Finished" ]
# What columns can the algorithms use for training and learning?
feature_column_names = [ "FHigh", "FLow", "FOpen", "FClose", "FVolume" ]
ignore_features = [ # Prune non-int/float columns as needed:
"Ticker",
"Date",
"FDate",
"FPrice",
"DcsnDate",
"Decision"
]
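# XGBoost hyperparameters for the forecast job; passed through as the "train"
# entry of post_data below.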
train_xgb = {
"learning_rate" : 0.20,
"num_estimators" : 50,
"sub_sample" : 0.20,
"col_sample_by_tree" : 0.90,
"col_sample_by_level" : 1.0,
"objective" : "reg:linear",
"max_depth" : 3,
"max_delta_step" : 0,
"min_child_weight" : 1,
"reg_alpha" : 0,
"reg_lambda" : 1,
"base_score" : 0.6,
"gamma" : 0,
"seed" : 42,
"silent" : True
}
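# The single row of feature values the job will predict against; passed as
# "predict_this_data" in post_data below.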
predict_row = {
"FHigh" : 5.4,
"FLow" : 3.4,
"FOpen" : 1.7,
"FClose" : 0.2,
"FVolume" : 0
}
units_ahead_set = [ 5, 10, 15, 20, 25, 30 ]
units_ahead_type = "Days"
algo_name = "xgb-regressor"
title = str(ds_name) + " Forecast v5 - " + str(rt.uni_key())
desc = "Forecast simulation"
label_column_name = target_column_name
test_ratio = 0.1
sample_filter_rules = {}
# list of emails to send the analysis to on successful completion
send_to_email = []
# Allow a list of comma separated emails to be passed in
# example: [email protected],[email protected]
# note no spaces between them
if str(os.getenv("ENV_REDTEN_FORECAST_EMAILS", "")).strip() != "":
send_to_email = str(os.getenv("ENV_REDTEN_FORECAST_EMAILS", "")).strip().split(",")
# allow the target dataset to load from the env for automation with docker
if csv_file == "" and sloc == "" and rloc == "":
csv_file = str(os.getenv("ENV_REDTEN_CSV_FILE", "")).strip()
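# Build the forecast job request: dataset location, target/feature
# configuration, training parameters, and forecast options.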
post_data = {
"predict_this_data" : predict_row,
"title" : title,
"desc" : desc,
"ds_name" : ds_name,
"feature_column_names" : feature_column_names,
"ignore_features" : ignore_features,
"csv_file" : csv_file,
"rloc" : rloc,
"sloc" : sloc,
"algo_name" : algo_name,
"test_ratio" : test_ratio,
"target_column_values" : target_column_values,
"target_column_name" : target_column_name,
"label_column_name" : label_column_name,
"user_id" : rt.get_uid(),
"train" : train_xgb,
"max_features" : 10,
"tracking_type" : "",
"units_ahead_set" : units_ahead_set,
"units_ahead_type" : units_ahead_type,
"prediction_type" : "Forecast",
"ml_type" : "Playbook-UnitsAhead",
"forecast_type" : "ETFPriceForecasting",
"forecast_version" : 5,
"valid_forecast_threshold" : 0.3,
"sample_filters" : sample_filter_rules,
"predict_units_back" : 90,
"bypass_trading_day" : 1,
"send_to_email" : send_to_email,
"version" : 1
}
"""
Wait on the job to finish
"""
anmt("Launching Job")
job_data = {}
job_report = {}
job_res = {}
job_response = rt.run_job(post_data=post_data)
if job_response["status"] != "valid":
boom("Forecast job failed with error=" + str(job_response["status"]))
sys.exit(1)
else:
if "id" not in job_response["data"]:
boom("Failed to create new forecast job")
sys.exit(1)
else:
job_id = job_response["data"]["id"]
job_status = job_response["data"]["status"]
lg("Started Forecast job=" + str(job_id) + " with current status=" + str(job_status))
# end of if job was valid or not
lg("Started Forecast=" + str(ds_name) + " job=" + str(job_id), 6)
"""
Wait on the job to finish
"""
if job_id is None:
boom("Failed to start a new job")
else:
lg("Waiting on results", 6)
job_res = rt.wait_on_job(job_id)
if job_res["status"] != "SUCCESS":
boom("Job=" + str(job_id) + " failed with status=" + str(job_res["status"]) + " err=" + str(job_res["error"]))
else:
job_data = job_res["record"]
anmt("Job Report:")
lg(ppj(job_data), 5)
# end of waiting
"""
Get Job Analysis
"""
job_report = {}
if job_id is None:
boom("Failed to start a new job")
else:
# Get the analysis, but do not auto-show the plots
job_report = rt.get_job_analysis(job_id, show_plots=False)
if len(job_report) == 0:
boom("Job=" + str(job_id) + " failed")
sys.exit(1)
else:
lg("")
# if the job failed
# end of get job analysis
"""
Build Forecast Results
"""
lg("Building Forecast=" + str(ds_name) + " Results for job=" + str(job_id), 6)
# Build the forecast accuracy dictionary from the analysis
# and show the forecast dataframes
acc_results = rt.build_forecast_results(job_report)
for col in acc_results:
col_node = acc_results[col]
predictions_df = col_node["predictions_df"]
date_predictions_df = col_node["date_predictions_df"]
train_predictions_df = col_node["train_predictions_df"]
lg("--------------------------------------------------")
# for all columns in the accuracy dictionary:
lg("Column=" + str(col) + " accuracy=" + str(col_node["accuracy"]) + " mse=" + str(col_node["mse"]) + " num_predictions=" + str(len(col_node["date_predictions_df"].index)))
# end of header line
# show the timeseries forecast
lg(date_predictions_df.head(5), 6)
lg("")
# end of showing prediction results
"""
Get Analysis Images
"""
lg("Getting Forecast=" + str(ds_name) + " Analysis Images for job=" + str(job_id), 6)
# Showing plots requires matplotlib, so fetch the analysis without displaying them:
job_res = rt.get_job_analysis(job_id, show_plots=False)
sys.exit(0)
import bmesh
import bpy
from mathutils import Vector
MAX_NUM_COLOR_SETS = 8
MAX_NUM_TEXCOORD_SETS = 8
def create_mesh(op, mesh_spec):
idx, primitive_idx = mesh_spec
mesh = op.gltf['meshes'][idx]
primitives = mesh['primitives']
# The caller can request we generate only one primitive instead of all of them
if primitive_idx is not None:
primitives = [primitives[primitive_idx]]
bme = bmesh.new()
# If any of the materials used in this mesh use COLOR_0 attributes, we need
# to pre-emptively create that layer, or else the Attribute node referencing
# COLOR_0 in those materials will produce a solid red color. See
# material.compute_materials_using_color0, which, note, must be called
# before this function.
needs_color0 = any(
op.material_infos[prim.get('material', 'default_material')].num_color_sets > 0
for prim in primitives
)
if needs_color0:
bme.loops.layers.color.new('COLOR_0')
# Make a list of all the materials this mesh will need; the material on a
# face is set by giving an index into this list.
materials = list(set(
op.get('material', primitive.get('material', 'default_material'))
for primitive in primitives
))
# Add in all the primitives
for primitive in primitives:
material = op.get('material', primitive.get('material', 'default_material'))
material_idx = materials.index(material)
add_primitive_to_bmesh(op, bme, primitive, material_idx)
name = mesh_name(op, mesh_spec)
me = bpy.data.meshes.new(name)
bmesh_to_mesh(bme, me)
bme.free()
# Fill in the material list (we can't do me.materials = materials since this
# property is read-only).
for material in materials:
me.materials.append(material)
# Set polygon smoothing if the user requested it
if op.options['smooth_polys']:
for polygon in me.polygons:
polygon.use_smooth = True
me.update()
if not me.shape_keys:
return me
else:
# Tell op.get not to cache us if we have morph targets; this is because
# morph target weights are stored on the mesh instance in glTF, which
# would be on the object in Blender. But in Blender shape keys are part
# of the mesh. So when an object wants a mesh with morph targets, it
# always needs to get a new one. Ergo we lose sharing for meshes with
# morph targets.
return {
'result': me,
'do_not_cache_me': True,
}
def mesh_name(op, mesh_spec):
mesh_idx, primitive_idx = mesh_spec
name = op.gltf['meshes'][mesh_idx].get('name', 'meshes[%d]' % mesh_idx)
if primitive_idx is not None:
# Look for a name on the extras property
extras = op.gltf['meshes'][mesh_idx]['primitives'][primitive_idx].get('extras')
if type(extras) == dict and type(extras.get('name')) == str and extras['name']:
primitive_name = extras['name']
else:
primitive_name = 'primitives[%d]' % primitive_idx
name += '.' + primitive_name
return name
def bmesh_to_mesh(bme, me):
bme.to_mesh(me)
# to_mesh ignores normals?
normals = [v.normal for v in bme.verts]
me.use_auto_smooth = True
me.normals_split_custom_set_from_vertices(normals)
if len(bme.verts.layers.shape) != 0:
# to_mesh does NOT create shape keys so if there's shape data we'll have
# to do it by hand. The only way I could find to create a shape key was
# to temporarily parent me to an object and use obj.shape_key_add.
dummy_ob = None
try:
dummy_ob = bpy.data.objects.new('##dummy-object##', me)
dummy_ob.shape_key_add(name='Basis')
me.shape_keys.name = me.name
for layer_name in bme.verts.layers.shape.keys():
dummy_ob.shape_key_add(name=layer_name)
key_block = me.shape_keys.key_blocks[layer_name]
layer = bme.verts.layers.shape[layer_name]
for i, v in enumerate(bme.verts):
key_block.data[i].co = v[layer]
finally:
if dummy_ob:
bpy.data.objects.remove(dummy_ob)
def get_layer(bme_layers, name):
"""Gets a layer from a BMLayerCollection, creating it if it does not exist."""
if name not in bme_layers:
return bme_layers.new(name)
return bme_layers[name]
def add_primitive_to_bmesh(op, bme, primitive, material_index):
"""Adds a glTF primitive into a bmesh."""
attributes = primitive['attributes']
# Early out if there's no POSITION data
if 'POSITION' not in attributes:
return
positions = op.get('accessor', attributes['POSITION'])
if 'indices' in primitive:
indices = op.get('accessor', primitive['indices'])
else:
indices = range(0, len(positions))
bme_verts = bme.verts
bme_edges = bme.edges
bme_faces = bme.faces
convert_coordinates = op.convert_translation
if op.options['axis_conversion'] == 'BLENDER_UP':
def convert_normal(n):
return Vector([n[0], -n[2], n[1]])
else:
def convert_normal(n):
return n
# The primitive stores vertex attributes in arrays and gives indices into
# those arrays
#
# Attributes:
# v0 v1 v2 v3 v4 ...
# Indices:
# 1 2 4 ...
#
# We want to add **only those vertices that are used in an edge/tri** to the
# bmesh. Because of this and because the bmesh already has some vertices,
# when we add the new vertices their index in the bmesh will be different
# than their index in the primitive's vertex attribute arrays
#
# Bmesh:
# ...pre-existing vertices... v1 v2 v4 ...
#
# The index into the primitive's vertex attribute array is called the
# vertex's p-index (pidx) and the index into the bmesh is called its b-index
# (bidx). Remember to use the right index!
# The pidx of all the vertices that are actually used by the primitive
used_pidxs = set(indices)
# Contains a pair (bidx, pidx) for every vertex in the primitive
vert_idxs = []
# pidx_to_bidx[pidx] is the bidx of the vertex with pidx (or -1 if unused)
pidx_to_bidx = [-1] * len(positions)
bidx = len(bme_verts)
for pidx in range(0, len(positions)):
if pidx in used_pidxs:
bme_verts.new(convert_coordinates(positions[pidx]))
vert_idxs.append((bidx, pidx))
pidx_to_bidx[pidx] = bidx
bidx += 1
bme_verts.ensure_lookup_table()
# Add edges/faces to bmesh
mode = primitive.get('mode', 4)
edges, tris = edges_and_tris(indices, mode)
# NOTE: edges and vertices are in terms of pidxs
for edge in edges:
try:
bme_edges.new((
bme_verts[pidx_to_bidx[edge[0]]],
bme_verts[pidx_to_bidx[edge[1]]],
))
except ValueError:
# Ignore duplicate/degenerate edges
pass
for tri in tris:
try:
tri = bme_faces.new((
bme_verts[pidx_to_bidx[tri[0]]],
bme_verts[pidx_to_bidx[tri[1]]],
bme_verts[pidx_to_bidx[tri[2]]],
))
tri.material_index = material_index
except ValueError:
# Ignore duplicate/degenerate tris
pass
# Set normals
if 'NORMAL' in attributes:
normals = op.get('accessor', attributes['NORMAL'])
for bidx, pidx in vert_idxs:
bme_verts[bidx].normal = convert_normal(normals[pidx])
# Set vertex colors. Add them in the order COLOR_0, COLOR_1, etc.
set_num = 0
while 'COLOR_%d' % set_num in attributes:
if set_num >= MAX_NUM_COLOR_SETS:
print('more than %d COLOR_n attributes; dropping the rest on the floor'
% MAX_NUM_COLOR_SETS)
break
layer_name = 'COLOR_%d' % set_num
layer = get_layer(bme.loops.layers.color, layer_name)
colors = op.get('accessor', attributes[layer_name])
# Check whether Blender takes RGB or RGBA colors (old versions only take RGB)
num_components = len(colors[0])
blender_num_components = len(bme_verts[0].link_loops[0][layer])
if num_components == 3 and blender_num_components == 4:
# RGB -> RGBA
colors = [color+(1,) for color in colors]
if num_components == 4 and blender_num_components == 3:
# RGBA -> RGB
colors = [color[:3] for color in colors]
print('No RGBA vertex colors in your Blender version; dropping A component!')
for bidx, pidx in vert_idxs:
for loop in bme_verts[bidx].link_loops:
loop[layer] = colors[pidx]
set_num += 1
# Set texcoords
set_num = 0
while 'TEXCOORD_%d' % set_num in attributes:
if set_num >= MAX_NUM_TEXCOORD_SETS:
print('more than %d TEXCOORD_n attributes; dropping the rest on the floor'
% MAX_NUM_TEXCOORD_SETS)
break
layer_name = 'TEXCOORD_%d' % set_num
layer = get_layer(bme.loops.layers.uv, layer_name)
uvs = op.get('accessor', attributes[layer_name])
for bidx, pidx in vert_idxs:
# UV transform
u, v = uvs[pidx]
uv = (u, 1 - v)
for loop in bme_verts[bidx].link_loops:
loop[layer].uv = uv
set_num += 1
# Set joints/weights for skinning (multiple sets allow > 4 influences)
# TODO: multiple sets are untested!
joint_sets = []
weight_sets = []
set_num = 0
while 'JOINTS_%d' % set_num in attributes and 'WEIGHTS_%d' % set_num in attributes:
joint_sets.append(op.get('accessor', attributes['JOINTS_%d' % set_num]))
weight_sets.append(op.get('accessor', attributes['WEIGHTS_%d' % set_num]))
set_num += 1
if joint_sets:
layer = get_layer(bme.verts.layers.deform, 'Vertex Weights')
for joint_set, weight_set in zip(joint_sets, weight_sets):
for bidx, pidx in vert_idxs:
for j in range(0, 4):
weight = weight_set[pidx][j]
if weight != 0.0:
joint = joint_set[pidx][j]
bme_verts[bidx][layer][joint] = weight
# Set morph target positions (we don't handle normals/tangents)
for k, target in enumerate(primitive.get('targets', [])):
if 'POSITION' not in target:
continue
layer = get_layer(bme.verts.layers.shape, 'Morph %d' % k)
morph_positions = op.get('accessor', target['POSITION'])
for bidx, pidx in vert_idxs:
bme_verts[bidx][layer] = convert_coordinates(
Vector(positions[pidx]) +
Vector(morph_positions[pidx])
)
def edges_and_tris(indices, mode):
"""
Convert the indices for different primitive modes into a list of edges
(pairs of endpoints) and a list of tris (triples of vertices).
"""
edges = []
tris = []
# TODO: only mode TRIANGLES is tested!!
if mode == 0:
# POINTS
pass
elif mode == 1:
# LINES
# 1 3
# / /
# 0 2
edges = [tuple(indices[i:i+2]) for i in range(0, len(indices), 2)]
elif mode == 2:
# LINE LOOP
# 1---2
# / \
# 0-------3
edges = [tuple(indices[i:i+2]) for i in range(0, len(indices) - 1)]
edges.append((indices[-1], indices[0]))
elif mode == 3:
# LINE STRIP
# 1---2
# / \
# 0 3
edges = [tuple(indices[i:i+2]) for i in range(0, len(indices) - 1)]
elif mode == 4:
# TRIANGLES
# 2 3
# / \ / \
# 0---1 4---5
tris = [tuple(indices[i:i+3]) for i in range(0, len(indices), 3)]
elif mode == 5:
# TRIANGLE STRIP
# 1---3---5
# / \ / \ /
# 0---2---4
def alternate(i, xs):
ccw = i % 2 != 0
return xs if ccw else (xs[0], xs[2], xs[1])
tris = [
alternate(i, tuple(indices[i:i+3]))
for i in range(0, len(indices) - 2)
]
elif mode == 6:
# TRIANGLE FAN
# 3---2
# / \ / \
# 4---0---1
tris = [
(indices[0], indices[i], indices[i+1])
for i in range(1, len(indices) - 1)
]
else:
raise Exception('primitive mode unimplemented: %d' % mode)
return edges, tris
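# A quick illustration (not part of the original importer) of what
# edges_and_tris returns for the two most common modes:
#
#   edges_and_tris([0, 1, 2, 2, 1, 3], 4)  # TRIANGLES
#   # -> ([], [(0, 1, 2), (2, 1, 3)])
#   edges_and_tris([0, 1, 1, 2], 1)        # LINES
#   # -> ([(0, 1), (1, 2)], [])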
"This module define the functions of the features for EEG analysis"
import numpy as np
import scipy as sp
from sklearn.neighbors import KDTree
from numba import njit
def bandPower(spectrum, bandsLimits, freqRes, normalize=False):
"""
Returns the power of each frequency band of the given spectrum.
Parameters
----------
spectrum: 1D arraylike
An array containing the spectrum of a signal
bandsLimits: dict
This parameter is used to indicate the bands that are going to be
used. It is a dict with the name of each band as key and a tuple
with the lower and upper bounds as value.
freqRes: float
Minimum resolution for the frequency.
normalize: bool, optional
If True, each band power is divided by the total power of the
spectrum. Default False.
Returns
-------
dict
The keys are the name of each band and the values are their power.
"""
total = 1
if normalize:
total = sp.integrate.trapz(spectrum, dx=freqRes)
return {key:sp.integrate.trapz(spectrum[band[0]:band[1]], dx=freqRes)/total
for key, band in bandsLimits.items()}
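# Example (hypothetical numbers): the band limits are indices into spectrum,
# so with freqRes=0.5 and bandsLimits={"alpha": (16, 24)} the "alpha" value
# integrates spectrum[16:24] with the trapezoidal rule, i.e. roughly the
# 8-12 Hz range if index i corresponds to i*freqRes Hz.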
def PFD(data):
"""
Returns the Petrosian Fractal Dimension of the signal given in data.
Parameters
----------
data: array_like
Signal
Returns
-------
float
The resulting value
"""
derivative = np.diff(data)
size=len(data)
signChanges = countSignChanges(derivative)
logSize = np.log(size)
return logSize / (logSize + np.log(size / (size + 0.4 * signChanges)))
def HFD(data,kMax=None):
"""
Returns the Higuchi Fractal Dimension of the signal given data.
Parameters
----------
data: array_like
signal
kMax: int, optional
By default it will be len(data)//4.
Returns
-------
float
The resulting value
"""
# Initializations
data=np.array(data)
N = len(data)
kMax = N // 4 if kMax is None else kMax #Default kMax
L = np.zeros(kMax-1)
x = np.array((-np.log(np.arange(2,kMax+1)),np.ones(kMax-1))).transpose()
return _HFD(data, N, kMax, L, x)
@njit
def _HFD(data, N, kMax, L, x):# pragma: no cover
# Loop from 2 to kMax
for k in range(2, kMax + 1):
Lk = np.zeros(k)
# Loop to compute the length of Lmk
for m in range(0, k):
Lmk = 0
for i in range(1, (N - m) // k):
Lmk += abs(data[m + i * k] - data[m + i * k - k])
Lk[m]=Lmk * (N - 1) / (((N - m) // k) * k * k)
Laux=np.mean(Lk)
Laux=0.01/k if Laux==0 else Laux
L[k-2]=np.log(Laux)
p, _, _, _ = np.linalg.lstsq(x, L)
return p[0]
def synchronizationLikelihood(c1, c2, m, l, w1, w2, pRef=0.05, epsilonIterations=20):
"""
Returns the Synchronization Likelihood between c1 and c2. This is a
modified version of the original algorithm.
Parameters
----------
c1: array_like
First signal
c2: array_like
second signal
m: int
Numbers of elements of the embedded vectors.
l: int
Separation between elements of the embedded vectors.
w1: int
Theiler correction for autocorrelation effects
w2: int
A window that sharpens the time resolution of the Synchronization
measure
pRef: float, optional
The pRef param of the synchronizationLikelihood. Default 0.05
epsilonIterations: int, optional
Number of iterations used to determine the value of epsilon. Default: 20.
Returns
-------
float
A value between 0 and 1. 0 means that the signals are not synchronized
at all and 1 means that they are totally synchronized.
"""
if len(c1)!=len(c2):
raise ValueError("c1 and c2 must have the same lenght")
c1 = np.array(c1)
c2 = np.array(c2)
return _SL(c1, c2, m, l, w1, w2, pRef,epsilonIterations)
# Auxiliary functions for Synchronization Likelihood
@njit
def _SL(c1, c2, m, l, w1, w2, pRef, epsilonIterations):# pragma: no cover
X1 = _getEmbeddedVectors(c1, m, l)
X2 = _getEmbeddedVectors(c2, m, l)
D1 = _getDistances(X1)
D2 = _getDistances(X2)
size=len(X1)
E1 = np.zeros(size)
E2 = np.zeros(size)
for i in range(size):
E1[i]=_getEpsilon(D1, i, pRef,epsilonIterations)
E2[i]=_getEpsilon(D2, i, pRef,epsilonIterations)
SL = 0
SLMax = 0
for i in range(size):
Sij = 0
SijMax = 0
for j in range(size):
if w1 < abs(j - i) < w2:
if D1[i,j] < E1[i]:
if D2[i,j] < E2[i]:
Sij += 1
SijMax += 1
SL += Sij
SLMax += SijMax
return SL / SLMax if SLMax>0 else 0
@njit
def _getHij(D, i, e):# pragma: no cover
summ = 0
for j in range(len(D)):
if D[i,j] < e:
summ += 1
return summ
@njit
def _getDistances(X):# pragma: no cover
t=len(X)
D=np.zeros((t,t),dtype=np.float64)
for i in range(t):
for j in range(i):
D[j,i]=D[i,j]=np.linalg.norm(X[i]-X[j])
return D
@njit
def _getProbabilityP(D, i, e):# pragma: no cover
return _getHij(D, i, e) /len(D)
@njit
def _getEmbeddedVectors(x, m, l):# pragma: no cover
size = len(x) - (m - 1) * l
X = np.zeros((m, size))
for i in range(m):
X[i]=x[i*l:i * l + size]
return X.T
@njit
def _logDiference(p1,p2):# pragma: no cover
return abs(np.log(p2/p1))
@njit
def _getEpsilon(D, i, pRef, iterations):# pragma: no cover
eInf = 0
eSup = None
bestE=e = 1
bestP=p = 1
minP = 1 / len(D)
for _ in range(iterations):
p = _getProbabilityP(D, i, e)
if pRef < minP == p:
break
if p < pRef:
eInf = e
elif p > pRef:
eSup = e
else:
bestP=p
bestE=e
break
if _logDiference(bestP,pRef) > _logDiference(p,pRef):
bestP=p
bestE=e
e = e * 2 if eSup is None else (eInf + eSup) / 2
return bestE
def countSignChanges(data):
"""
Returns the number of sign changes of a 1D array
Parameters
----------
data: array_like
The data from which the sign changes will be counted
Returns
-------
int
Number of sign changes in the data
"""
signChanges = 0
for i in range(1, len(data)):
if data[i] * data[i - 1] < 0:
signChanges += 1
return signChanges
# HjorthParameters
def hjorthActivity(data):
"""
Returns the Hjorth Activity of the given data
Parameters
----------
data: array_like
Returns
-------
float
The resulting value
"""
return np.var(data)
def hjorthMobility(data):
"""
Returns the Hjorth Mobility of the given data
Parameters
----------
data: array_like
Returns
-------
float
The resulting value
"""
return np.sqrt(np.var(np.gradient(data)) / np.var(data))
def hjorthComplexity(data):
"""
Returns the Hjorth Complexity of the given data
Parameters
----------
data: array_like
Returns
-------
float
The resulting value
"""
return hjorthMobility(np.gradient(data)) / hjorthMobility(data)
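# Rough sanity check (theoretical values, not from the original module): for a
# pure sinusoid the mobility grows with its frequency and the complexity is
# close to 1, while noisier signals give larger complexity values.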
# Sample Entropy
def sampEn(data, m = 2, l = 1, r = None, fr = 0.2, eps = 1e-10):
"""
Returns Sample Entropy of the given data.
Parameters
----------
data: array_like
The signal
m: int, optional
Size of the embedded vectors. By default 2.
l: int, optional
Lag between elements of the embedded vectors. By default 1.
r: float, optional
Tolerance. By default fr*std(data)
fr: float, optional
Fraction of std(data) used as tolerance. If r is passed, this
parameter is ignored. By default, 0.2.
eps: float, optional
Small number added to avoid infinite results. If 0 infinite results can
appear. Default: 1e-10.
Returns
-------
float
The resulting value
"""
if not r:
r = fr * np.std(data)
A = _countEmbeddedDistances(data, m+1, l, r) + eps
B = _countEmbeddedDistances(data, m , l, r) + eps
if B == 0:# pragma: no cover
return -np.inf
if A == 0:# pragma: no cover
return np.inf
return -np.log(A/B)
def _countEmbeddedDistances(data, m, l, r):
X = _getEmbeddedVectors(data , m, l)
kdtree = KDTree(X, metric="chebyshev")
# Return the count
return np.sum(kdtree.query_radius(X, r, count_only=True) - 1)
# Lempel-Ziv Complexity
def LZC(data, threshold = None):
"""
Returns the Lempel-Ziv Complexity (LZ76) of the given data.
Parameters
----------
data: array_like
The signal.
threshold: numeric, optional
A number used to binarize the signal. The values of the signal above
threshold will be converted to 1 and the rest to 0. By default, the
median of the data.
References
----------
.. [1] M. Aboy, R. Hornero, D. Abasolo and D. Alvarez, "Interpretation of
the Lempel-Ziv Complexity Measure in the Context of Biomedical
Signal Analysis," in IEEE Transactions on Biomedical Engineering,
vol. 53, no.11, pp. 2282-2288, Nov. 2006.
"""
if not threshold:
threshold=np.median(data)
n = len(data)
sequence = _binarize(data, threshold)
c = _LZC(sequence)
b = n/np.log2(n)
lzc = c/b
return lzc
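# Interpretation note: c is normalized by n/log2(n), so a very regular binary
# sequence yields an LZC close to 0 while a random one approaches 1 (see [1]).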
@njit
def _LZC(sequence):# pragma: no cover
n = len(sequence)
complexity = 1
q0 = 1
qSize = 1
sqi = 0
where = 0
while q0 + qSize <= n:
# If we are checking the end of the sequence we just need to look at
# the last element
if sqi != q0-1:
contained, where = _isSubsequenceContained(sequence[q0:q0+qSize],
sequence[sqi:q0+qSize-1])
else:
contained = sequence[q0+qSize] == sequence[q0+qSize-1]
#If Q is contained in sq~, we increase the size of q
if contained:
qSize+=1
sqi = where
# If Q is not contained, the complexity is increased by 1 and Q is reset
else:
q0+=qSize
qSize=1
complexity+=1
sqi=0
return complexity
def _binarize(data, threshold):
if not isinstance(data, np.ndarray):
data = np.array(data)
return np.array(data > threshold, np.uint8)
@njit
def _isSubsequenceContained(subSequence, sequence):# pragma: no cover
"""
Checks whether subSequence is contained in sequence and returns a tuple
indicating whether it is contained and, if so, where. Return examples:
(True, 7), (False, -1).
"""
n = len(sequence)
m = len(subSequence)
for i in range(n-m+1):
equal = True
for j in range(m):
equal = subSequence[j] == sequence[i+j]
if not equal:
break
if equal:
return True, i
return False, -1
# Detrended Fluctuation Analysis
def DFA(data, fit_degree = 1, min_window_size = 4, max_window_size = None,
fskip = 1, max_n_windows_sizes=None):
"""
Applies Detrended Fluctuation Analysis algorithm to the given data.
Parameters
----------
data: array_like
The signal.
fit_degree: int, optional
Degree of the polynomial used to model the local trends. Default: 1.
min_window_size: int, optional
Size of the smallest window that will be used. Default: 4.
max_window_size: int, optional
Size of the biggest window that will be used. Default: signalSize//4
fskip: float, optional
Fraction of the window that will be skipped in each iteration for each
window size. Default: 1
max_n_windows_sizes: int, optional
Maximum number of window sizes that will be used. The final number can
be smaller once the repeated values are removed.
Default: log2(size)
Returns
-------
float
The resulting value
"""
#Arguments handling
data = np.array(data)
size=len(data)
if not max_window_size:
max_window_size = size//4
# Integrated profile (cumulative sum of the mean-centered data)
Y = np.cumsum(data - np.mean(data))
#Windows sizes
if not max_n_windows_sizes:
max_n_windows_sizes = int(np.round(np.log2(size)))
ns = np.unique(
np.geomspace(min_window_size, max_window_size, max_n_windows_sizes,
dtype=int))
#Fluctuations for each window size
F = np.zeros(ns.size)
#Loop for each window size
for indexF,n in enumerate(ns):
itskip = max(int(fskip * n),1)
nWindows = int(np.ceil((size - n + 1) / itskip))
#Aux x
x = np.arange(n)
y = np.array([Y[i*itskip:i*itskip+n] for i in range(0,nWindows)])
c = np.polynomial.polynomial.polyfit(x, y.T, fit_degree)
yn = np.polynomial.polynomial.polyval(x, c)
F[indexF] = np.mean(np.sqrt(np.sum((y-yn)**2, axis=1)/n))
alpha = np.polyfit(np.log(ns), np.log(F), 1)[0]
if np.isnan(alpha): # pragma: no cover
return 0
return alpha
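# Minimal usage sketch (not part of the original module): compute a few of the
# features above on a synthetic random signal.
if __name__ == "__main__":  # pragma: no cover
    rng = np.random.RandomState(0)
    signal = rng.randn(1024)
    print("PFD:", PFD(signal))
    print("Hjorth activity:", hjorthActivity(signal))
    print("Hjorth mobility:", hjorthMobility(signal))
    print("Hjorth complexity:", hjorthComplexity(signal))
    print("Sample entropy:", sampEn(signal))
    print("LZC:", LZC(signal))
    print("DFA alpha:", DFA(signal))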
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for accessing all other services."""
__author__ = '[email protected] (Stan Grinberg)'
import os
import threading
from adspygoogle.common import SanityCheck
from adspygoogle.common import Utils
from adspygoogle.common.Client import Client
from adspygoogle.common.Logger import Logger
from adspygoogle.dfa import DEFAULT_API_VERSION
from adspygoogle.dfa import DfaSanityCheck
from adspygoogle.dfa import DfaUtils
from adspygoogle.dfa import LIB_SIG
from adspygoogle.dfa import REQUIRED_SOAP_HEADERS
from adspygoogle.dfa.GenericDfaService import GenericDfaService
class DfaClient(Client):
"""Provides entry point to all web services.
Allows instantiation of all DFA API web services.
"""
auth_pkl_name = 'dfa_api_auth.pkl'
config_pkl_name = 'dfa_api_config.pkl'
def __init__(self, headers=None, config=None, path=None):
"""Inits DfaClient.
Args:
[optional]
headers: dict Object with populated authentication credentials.
config: dict Object with client configuration values.
path: str Relative or absolute path to home directory (i.e. location of
pickles and logs/).
Example:
headers = {
'Username': '[email protected]',
'Password': 'secret',
'AuthToken': '...'
}
config = {
'home': '/path/to/home',
'log_home': '/path/to/logs/home',
'xml_parser': '1', # PYXML = 1, ELEMENTREE = 2
'debug': 'n',
'raw_debug': 'n',
'xml_log': 'y',
'request_log': 'y',
'raw_response': 'n',
'strict': 'y',
'pretty_xml': 'y',
'compress': 'y',
}
path = '/path/to/home'
"""
super(DfaClient, self).__init__(headers, config, path)
self.__lock = threading.RLock()
self.__loc = None
if path is not None:
# Update absolute path for a given instance of DfaClient, based on
# provided relative path.
if os.path.isabs(path):
DfaClient.home = path
else:
# NOTE(api.sgrinberg): Keep first parameter of join() as os.getcwd(),
# do not change it to DfaClient.home. Otherwise, may break when
# multiple instances of DfaClient exist during program run.
DfaClient.home = os.path.join(os.getcwd(), path)
# If pickles don't exist at given location, default to "~".
if (not headers and not config and
(not os.path.exists(os.path.join(DfaClient.home,
DfaClient.auth_pkl_name)) or
not os.path.exists(os.path.join(DfaClient.home,
DfaClient.config_pkl_name)))):
DfaClient.home = os.path.expanduser('~')
else:
DfaClient.home = os.path.expanduser('~')
# Update location for both pickles.
DfaClient.auth_pkl = os.path.join(DfaClient.home,
DfaClient.auth_pkl_name)
DfaClient.config_pkl = os.path.join(DfaClient.home,
DfaClient.config_pkl_name)
# Only load from the pickle if config wasn't specified.
self._config = config or self.__LoadConfigValues()
self._config = self.__SetMissingDefaultConfigValues(self._config)
self._config['home'] = DfaClient.home
# Validate XML parser to use.
SanityCheck.ValidateConfigXmlParser(self._config['xml_parser'])
# Only load from the pickle if 'headers' wasn't specified.
if headers is None:
self._headers = self.__LoadAuthCredentials()
else:
if Utils.BoolTypeConvert(self._config['strict']):
SanityCheck.ValidateRequiredHeaders(headers, REQUIRED_SOAP_HEADERS)
self._headers = headers
# Initialize logger.
self.__logger = Logger(LIB_SIG, self._config['log_home'])
def __LoadAuthCredentials(self):
"""Load existing authentication credentials from dfa_api_auth.pkl.
Returns:
dict Dictionary object with populated authentication credentials.
"""
return super(DfaClient, self)._LoadAuthCredentials()
def __WriteUpdatedAuthValue(self, key, new_value):
"""Write updated authentication value for a key in dfa_api_auth.pkl.
Args:
key: str Key to update.
new_value: str New value to update the key with.
"""
super(DfaClient, self)._WriteUpdatedAuthValue(key, new_value)
def __LoadConfigValues(self):
"""Load existing configuration values from dfa_api_config.pkl.
Returns:
dict Dictionary object with populated configuration values.
"""
return super(DfaClient, self)._LoadConfigValues()
def __SetMissingDefaultConfigValues(self, config=None):
"""Set default configuration values for missing elements in the config dict.
Args:
config: dict Object with client configuration values.
Returns:
dictionary Configuration values with defaults added in.
"""
if config is None:
config = {}
config = super(DfaClient, self)._SetMissingDefaultConfigValues(config)
default_config = {
'home': DfaClient.home,
'log_home': os.path.join(DfaClient.home, 'logs')
}
for key in default_config:
if key not in config:
config[key] = default_config[key]
return config
def CallRawMethod(self, soap_message, url, server, http_proxy):
"""Call API method directly, using raw SOAP message.
For API calls performed with this method, outgoing data is not run through
library's validation logic.
Args:
soap_message: str SOAP XML message.
url: str URL of the API service for the method to call.
server: str API server to access for this API call.
http_proxy: str HTTP proxy to use for this API call.
Returns:
tuple Response from the API method (SOAP XML response message).
"""
service_name = DfaUtils.DetermineServiceFromUrl(url).capitalize()
service = getattr(self, 'Get' + service_name + 'Service')(
server=server, http_proxy=http_proxy)
return service.CallRawMethod(soap_message)
def GetService(self, service_name,
server='https://advertisersapi.doubleclick.net', version=None,
http_proxy=None, op_config=None):
"""Generic method to create a service.
Args:
service_name: str Name of the service to create.
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
op_config: dict Dictionary object with additional configuration values for
this operation.
Returns:
GenericDfaService New object representing the SOAP service.
"""
if version is None:
version = DEFAULT_API_VERSION
if Utils.BoolTypeConvert(self._config['strict']):
DfaSanityCheck.ValidateServer(server, version)
# Load additional configuration data.
if op_config is None:
op_config = {
'server': server,
'version': version,
'http_proxy': http_proxy
}
return GenericDfaService(self._headers, self._config, op_config,
self.__lock, self.__logger, service_name)
def GetAdService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the ad service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('ad', server, version, http_proxy)
def GetAdvertiserService(self,
server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the advertiser service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('advertiser', server, version, http_proxy)
def GetAdvertiserGroupService(self,
server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the advertisergroup service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('advertisergroup', server, version, http_proxy)
def GetCampaignService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the campaign service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('campaign', server, version, http_proxy)
def GetChangeLogService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the changelog service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('changelog', server, version, http_proxy)
def GetContentCategoryService(self,
server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the contentcategory service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('contentcategory', server, version, http_proxy)
def GetCreativeService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the creative service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('creative', server, version, http_proxy)
def GetCreativeFieldService(self,
server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the creativefield service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('creativefield', server, version, http_proxy)
def GetCreativeGroupService(self,
server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the creativegroup service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('creativegroup', server, version, http_proxy)
def GetLoginService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the login service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('login', server, version, http_proxy)
def GetNetworkService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the network service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('network', server, version, http_proxy)
def GetPlacementService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the placement service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('placement', server, version, http_proxy)
def GetReportService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the report service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('report', server, version, http_proxy)
def GetSiteService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the site service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('site', server, version, http_proxy)
def GetSizeService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the size service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('size', server, version, http_proxy)
def GetSpotlightService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the spotlight service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('spotlight', server, version, http_proxy)
def GetStrategyService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the strategy service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('strategy', server, version, http_proxy)
def GetSubnetworkService(self,
server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the subnetwork service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('subnetwork', server, version, http_proxy)
def GetUserService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the user service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('user', server, version, http_proxy)
def GetUserRoleService(self, server='https://advertisersapi.doubleclick.net',
version=None, http_proxy=None):
"""Returns an object which can call methods in the userrole service.
Args:
[optional]
server: str API server this object will access. Possible values are:
'https://advertisersapi.doubleclick.net' for production,
'https://advertisersapitest.doubleclick.net' for test, and
'https://betaadvertisersapi.doubleclick.net' for beta.
The default behavior is to access the production environment.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericDfaService New object representing the SOAP service.
"""
return self.GetService('userrole', server, version, http_proxy)
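# A minimal usage sketch (placeholder credentials and the test server, adapted
# from the __init__ docstring example above):
#
#   client = DfaClient(headers={'Username': '[email protected]',
#                               'Password': 'secret'})
#   campaign_service = client.GetCampaignService(
#       'https://advertisersapitest.doubleclick.net')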
# Copyright 2013 Metacloud, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Workflow Logic the Assignment service."""
import copy
import functools
import uuid
from oslo_config import cfg
from oslo_log import log
from six.moves import urllib
from keystone.assignment import schema
from keystone.common import controller
from keystone.common import dependency
from keystone.common import validation
from keystone import exception
from keystone.i18n import _, _LW
from keystone.models import token_model
from keystone import notifications
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@dependency.requires('assignment_api', 'identity_api', 'token_provider_api')
class TenantAssignment(controller.V2Controller):
"""The V2 Project APIs that are processing assignments."""
@controller.v2_deprecated
def get_projects_for_token(self, context, **kw):
"""Get valid tenants for token based on token used to authenticate.
Pulls the token from the context, validates it and gets the valid
tenants for the user in the token.
Doesn't care about token scopedness.
"""
try:
token_data = self.token_provider_api.validate_token(
context['token_id'])
token_ref = token_model.KeystoneToken(token_id=context['token_id'],
token_data=token_data)
except exception.NotFound as e:
LOG.warning(_LW('Authentication failed: %s'), e)
raise exception.Unauthorized(e)
tenant_refs = (
self.assignment_api.list_projects_for_user(token_ref.user_id))
tenant_refs = [self.filter_domain_id(ref) for ref in tenant_refs
if ref['domain_id'] == CONF.identity.default_domain_id]
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self.format_project_list(tenant_refs, **params)
@controller.v2_deprecated
def get_project_users(self, context, tenant_id, **kw):
self.assert_admin(context)
user_refs = []
user_ids = self.assignment_api.list_user_ids_for_project(tenant_id)
for user_id in user_ids:
try:
user_ref = self.identity_api.get_user(user_id)
except exception.UserNotFound:
# Log that user is missing and continue on.
message = ("User %(user_id)s in project %(project_id)s "
"doesn't exist.")
LOG.debug(message,
{'user_id': user_id, 'project_id': tenant_id})
else:
user_refs.append(self.v3_to_v2_user(user_ref))
return {'users': user_refs}
@dependency.requires('assignment_api', 'role_api')
class Role(controller.V2Controller):
"""The Role management APIs."""
@controller.v2_deprecated
def get_role(self, context, role_id):
self.assert_admin(context)
return {'role': self.role_api.get_role(role_id)}
@controller.v2_deprecated
def create_role(self, context, role):
role = self._normalize_dict(role)
self.assert_admin(context)
if 'name' not in role or not role['name']:
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
role_id = uuid.uuid4().hex
role['id'] = role_id
role_ref = self.role_api.create_role(role_id, role)
return {'role': role_ref}
@controller.v2_deprecated
def delete_role(self, context, role_id):
self.assert_admin(context)
self.role_api.delete_role(role_id)
@controller.v2_deprecated
def get_roles(self, context):
self.assert_admin(context)
return {'roles': self.role_api.list_roles()}
@dependency.requires('assignment_api', 'resource_api', 'role_api')
class RoleAssignmentV2(controller.V2Controller):
"""The V2 Role APIs that are processing assignments."""
# COMPAT(essex-3)
@controller.v2_deprecated
def get_user_roles(self, context, user_id, tenant_id=None):
"""Get the roles for a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
roles = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
return {'roles': [self.role_api.get_role(x)
for x in roles]}
@controller.v2_deprecated
def add_role_to_user(self, context, user_id, role_id, tenant_id=None):
"""Add a role to a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
self.assignment_api.add_role_to_user_and_project(
user_id, tenant_id, role_id)
role_ref = self.role_api.get_role(role_id)
return {'role': role_ref}
@controller.v2_deprecated
def remove_role_from_user(self, context, user_id, role_id, tenant_id=None):
"""Remove a role from a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
# This still has the weird legacy semantics that adding a role to
# a user also adds them to a tenant, so we must follow up on that
self.assignment_api.remove_role_from_user_and_project(
user_id, tenant_id, role_id)
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def get_role_refs(self, context, user_id):
"""Ultimate hack to get around having to make role_refs first-class.
This will basically iterate over the various roles the user has in
all tenants the user is a member of and create fake role_refs where
the id encodes the user-tenant-role information so we can look
up the appropriate data when we need to delete them.
"""
self.assert_admin(context)
tenants = self.assignment_api.list_projects_for_user(user_id)
o = []
for tenant in tenants:
# As a v2 call, we should limit the response to those projects in
# the default domain.
if tenant['domain_id'] != CONF.identity.default_domain_id:
continue
role_ids = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant['id'])
for role_id in role_ids:
ref = {'roleId': role_id,
'tenantId': tenant['id'],
'userId': user_id}
ref['id'] = urllib.parse.urlencode(ref)
o.append(ref)
return {'roles': o}
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def create_role_ref(self, context, user_id, role):
"""This is actually used for adding a user to a tenant.
In the legacy data model adding a user to a tenant required setting
a role.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
tenant_id = role.get('tenantId')
role_id = role.get('roleId')
self.assignment_api.add_role_to_user_and_project(
user_id, tenant_id, role_id)
role_ref = self.role_api.get_role(role_id)
return {'role': role_ref}
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def delete_role_ref(self, context, user_id, role_ref_id):
"""This is actually used for deleting a user from a tenant.
In the legacy data model removing a user from a tenant required
deleting a role.
To emulate this, we encode the tenant and role in the role_ref_id,
and if this happens to be the last role for the user-tenant pair,
we remove the user from the tenant.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
role_ref_ref = urllib.parse.parse_qs(role_ref_id)
tenant_id = role_ref_ref.get('tenantId')[0]
role_id = role_ref_ref.get('roleId')[0]
self.assignment_api.remove_role_from_user_and_project(
user_id, tenant_id, role_id)
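# Worked example of the fake role_ref id used above (comment only, not from
# the original source): get_role_refs() urlencodes the triple and
# delete_role_ref() recovers it with parse_qs.
#
#   ref = {'roleId': 'r1', 'tenantId': 't1', 'userId': 'u1'}
#   ref_id = urllib.parse.urlencode(ref)  # e.g. 'roleId=r1&tenantId=t1&userId=u1'
#   urllib.parse.parse_qs(ref_id)['tenantId'][0]  # -> 't1'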
@dependency.requires('assignment_api', 'resource_api')
class ProjectAssignmentV3(controller.V3Controller):
"""The V3 Project APIs that are processing assignments."""
collection_name = 'projects'
member_name = 'project'
def __init__(self):
super(ProjectAssignmentV3, self).__init__()
self.get_member_from_driver = self.resource_api.get_project
@controller.filterprotected('enabled', 'name')
def list_user_projects(self, context, filters, user_id):
hints = ProjectAssignmentV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_projects_for_user(user_id,
hints=hints)
return ProjectAssignmentV3.wrap_collection(context, refs, hints=hints)
@dependency.requires('role_api')
class RoleV3(controller.V3Controller):
"""The V3 Role CRUD APIs."""
collection_name = 'roles'
member_name = 'role'
def __init__(self):
super(RoleV3, self).__init__()
self.get_member_from_driver = self.role_api.get_role
@controller.protected()
@validation.validated(schema.role_create, 'role')
def create_role(self, context, role):
ref = self._assign_unique_id(self._normalize_dict(role))
initiator = notifications._get_request_audit_info(context)
ref = self.role_api.create_role(ref['id'], ref, initiator)
return RoleV3.wrap_member(context, ref)
@controller.filterprotected('name')
def list_roles(self, context, filters):
hints = RoleV3.build_driver_hints(context, filters)
refs = self.role_api.list_roles(
hints=hints)
return RoleV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_role(self, context, role_id):
ref = self.role_api.get_role(role_id)
return RoleV3.wrap_member(context, ref)
@controller.protected()
@validation.validated(schema.role_update, 'role')
def update_role(self, context, role_id, role):
self._require_matching_id(role_id, role)
initiator = notifications._get_request_audit_info(context)
ref = self.role_api.update_role(role_id, role, initiator)
return RoleV3.wrap_member(context, ref)
@controller.protected()
def delete_role(self, context, role_id):
initiator = notifications._get_request_audit_info(context)
self.role_api.delete_role(role_id, initiator)
@dependency.requires('assignment_api', 'identity_api', 'resource_api',
'role_api')
class GrantAssignmentV3(controller.V3Controller):
"""The V3 Grant Assignment APIs."""
collection_name = 'roles'
member_name = 'role'
def __init__(self):
super(GrantAssignmentV3, self).__init__()
self.get_member_from_driver = self.role_api.get_role
def _require_domain_xor_project(self, domain_id, project_id):
if domain_id and project_id:
msg = _('Specify a domain or project, not both')
raise exception.ValidationError(msg)
if not domain_id and not project_id:
msg = _('Specify one of domain or project')
raise exception.ValidationError(msg)
def _require_user_xor_group(self, user_id, group_id):
if user_id and group_id:
msg = _('Specify a user or group, not both')
raise exception.ValidationError(msg)
if not user_id and not group_id:
msg = _('Specify one of user or group')
raise exception.ValidationError(msg)
def _check_if_inherited(self, context):
return (CONF.os_inherit.enabled and
context['path'].startswith('/OS-INHERIT') and
context['path'].endswith('/inherited_to_projects'))
def _check_grant_protection(self, context, protection, role_id=None,
user_id=None, group_id=None,
domain_id=None, project_id=None,
allow_no_user=False):
"""Check protection for role grant APIs.
The policy rule might want to inspect attributes of any of the entities
involved in the grant. So we get these and pass them to the
check_protection() handler in the controller.
"""
ref = {}
if role_id:
ref['role'] = self.role_api.get_role(role_id)
if user_id:
try:
ref['user'] = self.identity_api.get_user(user_id)
except exception.UserNotFound:
if not allow_no_user:
raise
else:
ref['group'] = self.identity_api.get_group(group_id)
if domain_id:
ref['domain'] = self.resource_api.get_domain(domain_id)
else:
ref['project'] = self.resource_api.get_project(project_id)
self.check_protection(context, protection, ref)
@controller.protected(callback=_check_grant_protection)
def create_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Grants a role to a user or group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.create_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context), context)
@controller.protected(callback=_check_grant_protection)
def list_grants(self, context, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Lists roles granted to user/group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
refs = self.assignment_api.list_grants(
user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
return GrantAssignmentV3.wrap_collection(context, refs)
@controller.protected(callback=_check_grant_protection)
def check_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Checks if a role has been granted on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.get_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
# NOTE(lbragstad): This will allow users to clean up role assignments
# from the backend in the event the user was removed prior to the role
# assignment being removed.
@controller.protected(callback=functools.partial(
_check_grant_protection, allow_no_user=True))
def revoke_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Revokes a role from user/group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.delete_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context), context)
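# Routing note (illustrative, based on standard Keystone v3 conventions and
# not copied from this file): these grant methods are typically mapped to
# URLs of the form
#   PUT    /v3/projects/{project_id}/users/{user_id}/roles/{role_id}
#   HEAD   /v3/projects/{project_id}/users/{user_id}/roles/{role_id}
#   DELETE /v3/projects/{project_id}/users/{user_id}/roles/{role_id}
# with /OS-INHERIT/... and .../inherited_to_projects variants detected by
# _check_if_inherited() above.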
@dependency.requires('assignment_api', 'identity_api', 'resource_api')
class RoleAssignmentV3(controller.V3Controller):
"""The V3 Role Assignment APIs, really just list_role_assignment()."""
# TODO(henry-nash): The current implementation does not provide a full
# first class entity for role-assignment. There is no role_assignment_id
# and only the list_role_assignment call is supported. Further, since it
# is not a first class entity, the links for the individual entities
# reference the individual role grant APIs.
collection_name = 'role_assignments'
member_name = 'role_assignment'
@classmethod
def wrap_member(cls, context, ref):
# NOTE(henry-nash): Since we are not yet a true collection, we override
# the wrapper as we have already included the links in the entities
pass
def _format_entity(self, context, entity):
"""Format an assignment entity for API response.
The driver layer returns entities as dicts containing the ids of the
actor (e.g. user or group), target (e.g. domain or project) and role.
If it is an inherited role, then this is also indicated. Examples:
{'user_id': user_id,
'project_id': project_id,
'role_id': role_id}
or, for an inherited role:
{'user_id': user_id,
'domain_id': domain_id,
'role_id': role_id,
'inherited_to_projects': true}
This function maps this into the format to be returned via the API,
e.g. for the second example above:
{
'user': {
{'id': user_id}
},
'scope': {
'domain': {
{'id': domain_id}
},
'OS-INHERIT:inherited_to': 'projects'
},
'role': {
{'id': role_id}
},
'links': {
'assignment': '/domains/domain_id/users/user_id/roles/'
'role_id/inherited_to_projects'
}
}
"""
formatted_entity = {}
suffix = ""
if 'user_id' in entity:
formatted_entity['user'] = {'id': entity['user_id']}
actor_link = 'users/%s' % entity['user_id']
if 'group_id' in entity:
formatted_entity['group'] = {'id': entity['group_id']}
actor_link = 'groups/%s' % entity['group_id']
if 'role_id' in entity:
formatted_entity['role'] = {'id': entity['role_id']}
if 'project_id' in entity:
formatted_entity['scope'] = (
{'project': {'id': entity['project_id']}})
if 'inherited_to_projects' in entity:
formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
'projects')
target_link = '/OS-INHERIT/projects/%s' % entity['project_id']
suffix = '/inherited_to_projects'
else:
target_link = '/projects/%s' % entity['project_id']
if 'domain_id' in entity:
formatted_entity['scope'] = (
{'domain': {'id': entity['domain_id']}})
if 'inherited_to_projects' in entity:
formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
'projects')
target_link = '/OS-INHERIT/domains/%s' % entity['domain_id']
suffix = '/inherited_to_projects'
else:
target_link = '/domains/%s' % entity['domain_id']
formatted_entity.setdefault('links', {})
path = '%(target)s/%(actor)s/roles/%(role)s%(suffix)s' % {
'target': target_link,
'actor': actor_link,
'role': entity['role_id'],
'suffix': suffix}
formatted_entity['links']['assignment'] = self.base_url(context, path)
return formatted_entity
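# Worked example (comment only): a direct project assignment returned by the
# driver as
#   {'user_id': 'u1', 'project_id': 'p1', 'role_id': 'r1'}
# is formatted by _format_entity() into
#   {'user': {'id': 'u1'},
#    'scope': {'project': {'id': 'p1'}},
#    'role': {'id': 'r1'},
#    'links': {'assignment': '<base_url>/projects/p1/users/u1/roles/r1'}}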
def _expand_indirect_assignments(self, context, refs):
"""Processes entity list into all-direct assignments.
For any group role assignments in the list, create a role assignment
entity for each member of that group, and then remove the group
assignment entity itself from the list.
If the OS-INHERIT extension is enabled, then honor any inherited
roles on the domain by creating the equivalent on all projects
owned by the domain.
For any new entity created by virtue of group membership, add in an
additional link to that membership.
"""
def _get_group_members(ref):
"""Get a list of group members.
Get the list of group members. If this fails with
GroupNotFound, then log this as a warning, but allow
overall processing to continue.
"""
try:
members = self.identity_api.list_users_in_group(
ref['group']['id'])
except exception.GroupNotFound:
members = []
# The group is missing, which should not happen since
# group deletion should remove any related assignments, so
# log a warning
target = 'Unknown'
# Should always be a domain or project, but since to get
# here things have gone astray, let's be cautious.
if 'scope' in ref:
if 'domain' in ref['scope']:
dom_id = ref['scope']['domain'].get('id', 'Unknown')
target = 'Domain: %s' % dom_id
elif 'project' in ref['scope']:
proj_id = ref['scope']['project'].get('id', 'Unknown')
target = 'Project: %s' % proj_id
role_id = 'Unknown'
if 'role' in ref and 'id' in ref['role']:
role_id = ref['role']['id']
LOG.warning(
_LW('Group %(group)s not found for role-assignment - '
'%(target)s with Role: %(role)s'), {
'group': ref['group']['id'], 'target': target,
'role': role_id})
return members
def _build_user_assignment_equivalent_of_group(
user, group_id, template):
"""Create a user assignment equivalent to the group one.
The template has had the 'group' entity removed, so
substitute a 'user' one. The 'assignment' link stays as it is,
referring to the group assignment that led to this role.
A 'membership' link is added that refers to this particular
user's membership of this group.
"""
user_entry = copy.deepcopy(template)
user_entry['user'] = {'id': user['id']}
user_entry['links']['membership'] = (
self.base_url(context, '/groups/%s/users/%s' %
(group_id, user['id'])))
return user_entry
def _build_project_equivalent_of_user_target_role(
project_id, target_id, target_type, template):
"""Create a user project assignment equivalent to the domain one.
The template has had the 'domain' entity removed, so
substitute a 'project' one, modifying the 'assignment' link
to match.
"""
project_entry = copy.deepcopy(template)
project_entry['scope']['project'] = {'id': project_id}
project_entry['links']['assignment'] = (
self.base_url(
context,
'/OS-INHERIT/%s/%s/users/%s/roles/%s'
'/inherited_to_projects' % (
target_type, target_id, project_entry['user']['id'],
project_entry['role']['id'])))
return project_entry
def _build_project_equivalent_of_group_target_role(
user_id, group_id, project_id,
target_id, target_type, template):
"""Create a user project equivalent to the domain group one.
The template has had the 'domain' and 'group' entities removed, so
substitute a 'user-project' one, modifying the 'assignment' link
to match.
"""
project_entry = copy.deepcopy(template)
project_entry['user'] = {'id': user_id}
project_entry['scope']['project'] = {'id': project_id}
project_entry['links']['assignment'] = (
self.base_url(context,
'/OS-INHERIT/%s/%s/groups/%s/roles/%s'
'/inherited_to_projects' % (
target_type, target_id, group_id,
project_entry['role']['id'])))
project_entry['links']['membership'] = (
self.base_url(context, '/groups/%s/users/%s' %
(group_id, user_id)))
return project_entry
# Scan the list of entities for any assignments that need to be
# expanded.
#
# If the OS-INHERIT extension is enabled, the refs lists may
# contain roles to be inherited from domain to project, so expand
# these as well into project equivalents
#
# For any regular group entries, expand these into user entries based
# on membership of that group.
#
# Due to the potentially large expansions, rather than modify the
# list we are enumerating, we build a new one as we go.
#
new_refs = []
for r in refs:
if 'OS-INHERIT:inherited_to' in r['scope']:
if 'domain' in r['scope']:
# It's an inherited domain role - so get the list of
# projects owned by this domain.
project_ids = (
[x['id'] for x in
self.resource_api.list_projects_in_domain(
r['scope']['domain']['id'])])
base_entry = copy.deepcopy(r)
target_type = 'domains'
target_id = base_entry['scope']['domain']['id']
base_entry['scope'].pop('domain')
else:
# It's an inherited project role - so get the list of
# projects in this project subtree.
project_id = r['scope']['project']['id']
project_ids = (
[x['id'] for x in
self.resource_api.list_projects_in_subtree(
project_id)])
base_entry = copy.deepcopy(r)
target_type = 'projects'
target_id = base_entry['scope']['project']['id']
base_entry['scope'].pop('project')
# For each project, create an equivalent role assignment
for p in project_ids:
# If it's a group assignment, then create equivalent user
# roles based on membership of the group
if 'group' in base_entry:
members = _get_group_members(base_entry)
sub_entry = copy.deepcopy(base_entry)
group_id = sub_entry['group']['id']
sub_entry.pop('group')
for m in members:
new_entry = (
_build_project_equivalent_of_group_target_role(
m['id'], group_id, p,
target_id, target_type, sub_entry))
new_refs.append(new_entry)
else:
new_entry = (
_build_project_equivalent_of_user_target_role(
p, target_id, target_type, base_entry))
new_refs.append(new_entry)
elif 'group' in r:
# It's a non-inherited group role assignment, so get the list
# of members.
members = _get_group_members(r)
# Now replace that group role assignment entry with an
# equivalent user role assignment for each of the group members
base_entry = copy.deepcopy(r)
group_id = base_entry['group']['id']
base_entry.pop('group')
for m in members:
user_entry = _build_user_assignment_equivalent_of_group(
m, group_id, base_entry)
new_refs.append(user_entry)
else:
new_refs.append(r)
return new_refs
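# Worked example (comment only): in effective mode a single group assignment
# such as
#   {'group': {'id': 'g1'}, 'scope': {'project': {'id': 'p1'}},
#    'role': {'id': 'r1'}, 'links': {'assignment': '...'}}
# is replaced by one entry per member of g1; each new entry carries a 'user'
# key instead of 'group', keeps the original 'assignment' link, and gains a
# links['membership'] of the form /groups/g1/users/<user_id>.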
def _filter_inherited(self, entry):
if ('inherited_to_projects' in entry and
not CONF.os_inherit.enabled):
return False
else:
return True
def _assert_effective_filters(self, inherited, group, domain):
"""Assert that useless filter combinations are avoided.
In effective mode, the following filter combinations are useless, since
they would always return an empty list of role assignments:
- group id, since no group assignment is returned in effective mode;
- domain id and inherited, since no domain inherited assignment is
returned in effective mode.
"""
if group:
msg = _('Combining effective and group filter will always '
'result in an empty list.')
raise exception.ValidationError(msg)
if inherited and domain:
msg = _('Combining effective, domain and inherited filters will '
'always result in an empty list.')
raise exception.ValidationError(msg)
def _assert_domain_nand_project(self, domain_id, project_id):
if domain_id and project_id:
msg = _('Specify a domain or project, not both')
raise exception.ValidationError(msg)
def _assert_user_nand_group(self, user_id, group_id):
if user_id and group_id:
msg = _('Specify a user or group, not both')
raise exception.ValidationError(msg)
@controller.filterprotected('group.id', 'role.id',
'scope.domain.id', 'scope.project.id',
'scope.OS-INHERIT:inherited_to', 'user.id')
def list_role_assignments(self, context, filters):
# TODO(henry-nash): This implementation uses the standard filtering
# in the V3.wrap_collection. Given the large number of individual
# assignments, this is pretty inefficient. An alternative would be
# to pass the filters into the driver call, so that the list size is
# kept to a minimum.
params = context['query_string']
effective = 'effective' in params and (
self.query_filter_is_true(params['effective']))
if 'scope.OS-INHERIT:inherited_to' in params:
inherited = (
params['scope.OS-INHERIT:inherited_to'] == 'projects')
else:
# None means querying both inherited and direct assignments
inherited = None
self._assert_domain_nand_project(params.get('scope.domain.id'),
params.get('scope.project.id'))
self._assert_user_nand_group(params.get('user.id'),
params.get('group.id'))
if effective:
self._assert_effective_filters(inherited=inherited,
group=params.get('group.id'),
domain=params.get(
'scope.domain.id'))
hints = self.build_driver_hints(context, filters)
refs = self.assignment_api.list_role_assignments()
formatted_refs = (
[self._format_entity(context, x) for x in refs
if self._filter_inherited(x)])
if effective:
formatted_refs = self._expand_indirect_assignments(context,
formatted_refs)
return self.wrap_collection(context, formatted_refs, hints=hints)
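# Illustrative request (comment only): listing the effective assignments for
# one user on one project could look like
#   GET /v3/role_assignments?effective=true&user.id=u1&scope.project.id=p1
# 'effective' triggers _expand_indirect_assignments(); the remaining filters
# are applied through the driver hints built above and wrap_collection().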
@controller.protected()
def get_role_assignment(self, context):
raise exception.NotImplemented()
@controller.protected()
def update_role_assignment(self, context):
raise exception.NotImplemented()
@controller.protected()
def delete_role_assignment(self, context):
raise exception.NotImplemented()
|
|
from __future__ import absolute_import
import datetime
from xml_models import xpath_finder
from xml_models.managers import ModelManager
from dateutil.parser import parse as date_parser
from lxml import etree
# pylint: disable=too-few-public-methods
# Fields only need one public method
from xml_models.xpath_finder import MultipleNodesReturnedException
class BaseField:
"""
Base class for Fields. Should not be used directly
"""
def __init__(self, **kw):
"""
All fields must specify an ``xpath`` as a keyword argument in their constructor. Fields may optionally specify
a default value using the ``default`` keyword argument.
:raises AttributeError: if xpath attribute is empty
"""
if 'xpath' not in kw:
raise AttributeError('No XPath supplied for xml field')
self.xpath = kw['xpath']
self._default = kw.pop('default', None)
def _fetch_by_xpath(self, xml_doc, namespace):
find = xpath_finder.find_unique(xml_doc, self.xpath, namespace)
if find is None:
return self._default
return find
class CharField(BaseField):
"""
Returns the single value found by the xpath expression, as a string
"""
def parse(self, xml, namespace):
"""
:param xml: the etree.Element to search in
:param namespace: not used yet
:rtype: string
"""
return self._fetch_by_xpath(xml, namespace)
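# Minimal usage sketch (assumed XML shape, comment only):
#
#   field = CharField(xpath='/person/name', default='unknown')
#   doc = xpath_finder.domify('<person><name>Alice</name></person>')
#   field.parse(doc, None)   # -> 'Alice'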
class IntField(BaseField):
"""
Returns the single value found by the xpath expression, as an int
"""
def parse(self, xml, namespace):
"""
:param xml: the etree.Element to search in
:param namespace: not used yet
:rtype: int
"""
value = self._fetch_by_xpath(xml, namespace)
if value:
return int(value)
return self._default
class DateField(BaseField):
"""
Returns the single value found by the xpath expression, as a ``datetime``.
By default, expects dates that match the ISO8601 date format. If a ``date_format`` keyword
arg is supplied, that will be used instead. ``date_format`` should conform to ``strptime`` formatting options.
If the XML contains UTC offsets then a timezone aware datetime object will be returned.
"""
def __init__(self, date_format=None, **kw):
BaseField.__init__(self, **kw)
self.date_format = date_format
def parse(self, xml, namespace):
"""
:param xml: the etree.Element to search in
:param namespace: not used yet
:rtype: DateTime, may be timezone aware or naive
"""
value = self._fetch_by_xpath(xml, namespace)
if value:
if self.date_format:
return datetime.datetime.strptime(value, self.date_format)
return date_parser(value)
return self._default
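# Minimal usage sketch (assumed XML shape, comment only):
#
#   field = DateField(xpath='/event/@when', date_format='%d-%m-%Y')
#   doc = xpath_finder.domify('<event when="01-02-2003"/>')
#   field.parse(doc, None)   # -> datetime.datetime(2003, 2, 1, 0, 0)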
class FloatField(BaseField):
"""
Returns the single value found by the xpath expression, as a float
"""
def parse(self, xml, namespace):
"""
:param xml: the etree.Element to search in
:param namespace: not used yet
:rtype: float
"""
value = self._fetch_by_xpath(xml, namespace)
if value:
return float(value)
return self._default
class BoolField(BaseField):
"""
Returns the single value found by the xpath expression, as a boolean
"""
def parse(self, xml, namespace):
"""
Recognises any-case TRUE or FALSE only, i.e. it won't parse 0 as False or 1 as True.
:param xml: the etree.Element to search in
:param namespace: not used yet
:rtype: Bool
"""
value = self._fetch_by_xpath(xml, namespace)
if value is not None:
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
return self._default
class CollectionField(BaseField):
"""
Returns a collection found by the xpath expression.
Requires a field_type to be supplied, which can either be a field type, e.g. :class:`IntField`, which returns a
collection ints, or it can be a :class:`Model` type e.g. Person may contain a collection of Address objects.
"""
def __init__(self, field_type, order_by=None, **kw):
"""
:param field_type: class to cast to. Should be a subclass of :class:`BaseField` or :class:`Model`
:param order_by: the attribute in ``field_type`` to order the collection on. Asc only
"""
self.field_type = field_type
self.order_by = order_by
BaseField.__init__(self, **kw)
def parse(self, xml, namespace):
"""
Find all nodes matching the xpath expression and create objects from each matched node.
If ``order_by`` has been defined then the resulting list will be ordered.
:param xml: the etree.Element to search in
:param namespace: not used yet
:rtype: as defined by ``self.field_type``
"""
matches = xpath_finder.find_all(xml, self.xpath, namespace)
if BaseField not in self.field_type.__bases__:
results = [self.field_type(xml=match) for match in matches]
else:
field = self.field_type(xpath='.')
results = [field.parse(xpath_finder.domify(match), namespace) for match in matches]
if self.order_by:
from operator import attrgetter
results.sort(key=attrgetter(self.order_by))
return results
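# Minimal usage sketch (comment only): a collection of simple values versus
# a collection of nested models (the Address model is assumed to exist in
# the calling code).
#
#   ages = CollectionField(IntField, xpath='/family/member/@age')
#   # addresses = CollectionField(Address, xpath='/family/member/address')
#
# For the simple-field case each matched node is re-parsed with a field
# anchored at '.', which is why domify() is applied to every match above.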
class OneToOneField(BaseField):
"""
Returns a subclass of :class:`Model` from the xpath expression.
"""
def __init__(self, field_type, **kw):
"""
:param field_type: class to cast to. Should be a subclass of :class:`BaseField` or :class:`Model`
"""
self.field_type = field_type
BaseField.__init__(self, **kw)
def parse(self, xml, namespace):
"""
:param xml: the etree.Element to search in
:param namespace: not used yet
:rtype: as defined by ``self.field_type``
"""
match = xpath_finder.find_all(xml, self.xpath, namespace)
if len(match) > 1:
raise MultipleNodesReturnedException
if len(match) == 1:
return self.field_type(xml=match[0])
return self._default
class ModelBase(type):
"""
Meta class for declarative xml_model building
"""
def __new__(mcs, name, bases, attrs):
new_class = super(ModelBase, mcs).__new__(mcs, name, bases, attrs)
xml_fields = [field_name for field_name in attrs.keys() if isinstance(attrs[field_name], BaseField)]
setattr(new_class, 'xml_fields', xml_fields)
for field_name in xml_fields:
setattr(new_class, field_name, new_class._get_xpath(attrs[field_name]))
attrs[field_name]._name = field_name
if "finders" in attrs:
setattr(new_class, "objects", ModelManager(new_class, attrs["finders"]))
else:
setattr(new_class, "objects", ModelManager(new_class, {}))
if "headers" in attrs:
setattr(new_class.objects, "headers", attrs["headers"])
return new_class
def _get_xpath(cls, field_impl):
return property(fget=lambda cls: cls._parse_field(field_impl),
fset=lambda cls, value: cls._set_value(field_impl, value))
from future.utils import with_metaclass
class Model(with_metaclass(ModelBase)):
"""
A model is a representation of the XML source, consisting of a number of Fields. It can be constructed with
either an xml string, or an :class:`etree.Element`.
:Example:
.. code-block:: python
class Person(xml_models.Model):
namespace="urn:my.default.namespace"
name = xml_models.CharField(xpath="/Person/@Name", default="John")
nicknames = xml_models.CollectionField(CharField, xpath="/Person/Nicknames/Name")
addresses = xml_models.CollectionField(Address, xpath="/Person/Addresses/Address")
date_of_birth = xml_models.DateField(xpath="/Person/@DateOfBirth", date_format="%d-%m-%Y")
If you define :ref:`finders` on your model you will also be able to retrieve models from an API endpoint using
a familiar Django-esque object manager style of access with chainable filtering etc.
"""
def __init__(self, xml=None, dom=None):
self._xml = xml
self._dom = dom
self._cache = {}
self.validate_on_load()
def validate_on_load(self):
"""
Perform validation when the model is instantiated.
Override on your model to perform validation when the XML data is first passed in.
.. note:: You will need to raise appropriate exceptions as no checking of the return value occurs
"""
pass
def to_tree(self):
"""
:class:`etree.Element` representation of :class:`Model`
:rtype: :class:`lxml.etree.Element`
"""
for field in self._cache:
self._update_field(field)
return self._get_tree()
def to_xml(self, pretty=False):
"""
XML representation of Model
:rtype: string
"""
return etree.tostring(self.to_tree(), pretty_print=pretty).decode('UTF-8')
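# Round-trip sketch (comment only, using the assumed Person model from the
# class docstring above):
#
#   p = Person(xml='<Person Name="Jane"/>')
#   p.name            # -> 'Jane' (parsed lazily and cached)
#   p.name = 'Janet'  # stored in self._cache via _set_value()
#   p.to_xml()        # re-serialises the tree with the updated attribute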
def _update_attribute(self, field):
"""
Update the value of an attribute field.
Assumes simple data type in the attribute that can be cast to string
:param field: field to update
"""
parts = field.xpath.split('/')
xpath = "/".join(parts[:-1]) # I think it is safe to assume attributes are in the last place
attr = parts[-1].replace('@', '')
self._get_tree().xpath(xpath)[0].attrib[attr] = str(getattr(self, field._name))
def _update_subtree(self, field):
"""
Replace a whole subtree
:param field: Model field with `to_tree`
"""
new_tree = getattr(self, field._name).to_tree()
old_tree = self._get_tree().xpath(field.xpath)[0]
self._get_tree().replace(old_tree, new_tree)
def _create_from_xpath(self, xpath, tree, value=None, extra_root_name=None):
"""
Generates XML under `tree` that will satisfy `xpath`. Will pre-populate `value` if given
:param xpath: simple xpath only. Does not handle attributes, indexing etc.
:param tree: parent tree
:param value: value to pre-populate the new node with
:param extra_root_name: extra root node added for helping generating xml
:return: Element node
"""
# not handling attribute
parts = [x for x in xpath.split('/') if x != '' and x[0] != '@']
xpath = '' if extra_root_name is None else '/' + extra_root_name
for part in parts[:-1]: # save the last node
xpath += '/' + part
nodes = tree.xpath(xpath)
if not nodes:
node = etree.XML("<%s/>" % part)
tree.append(node)
tree = node
else:
tree = nodes[0]
# now we create the missing last node
node = etree.XML("<%s/>" % parts[-1])
tree.append(node)
if value:
node.text = str(value)
return node
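# Worked example (comment only): with tree = etree.Element('Person') and
# xpath '/Person/Address/City', the loop finds the existing <Person> root,
# creates <Address> under it, then appends <City>value</City> and returns
# the new <City> node.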
def _update_collection(self, field):
"""
Update _dom with all the items in a CollectionField value
:param field: CollectionField
"""
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
new_values = getattr(self, field._name)
old_values = self._get_tree().xpath(field.xpath)
collection_xpath = "/".join(field.xpath.split('/')[:-1])
collection_node = self._get_tree().xpath(collection_xpath)[0]
for old, new in zip_longest(old_values, new_values):
if not new:
old.getparent().remove(old)
continue
if isinstance(field.field_type, ModelBase):
xml = etree.fromstring(new.to_xml())
if old is None:
collection_node.append(xml)
else:
collection_node.replace(old, xml)
continue
if old is None:
self._create_from_xpath(field.xpath, self._get_tree(), new)
else:
old.text = new
def _update_field(self, field):
"""
Update _dom with value from field
:param field: BaseField
:return:
"""
if '@' in field.xpath:
self._update_attribute(field)
elif isinstance(field, CollectionField):
self._update_collection(field)
elif isinstance(field, OneToOneField):
self._update_subtree(field)
else:
node = self._get_tree().xpath(field.xpath)
value = str(getattr(self, field._name))
if node:
node[0].text = value
else:
self._create_from_xpath(field.xpath, self._get_tree(), value)
def _get_tree(self):
if self._dom is None:
self._dom = xpath_finder.domify(self._get_xml())
return self._dom
def _get_xml(self):
if not self._xml:
# create a fake root node that will get stripped off later
tree = etree.Element('RrootR')
for field in self._cache:
self._create_from_xpath(field.xpath, tree, extra_root_name='RrootR')
self._xml = etree.tostring(tree[0])
return self._xml
def _set_value(self, field, value):
self._cache[field] = value
def _parse_field(self, field):
if field not in self._cache:
namespace = getattr(self, 'namespace', None)
self._cache[field] = field.parse(self._get_tree(), namespace)
return self._cache[field]
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from ..decorators import requires_auth
from ..exceptions import error_for
from ..models import GitHubCore
from .. import utils
from uritemplate import URITemplate
class Release(GitHubCore):
"""The :class:`Release <Release>` object.
It holds the information GitHub returns about a release from a
:class:`Repository <github3.repos.repo.Repository>`.
"""
CUSTOM_HEADERS = {'Accept': 'application/vnd.github.manifold-preview'}
def _update_attributes(self, release):
self._api = release.get('url')
#: List of :class:`Asset <Asset>` objects for this release
self.original_assets = [
Asset(i, self) for i in release.get('assets', [])
]
#: URL for uploaded assets
self.assets_url = release.get('assets_url')
#: Body of the release (the description)
self.body = release.get('body')
#: Date the release was created
self.created_at = self._strptime(release.get('created_at'))
#: Boolean whether the release is a draft
self.draft = release.get('draft')
#: HTML URL of the release
self.html_url = release.get('html_url')
#: GitHub id
self.id = release.get('id')
#: Name given to the release
self.name = release.get('name')
#: Boolean whether release is a prerelease
self.prerelease = release.get('prerelease')
#: Date the release was published
self.published_at = self._strptime(release.get('published_at'))
#: Name of the tag
self.tag_name = release.get('tag_name')
#: URL to download a tarball of the release
self.tarball_url = release.get('tarball_url')
#: "Commit" that this release targets
self.target_commitish = release.get('target_commitish')
upload_url = release.get('upload_url')
#: URITemplate to upload an asset with
self.upload_urlt = URITemplate(upload_url) if upload_url else None
#: URL to download a zipball of the release
self.zipball_url = release.get('zipball_url')
def _repr(self):
return '<Release [{0}]>'.format(self.name)
def archive(self, format, path=''):
"""Get the tarball or zipball archive for this release.
:param str format: (required), accepted values: ('tarball',
'zipball')
:param path: (optional), path where the file should be saved to; by
default the filename provided in the headers is used and the file is
written to the current directory. A file-like object may also be given.
:type path: str, file
:returns: bool -- True if successful, False otherwise
"""
resp = None
if format in ('tarball', 'zipball'):
repo_url = self._api[:self._api.rfind('/releases')]
url = self._build_url(format, self.tag_name, base_url=repo_url)
resp = self._get(url, allow_redirects=True, stream=True)
if resp and self._boolean(resp, 200, 404):
utils.stream_response_to_file(resp, path)
return True
return False
def asset(self, asset_id):
"""Retrieve the asset from this release with ``asset_id``.
:param int asset_id: ID of the Asset to retrieve
:returns: :class:`~github3.repos.release.Asset`
"""
json = None
if int(asset_id) > 0:
i = self._api.rfind('/')
url = self._build_url('assets', str(asset_id),
base_url=self._api[:i])
json = self._json(self._get(url), 200)
return self._instance_or_null(Asset, json)
def assets(self, number=-1, etag=None):
"""Iterate over the assets available for this release.
:param int number: (optional), Number of assets to return
:param str etag: (optional), last ETag header sent
:returns: generator of :class:`Asset <Asset>` objects
"""
url = self._build_url('assets', base_url=self._api)
return self._iter(number, url, Asset, etag=etag)
@requires_auth
def delete(self):
"""Users with push access to the repository can delete a release.
:returns: True if successful; False if not successful
"""
url = self._api
return self._boolean(
self._delete(url, headers=Release.CUSTOM_HEADERS),
204,
404
)
@requires_auth
def edit(self, tag_name=None, target_commitish=None, name=None, body=None,
draft=None, prerelease=None):
"""Users with push access to the repository can edit a release.
If the edit is successful, this object will update itself.
:param str tag_name: (optional), Name of the tag to use
:param str target_commitish: (optional), The "commitish" value that
determines where the Git tag is created from. Defaults to the
repository's default branch.
:param str name: (optional), Name of the release
:param str body: (optional), Description of the release
:param boolean draft: (optional), True => Release is a draft
:param boolean prerelease: (optional), True => Release is a prerelease
:returns: True if successful; False if not successful
"""
url = self._api
data = {
'tag_name': tag_name,
'target_commitish': target_commitish,
'name': name,
'body': body,
'draft': draft,
'prerelease': prerelease,
}
self._remove_none(data)
r = self.session.patch(
url, data=json.dumps(data), headers=Release.CUSTOM_HEADERS
)
successful = self._boolean(r, 200, 404)
if successful:
# If the edit was successful, let's update the object.
self._update_attributes(r.json())
return successful
@requires_auth
def upload_asset(self, content_type, name, asset, label=None):
"""Upload an asset to this release.
All parameters are required.
:param str content_type: The content type of the asset. Wikipedia has
a list of common media types
:param str name: The name of the file
:param asset: The file or bytes object to upload.
:param label: (optional), An alternate short description of the asset.
:returns: :class:`Asset <Asset>`
"""
headers = {'Content-Type': content_type}
params = {'name': name, 'label': label}
self._remove_none(params)
url = self.upload_urlt.expand(params)
r = self._post(url, data=asset, json=False, headers=headers)
if r.status_code in (201, 202):
return Asset(r.json(), self)
raise error_for(r)
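# Usage sketch (illustrative values, assuming a github3 Repository object
# with a release() helper is available as `repository`):
#
#   release = repository.release(12345)
#   with open('build.tar.gz', 'rb') as fd:
#       asset = release.upload_asset('application/gzip', 'build.tar.gz', fd)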
class Asset(GitHubCore):
def _update_attributes(self, asset):
self._api = asset.get('url')
#: Content-Type provided when the asset was created
self.content_type = asset.get('content_type')
#: Date the asset was created
self.created_at = self._strptime(asset.get('created_at'))
#: Number of times the asset was downloaded
self.download_count = asset.get('download_count')
#: URL to download the asset.
#: Request headers must include ``Accept: application/octet-stream``.
self.download_url = self._api
#: User-friendly download URL
self.browser_download_url = asset.get('browser_download_url')
#: GitHub id of the asset
self.id = asset.get('id')
#: Short description of the asset
self.label = asset.get('label')
#: Name of the asset
self.name = asset.get('name')
#: Size of the asset
self.size = asset.get('size')
#: State of the asset, e.g., "uploaded"
self.state = asset.get('state')
#: Date the asset was updated
self.updated_at = self._strptime(asset.get('updated_at'))
def _repr(self):
return '<Asset [{0}]>'.format(self.name)
def download(self, path=''):
"""Download the data for this asset.
:param path: (optional), path where the file should be saved to; by
default the filename provided in the headers is used and the file is
written to the current directory. A file-like object may also be given.
:type path: str, file
:returns: name of the file, if successful otherwise ``None``
:rtype: str
"""
headers = {
'Accept': 'application/octet-stream'
}
resp = self._get(self._api, allow_redirects=False, stream=True,
headers=headers)
if resp.status_code == 302:
# Amazon S3 will reject the redirected request unless we omit
# certain request headers
headers.update({
'Content-Type': None,
})
with self.session.no_auth():
resp = self._get(resp.headers['location'], stream=True,
headers=headers)
if self._boolean(resp, 200, 404):
return utils.stream_response_to_file(resp, path)
return None
@requires_auth
def delete(self):
"""Delete this asset if the user has push access.
:returns: True if successful; False if not successful
:rtype: boolean
"""
url = self._api
return self._boolean(
self._delete(url, headers=Release.CUSTOM_HEADERS),
204,
404
)
def edit(self, name, label=None):
"""Edit this asset.
:param str name: (required), The file name of the asset
:param str label: (optional), An alternate description of the asset
:returns: boolean
"""
if not name:
return False
edit_data = {'name': name, 'label': label}
self._remove_none(edit_data)
r = self._patch(
self._api,
data=json.dumps(edit_data),
headers=Release.CUSTOM_HEADERS
)
successful = self._boolean(r, 200, 404)
if successful:
self._update_attributes(r.json())
return successful
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import pipes
import threading
from dataclasses import dataclass, asdict, field
import logging
import sqlite3
import time
@dataclass
class JobInfo:
"""
A class for ota job information
"""
id: str
target: str
incremental: str = ''
verbose: bool = False
partial: list[str] = field(default_factory=list)
output: str = ''
status: str = 'Running'
downgrade: bool = False
extra: str = ''
stdout: str = ''
stderr: str = ''
start_time: int = 0
finish_time: int = 0
isPartial: bool = False
isIncremental: bool = False
@property
def is_running(self):
return self.status == 'Running'
@property
def is_killed(self):
return self.status == 'Killed'
def __post_init__(self):
def enforce_bool(t): return t if isinstance(t, bool) else bool(t)
self.verbose, self.downgrade = map(
enforce_bool,
[self.verbose, self.downgrade])
if self.incremental:
self.isIncremental = True
if self.partial:
self.isPartial = True
else:
self.partial = []
if isinstance(self.partial, str):
self.partial = self.partial.split(',')
def to_sql_form_dict(self):
"""
Convert this instance to a dict, which can be later used to insert into
the SQL database.
Format:
id: string, target: string, incremental: string, verbose: int,
partial: string, output:string, status:string,
downgrade: bool, extra: string, stdout: string, stderr:string,
start_time:int, finish_time: int(not required)
"""
sql_form_dict = asdict(self)
sql_form_dict['partial'] = ','.join(sql_form_dict['partial'])
def bool_to_int(t): return 1 if t else 0
sql_form_dict['verbose'], sql_form_dict['downgrade'] = map(
bool_to_int,
[sql_form_dict['verbose'], sql_form_dict['downgrade']])
return sql_form_dict
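# Worked example (comment only): for
#   JobInfo(id='1', target='a/b.zip', partial=['system', 'vendor'], verbose=True)
# to_sql_form_dict() yields a dict with partial='system,vendor', verbose=1
# and downgrade=0, matching the column types of the Jobs table defined below.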
def to_dict_basic(self):
"""
Convert the instance to a dict, which includes the file name of target.
"""
basic_info = asdict(self)
basic_info['target_name'] = self.target.split('/')[-1]
if self.isIncremental:
basic_info['incremental_name'] = self.incremental.split('/')[-1]
return basic_info
def to_dict_detail(self, target_lib, offset=0):
"""
Convert this instance into a dict, which includes some detailed information
of the target/source build, i.e. build version and file name.
"""
detail_info = asdict(self)
try:
with open(self.stdout, 'r') as fout:
detail_info['stdout'] = fout.read()
with open(self.stderr, 'r') as ferr:
detail_info['stderr'] = ferr.read()
except FileNotFoundError:
detail_info['stdout'] = 'NO STD OUTPUT IS FOUND'
detail_info['stderr'] = 'NO STD ERROR IS FOUND'
target_info = target_lib.get_build_by_path(self.target)
detail_info['target_name'] = target_info.file_name
detail_info['target_build_version'] = target_info.build_version
if self.incremental:
incremental_info = target_lib.get_build_by_path(
self.incremental)
detail_info['incremental_name'] = incremental_info.file_name
detail_info['incremental_build_version'] = incremental_info.build_version
return detail_info
class DependencyError(Exception):
pass
class ProcessesManagement:
"""
A class that manages the OTA generation processes
"""
@staticmethod
def check_external_dependencies():
try:
java_version = subprocess.check_output(["java", "--version"])
print("Java version:", java_version.decode())
except Exception as e:
raise DependencyError(
"java not found in PATH. Attempt to generate OTA might fail. " + str(e))
try:
zip_version = subprocess.check_output(["zip", "-v"])
print("Zip version:", zip_version.decode())
except Exception as e:
raise DependencyError(
"zip command not found in PATH. Attempt to generate OTA might fail. " + str(e))
def __init__(self, *, working_dir='output', db_path=None, otatools_dir=None):
"""
create a table if not exist
"""
ProcessesManagement.check_external_dependencies()
self.working_dir = working_dir
self.logs_dir = os.path.join(working_dir, 'logs')
self.otatools_dir = otatools_dir
os.makedirs(self.working_dir, exist_ok=True)
os.makedirs(self.logs_dir, exist_ok=True)
if not db_path:
db_path = os.path.join(self.working_dir, "ota_database.db")
self.path = db_path
with sqlite3.connect(self.path) as connect:
cursor = connect.cursor()
cursor.execute("""
CREATE TABLE if not exists Jobs (
ID TEXT,
TargetPath TEXT,
IncrementalPath TEXT,
Verbose INTEGER,
Partial TEXT,
OutputPath TEXT,
Status TEXT,
Downgrade INTEGER,
OtherFlags TEXT,
STDOUT TEXT,
STDERR TEXT,
StartTime INTEGER,
FinishTime INTEGER
)
""")
for job in self.get_running_jobs():
end_time = min(os.stat(job.stdout).st_mtime,
os.stat(job.stderr).st_mtime)
logging.info(
"Updating %s to status 'Killed', end time %d", job.id, end_time)
self.update_status(job.id, 'Killed', end_time)
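# Usage sketch (illustrative paths): the manager creates its working and log
# directories and the SQLite schema on construction, so callers typically
# only need
#
#   manager = ProcessesManagement(working_dir='output',
#                                 otatools_dir='/path/to/otatools')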
def insert_database(self, job_info):
"""
Insert the job_info into the database
Args:
job_info: JobInfo
"""
with sqlite3.connect(self.path) as connect:
cursor = connect.cursor()
cursor.execute("""
INSERT INTO Jobs (ID, TargetPath, IncrementalPath, Verbose, Partial, OutputPath, Status, Downgrade, OtherFlags, STDOUT, STDERR, StartTime, FinishTime)
VALUES (:id, :target, :incremental, :verbose, :partial, :output, :status, :downgrade, :extra, :stdout, :stderr, :start_time, :finish_time)
""", job_info.to_sql_form_dict())
def get_status_by_ID(self, id):
"""
Return the status of job <id> as a instance of JobInfo
Args:
id: string
Return:
JobInfo
"""
with sqlite3.connect(self.path) as connect:
cursor = connect.cursor()
logging.info(id)
cursor.execute("""
SELECT *
FROM Jobs WHERE ID=(?)
""", (str(id),))
row = cursor.fetchone()
status = JobInfo(*row)
return status
def get_running_jobs(self):
with sqlite3.connect(self.path) as connect:
cursor = connect.cursor()
cursor.execute("""
SELECT *
FROM Jobs
WHERE Status == 'Running'
""")
rows = cursor.fetchall()
statuses = [JobInfo(*row) for row in rows]
return statuses
def get_status(self):
"""
Return the status of all jobs as a list of JobInfo
Return:
List[JobInfo]
"""
with sqlite3.connect(self.path) as connect:
cursor = connect.cursor()
cursor.execute("""
SELECT *
FROM Jobs
""")
rows = cursor.fetchall()
statuses = [JobInfo(*row) for row in rows]
return statuses
def update_status(self, id, status, finish_time):
"""
Change the status and finish time of job <id> in the database
Args:
id: string
status: string
finish_time: int
"""
with sqlite3.connect(self.path) as connect:
cursor = connect.cursor()
cursor.execute("""
UPDATE Jobs SET Status=(?), FinishTime=(?)
WHERE ID=(?)
""",
(status, finish_time, id))
def ota_run(self, command, id, stdout_path, stderr_path):
"""
Initiate a subprocess to run the OTA generation, wait until it finishes, and
update the record in the database.
"""
stderr_pipes = pipes.Template()
stdout_pipes = pipes.Template()
ferr = stderr_pipes.open(stderr_path, 'w')
fout = stdout_pipes.open(stdout_path, 'w')
env = {}
if self.otatools_dir:
env['PATH'] = os.path.join(
self.otatools_dir, "bin") + ":" + os.environ["PATH"]
# TODO(lishutong): Enable user to use self-defined stderr/stdout path
try:
proc = subprocess.Popen(
command, stderr=ferr, stdout=fout, shell=False, env=env, cwd=self.otatools_dir)
self.update_status(id, 'Running', 0)
except FileNotFoundError as e:
logging.error('ota_from_target_files is not set properly %s', e)
self.update_status(id, 'Error', int(time.time()))
raise
except Exception as e:
logging.error('Failed to execute ota_from_target_files %s', e)
self.update_status(id, 'Error', int(time.time()))
raise
def wait_result():
# Default to a non-zero code so a failure inside wait() is recorded as 'Error'
exit_code = -1
try:
exit_code = proc.wait()
finally:
if exit_code == 0:
self.update_status(id, 'Finished', int(time.time()))
else:
self.update_status(id, 'Error', int(time.time()))
threading.Thread(target=wait_result).start()
def ota_generate(self, args, id):
"""
Read in the arguments from the frontend and start running the OTA
generation process, then update the records in database.
Format of args:
output: string, extra_keys: List[string], extra: string,
isIncremental: bool, isPartial: bool, partial: List[string],
incremental: string, target: string, verbose: bool
args:
args: dict
id: string
"""
command = ['ota_from_target_files']
# Check essential configuration is properly set
if not os.path.isfile(args['target']):
raise FileNotFoundError
if 'output' not in args:
args['output'] = os.path.join(self.working_dir, str(id) + '.zip')
if args['verbose']:
command.append('-v')
if args['extra_keys']:
args['extra'] = '--' + \
' --'.join(args['extra_keys']) + ' ' + args['extra']
if args['extra']:
command += args['extra'].strip().split(' ')
if args['isIncremental']:
if not os.path.isfile(args['incremental']):
raise FileNotFoundError
command.append('-i')
command.append(os.path.realpath(args['incremental']))
if args['isPartial']:
command.append('--partial')
command.append(' '.join(args['partial']))
command.append(os.path.realpath(args['target']))
command.append(os.path.realpath(args['output']))
stdout = os.path.join(self.logs_dir, 'stdout.' + str(id))
stderr = os.path.join(self.logs_dir, 'stderr.' + str(id))
job_info = JobInfo(id,
target=args['target'],
incremental=args['incremental'] if args['isIncremental'] else '',
verbose=args['verbose'],
partial=args['partial'] if args['isPartial'] else [
],
output=args['output'],
status='Pending',
extra=args['extra'],
start_time=int(time.time()),
stdout=stdout,
stderr=stderr
)
self.insert_database(job_info)
self.ota_run(command, id, job_info.stdout, job_info.stderr)
logging.info(
'Starting OTA package generation with id {}: \n {}'
.format(id, command))
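# Usage sketch (illustrative args dict, matching the format described in the
# docstring above; the paths are placeholders):
#
#   manager.ota_generate({
#       'target': 'target/build-2.zip', 'incremental': 'target/build-1.zip',
#       'isIncremental': True, 'isPartial': False, 'partial': [],
#       'verbose': True, 'extra_keys': [], 'extra': '', 'output': 'out.zip',
#   }, id='job-1')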
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class SupportingDocumentTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trusthub.v1.supporting_documents.create(friendly_name="friendly_name", type="type")
values = {'FriendlyName': "friendly_name", 'Type': "type", }
self.holodeck.assert_has_request(Request(
'post',
'https://trusthub.twilio.com/v1/SupportingDocuments',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"status": "draft",
"date_updated": "2021-02-11T17:23:00Z",
"friendly_name": "Business-profile-physical-address",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://trusthub.twilio.com/v1/SupportingDocuments/RDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2021-02-11T17:23:00Z",
"sid": "RDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"attributes": {
"address_sids": "ADaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
},
"type": "customer_profile_address",
"mime_type": null
}
'''
))
actual = self.client.trusthub.v1.supporting_documents.create(friendly_name="friendly_name", type="type")
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trusthub.v1.supporting_documents.list()
self.holodeck.assert_has_request(Request(
'get',
'https://trusthub.twilio.com/v1/SupportingDocuments',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"results": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://trusthub.twilio.com/v1/SupportingDocuments?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://trusthub.twilio.com/v1/SupportingDocuments?PageSize=50&Page=0",
"next_page_url": null,
"key": "results"
}
}
'''
))
actual = self.client.trusthub.v1.supporting_documents.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"results": [
{
"status": "draft",
"date_updated": "2021-02-11T17:23:00Z",
"friendly_name": "Business-profile-physical-address",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://trusthub.twilio.com/v1/SupportingDocuments/RDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2021-02-11T17:23:00Z",
"sid": "RDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"attributes": {
"address_sids": "ADaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
},
"type": "customer_profile_address",
"mime_type": null
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://trusthub.twilio.com/v1/SupportingDocuments?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://trusthub.twilio.com/v1/SupportingDocuments?PageSize=50&Page=0",
"next_page_url": null,
"key": "results"
}
}
'''
))
actual = self.client.trusthub.v1.supporting_documents.list()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trusthub.v1.supporting_documents("RDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://trusthub.twilio.com/v1/SupportingDocuments/RDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"status": "draft",
"date_updated": "2021-02-11T17:23:00Z",
"friendly_name": "Business-profile-physical-address",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://trusthub.twilio.com/v1/SupportingDocuments/RDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2021-02-11T17:23:00Z",
"sid": "RDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"attributes": {
"address_sids": "ADaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
},
"type": "customer_profile_address",
"mime_type": null
}
'''
))
actual = self.client.trusthub.v1.supporting_documents("RDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trusthub.v1.supporting_documents("RDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://trusthub.twilio.com/v1/SupportingDocuments/RDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"status": "draft",
"date_updated": "2021-02-11T17:23:00Z",
"friendly_name": "friendly_name",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://trusthub.twilio.com/v1/SupportingDocuments/RDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2021-02-11T17:23:00Z",
"sid": "RDaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"attributes": {
"address_sids": "ADaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
},
"type": "customer_profile_address",
"mime_type": null
}
'''
))
actual = self.client.trusthub.v1.supporting_documents("RDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trusthub.v1.supporting_documents("RDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://trusthub.twilio.com/v1/SupportingDocuments/RDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.trusthub.v1.supporting_documents("RDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
|
|
# Copyright (c) 2011 Adi Roiban.
# See LICENSE for details.
'''Configuration section for Chevah logger.'''
from chevah.utils import __python_future__
import re
from zope.interface import implements
from chevah.utils.configuration import ConfigurationSectionMixin
from chevah.utils.constants import (
CONFIGURATION_SECTION_LOG,
)
from chevah.utils.exceptions import UtilsError
from chevah.utils.interfaces import ILogConfigurationSection
from chevah.utils.helpers import _
from chevah.utils.observer import Signal
class LogConfigurationSection(ConfigurationSectionMixin):
'''Configurations for the log section.
[log]
log_file: /path/to/file
log_file_rotate_external: Yes | No
log_file_rotate_at_size: 0 | Disabled
log_file_rotate_each:
1 hour | 2 seconds | 2 midnight | 3 Monday | Disabled
log_file_rotate_count: 3 | 0 | Disabled
log_syslog: /path/to/syslog/pipe | syslog.host:port
log_enabled_groups: all
log_windows_eventlog: sftpplus-server
'''
implements(ILogConfigurationSection)
def __init__(self, proxy):
self._proxy = proxy
self._section_name = CONFIGURATION_SECTION_LOG
self._prefix = u'log'
@property
def syslog(self):
'''Return the syslog address used for logging.
server_log_syslog can be a path to a file or a host:port address.
'''
syslog = self._proxy.getStringOrNone(
self._section_name, self._prefix + '_syslog')
if not syslog:
return None
# See if we can make an IP address out of the value.
# For IP address we must return a (IP, PORT) tuple
# For files we just return the value.
        search = re.search(r'(.*):(\d+)$', syslog)
if search:
return (search.groups()[0], int(search.groups()[1]))
else:
return syslog
@syslog.setter
def syslog(self, value):
self._updateWithNotify(
setter=self._proxy.setStringOrNone, name='syslog', value=value)
def _updateWithNotify(self, setter, name, value):
"""
Update configuration and notify changes.
Revert configuration on error.
"""
initial_value = getattr(self, name)
configuration_option_name = '%s_%s' % (self._prefix, name)
setter(self._section_name, configuration_option_name, value)
current_value = getattr(self, name)
signal = Signal(
self, initial_value=initial_value, current_value=current_value)
try:
self.notify(name, signal)
except:
setter(
self._section_name, configuration_option_name, initial_value)
raise
@property
def file(self):
'''Return the file path where logs are sent.'''
return self._proxy.getStringOrNone(
            self._section_name, self._prefix + '_file')
@file.setter
def file(self, value):
self._updateWithNotify(
setter=self._proxy.setStringOrNone, name='file', value=value)
@property
def file_rotate_external(self):
'''Return log_file_rotate_external.'''
return self._proxy.getBoolean(
self._section_name,
self._prefix + '_file_rotate_external')
@file_rotate_external.setter
def file_rotate_external(self, value):
self._updateWithNotify(
setter=self._proxy.setBoolean,
name='file_rotate_external',
value=value,
)
@property
def file_rotate_count(self):
'''Return log_file_rotate_count.'''
value = self._proxy.getIntegerOrNone(
self._section_name,
self._prefix + '_file_rotate_count')
if value is None:
value = 0
return value
@file_rotate_count.setter
def file_rotate_count(self, value):
self._updateWithNotify(
setter=self._proxy.setIntegerOrNone,
name='file_rotate_count',
value=value,
)
@property
def file_rotate_at_size(self):
'''Return log_file_rotate_at_size.'''
value = self._proxy.getIntegerOrNone(
self._section_name,
self._prefix + '_file_rotate_at_size')
if value is None:
value = 0
return value
@file_rotate_at_size.setter
def file_rotate_at_size(self, value):
self._updateWithNotify(
setter=self._proxy.setIntegerOrNone,
name='file_rotate_at_size',
value=value,
)
@property
def file_rotate_each(self):
"""
        Return log_file_rotate_each.
        Returns a tuple of (interval_count, interval_type).
"""
value = self._proxy.getStringOrNone(
self._section_name,
self._prefix + '_file_rotate_each')
return self._fileRotateEachToMachineReadable(value)
@file_rotate_each.setter
def file_rotate_each(self, value):
if not value:
update_value = None
elif (isinstance(value, basestring) and
self._proxy.isDisabledValue(value)
):
update_value = None
else:
update_value = self._fileRotateEachToHumanReadable(value)
self._updateWithNotify(
setter=self._proxy.setStringOrNone,
name='file_rotate_each',
value=update_value,
)
def _fileRotateEachToMachineReadable(self, value):
"""
Return the machine readable format for `value`.
Inside the configuration file, the value is stored as a human
readable format like::
1 hour | 2 seconds | 2 midnight | 3 Monday | Disabled
When reading the value, it will return::
(1, 'h') | (2, 's') | (2, 'midnight') | (3, 'w0') | None
"""
mapping = {
u'second': u's',
u'seconds': u's',
u'minute': u'm',
u'minutes': u'm',
u'hour': u'h',
u'hours': u'h',
u'day': u'd',
u'days': u'd',
u'midnight': u'midnight',
u'midnights': u'midnight',
u'monday': u'w0',
u'mondays': u'w0',
u'tuesday': u'w1',
u'tuesdays': u'w1',
u'wednesday': u'w2',
u'wednesdays': u'w2',
u'thursday': u'w3',
u'thursdays': u'w3',
u'friday': u'w4',
u'fridays': u'w4',
u'saturday': u'w5',
u'saturdays': u'w5',
u'sunday': u'w6',
u'sundays': u'w6',
}
if not value:
return None
        tokens = re.split(r'\W+', value)
if len(tokens) != 2:
raise self._fileRotateEachError(_(u'Got: "%s"' % (value)))
try:
interval = int(tokens[0])
except ValueError:
raise self._fileRotateEachError(
_(u'Interval is not an integer. Got: "%s"' % (tokens[0])))
if interval < 0:
raise self._fileRotateEachError(
_(u'Interval should not be less than 0'))
when = tokens[1].lower()
try:
when = mapping[when]
except KeyError:
raise self._fileRotateEachError(
_(u'Unknown interval type. Got: "%s"' % (tokens[1])))
return (interval, when)
def _fileRotateEachToHumanReadable(self, value):
"""
Return the human readable representation for file_rotate_each
        tuple provided as `value`.
(2, 's') returns 2 seconds
"""
mapping = {
u's': u'second',
u'm': u'minute',
u'h': u'hour',
u'd': u'day',
u'midnight': u'midnight',
u'w0': u'monday',
u'w1': u'tuesday',
u'w2': u'wednesday',
u'w3': u'thursday',
u'w4': u'friday',
u'w5': u'saturday',
u'w6': u'sunday',
}
try:
frequency = int(value[0])
except ValueError:
raise self._fileRotateEachError(
_(u'Bad interval count. Got: "%s"' % (value[0])))
if frequency < 0:
raise self._fileRotateEachError(
_(u'Interval should not be less than 0'))
try:
when = mapping[value[1]]
except KeyError:
raise self._fileRotateEachError(
_(u'Unknown interval type. Got: "%s"' % (value[1])))
return u'%s %s' % (frequency, when)
def _fileRotateEachError(self, details):
return UtilsError(u'1023',
_(u'Wrong value for logger rotation based on time interval. '
u'%s' % (details)))
@property
def enabled_groups(self):
'''Return the list of enabled log groups.'''
value = self._proxy.getString(
self._section_name, self._prefix + '_enabled_groups')
groups = []
for group in value.split(','):
group = group.strip()
if group:
groups.append(group.lower())
return groups
@enabled_groups.setter
def enabled_groups(self, value):
'''Set the list of enabled groups.'''
self._proxy.setString(
self._section_name,
self._prefix + '_enabled_groups',
', '.join(value),
)
@property
def windows_eventlog(self):
"""
Name of source used to log into Windows Event log.
Returns None if event log is not enabled.
"""
return self._proxy.getStringOrNone(
self._section_name, self._prefix + '_windows_eventlog')
@windows_eventlog.setter
def windows_eventlog(self, value):
self._updateWithNotify(
setter=self._proxy.setStringOrNone,
name='windows_eventlog',
value=value,
)
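# A minimal usage sketch (illustrative addition, not from the original module):
# `proxy` stands for any configuration proxy exposing the getString*/setString*/
# getBoolean/getIntegerOrNone methods used by the properties above.
#
#   section = LogConfigurationSection(proxy)
#   section.file_rotate_each = (3, u'w0')    # persisted as u'3 monday'
#   section.file_rotate_each                 # read back as (3, u'w0')
#   section.syslog = u'127.0.0.1:514'
#   section.syslog                           # parsed back as ('127.0.0.1', 514)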
|
|
# -*- coding:utf-8 -*-
'''
Function for building the whole packed version of highlight.js out of
pre-packed modules.
'''
import os
import shutil
import re
import argparse
import subprocess
import json
from functools import partial
REPLACES = {
'case_insensitive': 'cI',
'lexems': 'l',
'contains': 'c',
'keywords': 'k',
'subLanguage': 'sL',
'className': 'cN',
'begin': 'b',
'beginWithKeyword': 'bWK',
'end': 'e',
'endsWithParent': 'eW',
'illegal': 'i',
'excludeBegin': 'eB',
'excludeEnd': 'eE',
'returnBegin': 'rB',
'returnEnd': 'rE',
'relevance': 'r',
'IDENT_RE': 'IR',
'UNDERSCORE_IDENT_RE': 'UIR',
'NUMBER_RE': 'NR',
'C_NUMBER_RE': 'CNR',
'BINARY_NUMBER_RE': 'BNR',
'RE_STARTERS_RE': 'RSR',
'APOS_STRING_MODE': 'ASM',
'QUOTE_STRING_MODE': 'QSM',
'BACKSLASH_ESCAPE': 'BE',
'C_LINE_COMMENT_MODE': 'CLCM',
'C_BLOCK_COMMENT_MODE': 'CBLCLM',
'HASH_COMMENT_MODE': 'HCM',
'C_NUMBER_MODE': 'CNM',
'BINARY_NUMBER_MODE': 'BNM',
'NUMBER_MODE': 'NM',
'beginRe': 'bR',
'endRe': 'eR',
'illegalRe': 'iR',
'lexemsRe': 'lR',
'terminators': 't',
'terminator_end': 'tE',
}
CATEGORIES = {
'common': ['bash', 'java', 'ini', 'sql', 'diff', 'php', 'cs', 'cpp', 'ruby', 'python', 'css', 'perl', 'xml', 'javascript', 'http', 'json'],
}
def lang_name(filename):
return os.path.splitext(os.path.basename(filename))[0]
def mapnonstrings(source, func):
result = []
pos = 0
quotes = re.compile('[\'"]')
while pos < len(source):
match = quotes.search(source, pos)
end = match.start() if match else len(source)
result.append(func(source[pos:end]))
pos = end
if match:
terminator = re.compile(r'[%s\\]' % match.group(0))
start = pos
pos += 1
while True:
match = terminator.search(source, pos)
if not match:
raise ValueError('Unmatched quote')
if match.group(0) == '\\':
pos = match.start() + 2
else:
pos = match.start() + 1
result.append(source[start:pos])
break
return ''.join(result)
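# Illustrative sketch (added for clarity, not part of the original script):
# mapnonstrings() applies `func` only to the chunks of source that sit outside
# string literals, which is what lets compress_content() rename identifiers
# without touching quoted text. The input below is made up.
def _mapnonstrings_example():
    shorten = partial(re.sub, r'\bcontains\b', 'c')
    # The identifier outside the quotes is renamed; the quoted literal is kept.
    return mapnonstrings("contains: 'contains'", shorten)  # -> "c: 'contains'"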
def compress_content(tools_path, content, filetype='js'):
if filetype == 'js':
for s, r in REPLACES.items():
content = mapnonstrings(content, partial(re.sub, r'\b%s\b' % s, r))
content = re.sub(r'(block|parentNode)\.cN', r'\1.className', content)
args = ['java', '-jar', os.path.join(tools_path, 'yuicompressor.jar'), '--type', filetype]
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write(content.encode('utf-8'))
p.stdin.close()
content = p.stdout.read().decode('utf-8')
p.stdout.close()
return content
def parse_header(filename):
'''
    Parses a possible language description header from a file. If a header is
    found, returns it as a dict, otherwise returns None.
'''
content = open(filename, encoding='utf-8').read(1024)
match = re.search(r'^\s*/\*(.*?)\*/', content, re.S)
if not match:
return
headers = match.group(1).split('\n')
headers = dict(h.strip().split(': ') for h in headers if ': ' in h)
return headers if 'Language' in headers else None
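# For reference (illustrative header, not taken from a real language file):
# parse_header() expects the file to start with a block comment of
# "Key: Value" lines, e.g.
#
#     /*
#     Language: Python
#     Requires: markdown.js
#     */
#
# which is returned as {'Language': 'Python', 'Requires': 'markdown.js'}.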
def language_filenames(src_path, languages):
'''
Resolves dependencies and returns the list of actual language filenames
'''
lang_path = os.path.join(src_path, 'languages')
filenames = [os.path.join(lang_path, f) for f in os.listdir(lang_path)]
headers = [parse_header(f) for f in filenames]
infos = [(h, f) for h, f in zip(headers, filenames) if h]
# Filtering infos based on list of languages and categories
if languages:
categories = {l for l in languages if l.startswith(':')}
languages = set(languages) - categories
categories = {c.strip(':') for c in categories}
cat_languages = {l for c, ls in CATEGORIES.items() if c in categories for l in ls}
languages |= cat_languages
infos = [
(i, f) for i, f in infos
if lang_name(f) in languages
]
def append(filename):
if filename not in filenames:
filenames.append(filename)
filenames = []
for info, filename in infos:
if 'Requires' in info:
requires = [r.strip() for r in info['Requires'].split(',')]
for r in requires:
append(r)
append(filename)
return [os.path.join(lang_path, f) for f in filenames]
def strip_read(filename):
s = open(filename).read()
pattern = re.compile(r'^\s*(/\*(.*?)\*/)?\s*', re.DOTALL)
s = pattern.sub('', s)
return s.strip()
def wrap_language(filename, content, compressed):
'''
Wraps a language file content for the browser build. The "compressed" parameter
selects which wrapping code to use:
- If compressed is False the function expects source files to be uncompressed and
wraps them maintaining readability of the source.
- If compressed is True the function expects source files to be already compressed
individually and wraps them with the minimal code, effectively emulating
what yuicompressor does.
'''
name = lang_name(filename)
if compressed:
name = ('["%s"]' if '-' in name or name[0].isdigit() else '.%s') % name
content = content.rstrip(';')
wrap = 'hljs.LANGUAGES%s=%s(hljs);'
else:
wrap = 'hljs.LANGUAGES[\'%s\'] = %s(hljs);\n'
return wrap % (name, content)
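# Illustrative output (the content placeholder is made up): for a language file
# named "python.js" whose stripped body is `function(hljs) {...}`,
# wrap_language() produces
#
#   hljs.LANGUAGES['python'] = function(hljs) {...}(hljs);    (compressed=False)
#   hljs.LANGUAGES.python=function(hljs) {...}(hljs);         (compressed=True)
#
# Names containing a dash or starting with a digit use the ["name"] indexing
# form in the compressed case.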
def glue_files(hljs_filename, language_filenames, compressed):
'''
Glues files together for the browser build.
'''
if compressed:
hljs = 'var hljs=new %s();' % strip_read(hljs_filename).rstrip(';')
file_func = lambda f: open(f).read()
else:
hljs = 'var hljs = new %s();\n' % strip_read(hljs_filename)
file_func = strip_read
return ''.join([hljs] + [wrap_language(f, file_func(f), compressed) for f in language_filenames])
def build_browser(root, build_path, filenames, options):
src_path = os.path.join(root, 'src')
tools_path = os.path.join(root, 'tools')
print('Building %d files:\n%s' % (len(filenames), '\n'.join(filenames)))
content = glue_files(os.path.join(src_path, 'highlight.js'), filenames, False)
print('Uncompressed size:', len(content.encode('utf-8')))
if options.compress:
print('Compressing...')
content = compress_content(tools_path, content)
print('Compressed size:', len(content.encode('utf-8')))
open(os.path.join(build_path, 'highlight.pack.js'), 'w').write(content)
def build_amd(root, build_path, filenames, options):
src_path = os.path.join(root, 'src')
tools_path = os.path.join(root, 'tools')
print('Building %d files:\n%s' % (len(filenames), '\n'.join(filenames)))
content = glue_files(os.path.join(src_path, 'highlight.js'), filenames, False)
content = 'define(function() {\n%s\nreturn hljs;\n});' % content # AMD wrap
print('Uncompressed size:', len(content.encode('utf-8')))
if options.compress:
print('Compressing...')
content = compress_content(tools_path, content)
print('Compressed size:', len(content.encode('utf-8')))
open(os.path.join(build_path, 'highlight.pack.js'), 'w').write(content)
def build_node(root, build_path, filenames, options):
src_path = os.path.join(root, 'src')
print('Building %d files:' % len(filenames))
for filename in filenames:
print(filename)
content = 'module.exports = %s;' % strip_read(filename)
open(os.path.join(build_path, os.path.basename(filename)), 'w').write(content)
filename = os.path.join(src_path, 'highlight.js')
print(filename)
print('Registering languages with the library...')
hljs = 'var hljs = new %s();' % strip_read(filename)
filenames = map(os.path.basename, filenames)
for filename in filenames:
hljs += '\nhljs.LANGUAGES[\'%s\'] = require(\'./%s\')(hljs);' % (lang_name(filename), filename)
hljs += '\nmodule.exports = hljs;'
open(os.path.join(build_path, 'highlight.js'), 'w').write(hljs)
if options.compress:
print('Notice: not compressing files for "node" target.')
print('Adding package.json...')
package = json.load(open(os.path.join(src_path, 'package.json')))
authors = open(os.path.join(root, 'AUTHORS.en.txt'))
matches = (re.match('^- (?P<name>.*) <(?P<email>.*)>$', a) for a in authors)
package['contributors'] = [m.groupdict() for m in matches if m]
content = json.dumps(package, indent=2)
open(os.path.join(build_path, 'package.json'), 'w').write(content)
def build_cdn(root, build_path, filenames, options):
src_path = os.path.join(root, 'src')
tools_path = os.path.join(root, 'tools')
if not options.compress:
print('Notice: forcing compression for "cdn" target')
options.compress = True
build_browser(root, build_path, filenames, options)
os.rename(os.path.join(build_path, 'highlight.pack.js'), os.path.join(build_path, 'highlight.min.js'))
print('Compressing all languages...')
lang_path = os.path.join(build_path, 'languages')
os.mkdir(lang_path)
all_filenames = language_filenames(src_path, [])
for filename in all_filenames:
print(filename)
content = compress_content(tools_path, open(filename).read())
content = wrap_language(filename, content, True)
open(os.path.join(lang_path, '%s.min.js' % lang_name(filename)), 'w').write(content)
print('Compressing styles...')
build_style_path = os.path.join(build_path, 'styles')
src_style_path = os.path.join(src_path, 'styles')
os.mkdir(build_style_path)
styles = [lang_name(f) for f in os.listdir(src_style_path) if f.endswith('.css')]
for style in styles:
filename = os.path.join(src_style_path, '%s.css' % style)
print(filename)
content = compress_content(tools_path, open(filename).read(), 'css')
open(os.path.join(build_style_path, '%s.min.css' % style), 'w').write(content)
def build(buildfunc, root, args):
build_path = os.path.join(root, 'build')
if os.path.exists(build_path):
shutil.rmtree(build_path)
os.mkdir(build_path)
filenames = language_filenames(os.path.join(root, 'src'), args.languages)
buildfunc(root, build_path, filenames, args)
print('Done.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build highlight.js for various targets')
parser.add_argument(
'languages', nargs='*',
help = 'language (the name of a language file without the .js extension) or :category (currently the only available category is ":common")',
)
parser.add_argument(
'-n', '--no-compress',
dest = 'compress', action = 'store_false', default = True,
help = 'Don\'t compress source files. Compression only works for the "browser" target.',
)
parser.add_argument(
'-t', '--target', dest = 'target',
choices = ['browser', 'node', 'cdn', 'amd'], default = 'browser',
help = 'Target format, default is "browser"',
)
args = parser.parse_args()
buildfunc = locals()['build_%s' % args.target]
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
build(buildfunc, root, args)
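# Example invocations (illustrative; assumes the script is run directly and
# that the listed languages exist under src/languages):
#
#   python build.py                     # "browser" target, all languages, compressed
#   python build.py -n python ruby      # uncompressed build of just two languages
#   python build.py -t node :common     # node target with the ":common" category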
|
|
from oldowan.mtconvert.seq2sites import seq2sites
from oldowan.polymorphism import Polymorphism
def test_example_1():
"""Wilson et al 2002 Example 1
Seq: ATACAACCCCCACCCAT
Seq: ATACAACCCCCACCCAT
rCRS: ATACAACCCCCGCCCAT
Sites: 499A
"""
a = Polymorphism(499,0,'A')
seq = 'ATACTACTAATCTCATCAATACAACCCCCACCCATCCTACCCAGCACACACACACCGCTG'
result = seq2sites(seq)
assert len(result) == 1
assert a in result
def test_example_2():
"""Wilson et al 2002 Example 2
Seq: ATACAACCCCACCCAT
Seq: ATACAACCCC-ACCCAT
rCRS: ATACAACCCCCGCCCAT
    Sites: 498d 499A
"""
a = Polymorphism(498,0,'-')
b = Polymorphism(499,0,'A')
seq = 'ATACTACTAATCTCATCAATACAACCCCACCCATCCTACCCAGCACACACACACCGCTG'
result = seq2sites(seq)
assert len(result) == 2
assert a in result
assert b in result
def test_example_3():
"""Wilson et al 2002 Example 3
Seq: ATTGATGTC
Seq: ATTGA-TGTC
rCRS: ATTGAATGTC
Sites: 249d
"""
a = Polymorphism(249,0,'-')
seq = 'TGCTTGTAGGACATAATAATAACAATTGATGTCTGCACAGCCACTTTCC'
result = seq2sites(seq)
assert len(result) == 1
assert a in result
def test_example_4():
"""Wilson et al 2002 Example 4
Seq: CATAACAAAATTT
Seq: CATAACAAAA--TTT
rCRS: CATAACAAAAAATTT
Sites: 290d 291d
"""
a = Polymorphism(290,0,'-')
b = Polymorphism(291,0,'-')
seq = 'ACACAGACATCATAACAAAATTTCCACCAAACCCCCCC'
result = seq2sites(seq)
assert len(result) == 2
assert a in result
assert b in result
def test_example_5():
"""Wilson et al 2002 Example 5
Seq: ACCCAGCACACACACACACCGCTG
Seq: ACCCAGCACACACACACACCGCTG
rCRS: ACCCAGCACACACACAC--CGCTG
Sites: 524.1A 524.2C
"""
a = Polymorphism(524,1,'A')
b = Polymorphism(524,2,'C')
seq = 'ACAACCCCCGCCCATCCTACCCAGCACACACACACACCGCTGCTAACCCCATACCCC'
result = seq2sites(seq)
assert len(result) == 2
assert a in result
assert b in result
def test_example_6():
"""Wilson et al 2002 Example 6
Seq: ACCCAGCACACACCGCTGC
Seq: ACCCAGCACACAC----CGCTGC
rCRS: ACCCAGCACACACACACCGCTGC
Sites: 521d 522d 523d 524d
"""
a = Polymorphism(521,0,'-')
b = Polymorphism(522,0,'-')
c = Polymorphism(523,0,'-')
d = Polymorphism(524,0,'-')
seq = 'ACAACCCCCGCCCATCCTACCCAGCACACACCGCTGCTAACCCCATACCCC'
result = seq2sites(seq)
assert len(result) == 4
assert a in result
assert b in result
assert c in result
assert d in result
def test_example_7():
"""Wilson et al 2002 Example 7
Seq: ACCCAACACACACACCGC
Seq: ACCCA--ACACACACACCGC
rCRS: ACCCAGCACACACACACCGC
Sites: 513d 514d
"""
a = Polymorphism(513,0,'-')
b = Polymorphism(514,0,'-')
seq = 'ACAACCCCCGCCCATCCTACCCAACACACACACCGCTGCTAACCCCATACCCC'
result = seq2sites(seq)
assert len(result) == 2
assert a in result
assert b in result
def test_example_8():
"""Wilson et al 2002 Example 8
Seq: ACCCAGTACACACACCG
Seq: ACCCAGTACACACAC--CG
rCRS: ACCCAGCACACACACACCG
Sites: 514T 523d 524d
"""
a = Polymorphism(514,0,'T')
b = Polymorphism(523,0,'-')
c = Polymorphism(524,0,'-')
seq = 'ACAACCCCCGCCCATCCTACCCAGTACACACACCGCTGCTAACCCCATACCCC'
result = seq2sites(seq)
assert len(result) == 3
assert a in result
assert b in result
assert c in result
def test_example_9():
"""Wilson et al 2002 Example 9
Seq: AAACCCCCCCCTCCCATGCT
Seq: AAACCCCCCCCTCCCATGCT
rCRS: AAAACCCCCTC-CCCATGCT
    Sites: 16183C 16189C 16190.1T
"""
a = Polymorphism(16183,0,'C')
b = Polymorphism(16189,0,'C')
c = Polymorphism(16190,1,'T')
seq = 'CCTGTAGTACATAAAAACCCAATCCACATCAAACCCCCCCCTCCCATGCTTACAAGCAAGT'
result = seq2sites(seq)
assert len(result) == 3
assert a in result
assert b in result
assert c in result
def test_example_10():
"""Wilson et al 2002 Example 10
Seq: AACCCCCCCCCCCCATGCT
Seq: AACCCCCCCCCCCCATGCT
rCRS: AAAACCCCCTCCCCATGCT
Sites: 16182C 16183C 16189C
"""
a = Polymorphism(16182,0,'C')
b = Polymorphism(16183,0,'C')
c = Polymorphism(16189,0,'C')
seq = 'CCTGTAGTACATAAAAACCCAATCCACATCAACCCCCCCCCCCCATGCTTACAAGCAAGT'
result = seq2sites(seq)
assert len(result) == 3
assert a in result
assert b in result
assert c in result
def test_example_11():
"""Wilson et al 2002 Example 11
Seq: AAACCCCCCCCCCCCATGCT
Seq: AAACCCCCCCCCCCCATGCT
rCRS: AAAACCCCCTCCCC-ATGCT
Sites: 16183C 16189C 16193.1C
"""
a = Polymorphism(16183,0,'C')
b = Polymorphism(16189,0,'C')
c = Polymorphism(16193,1,'C')
seq = 'CCTGTAGTACATAAAAACCCAATCCACATCAAACCCCCCCCCCCCATGCTTACAAGCAAGT'
result = seq2sites(seq)
assert len(result) == 3
assert a in result
assert b in result
assert c in result
def test_example_12():
"""Wilson et al 2002 Example 12
Seq: TTAACCCCCTCCCCCATGCT
Seq: TTAA--CCCCCTCCCCCATGCT
rCRS: TCAAAACCCCCTCCCC-ATGCT
Sites: 16179T 16182d 16183d 16193.1C
"""
a = Polymorphism(16179,0,'T')
b = Polymorphism(16182,0,'-')
c = Polymorphism(16183,0,'-')
d = Polymorphism(16193,1,'C')
seq = 'CCTGTAGTACATAAAAACCCAATCCACATTAACCCCCTCCCCCATGCTTACAAGCAAGTACAGCAATCAACCCTCAACT'
result = seq2sites(seq)
    print('expected: %s' % [a,b,c,d])
    print('actual: %s' % result)
assert len(result) == 4
assert a in result
assert b in result
assert c in result
assert d in result
def test_example_13():
"""Wilson et al 2002 Example 13
Seq: AAAACCTCCCCCCATGCT
Seq: AAAACCTCC-CCCCATGCT
rCRS: AAAACCCCCTCCCCATGCT
Sites: 16186T 16189d
"""
a = Polymorphism(16186,0,'T')
b = Polymorphism(16189,0,'-')
seq = 'CCTGTAGTACATAAAAACCCAATCCACATCAAAACCTCCCCCCATGCTTACAAGCAAGT'
result = seq2sites(seq)
assert len(result) == 2
assert a in result
assert b in result
def test_example_14():
"""Wilson et al 2002 Example 14
Seq: AAACCCCCCCTCCCCCATGCT
Seq: AAACCCCCCCTCCCCCATGCT
rCRS: AAAACCCCC-TCCCC-ATGCT
Sites: 16183C 16188.1C 16193.1C
"""
a = Polymorphism(16183,0,'C')
b = Polymorphism(16188,1,'C')
c = Polymorphism(16193,1,'C')
seq = 'CCTGTAGTACATAAAAACCCAATCCACATCAAACCCCCCCTCCCCCATGCTTACAAGCAAGT'
result = seq2sites(seq)
assert len(result) == 3
assert a in result
assert b in result
assert c in result
def test_example_15():
"""Wilson et al 2002 Example 15
Seq: TTAAACCCCCCCCTCCCATGCT
Seq: TTAAACCCCCCCCTCCCATGCT
rCRS: TCAAAACCCCCTC-CCCATGCT
Sites: 16179T 16183C 16189C 16190.1T
"""
a = Polymorphism(16179,0,'T')
b = Polymorphism(16183,0,'C')
c = Polymorphism(16189,0,'C')
d = Polymorphism(16190,1,'T')
seq = 'CCTGTAGTACATAAAAACCCAATCCACATTAAACCCCCCCCTCCCATGCTTACAAGCAAGTACAGCAATCAACCCTCAACT'
result = seq2sites(seq)
assert len(result) == 4
assert a in result
assert b in result
assert c in result
assert d in result
def test_example_16():
"""Wilson et al 2002 Example 16
Seq: AAACCCCCTCCCCCCATGCT
Seq: AAA-CCCCCTCCCCCCATGCT
rCRS: AAAACCCCCTCCCC--ATGCT
Sites: 16183d 16193.1C 16193.2C
"""
a = Polymorphism(16183,0,'-')
b = Polymorphism(16193,1,'C')
c = Polymorphism(16193,2,'C')
seq = 'CCTGTAGTACATAAAAACCCAATCCACATCAAACCCCCTCCCCCCATGCTTACAAGCAAGT'
result = seq2sites(seq)
    print('expected: %s' % [a,b,c])
    print('actual: %s' % result)
assert len(result) == 3
assert a in result
assert b in result
assert c in result
def test_example_17():
"""Wilson et al 2002 Example 17
Seq: AAACCCCCCCCCGC
Seq: AAACCCCCCC----CCGC
rCRS: AAACCCCCCCTCCCCCGC
Sites: 310d 311d 312d 313d
"""
a = Polymorphism(310,0,'-')
b = Polymorphism(311,0,'-')
c = Polymorphism(312,0,'-')
d = Polymorphism(313,0,'-')
seq = 'AATTTCCACCAAACCCCCCCCCGCTTCTGGCCACAGCACTT'
result = seq2sites(seq)
assert len(result) == 4
assert a in result
assert b in result
assert c in result
assert d in result
def test_example_18():
"""Wilson et al 2002 Example 18
Seq: AAACCCCCCTCCCCCCGC
Seq: AAACCCCCC-TCCCCCCGC
rCRS: AAACCCCCCCTCCCCC-GC
Sites: 309d 315.1C
"""
a = Polymorphism(309,0,'-')
b = Polymorphism(315,1,'C')
seq = 'CATAACAAAAAATTTCCACCAAACCCCCCTCCCCCCGCTTCTGGCCACAGCACTT'
result = seq2sites(seq)
    print('expected: %s' % [a,b])
    print('actual: %s' % result)
assert len(result) == 2
assert a in result
assert b in result
def test_example_19():
"""Wilson et al 2002 Example 19
Seq: AAACCCCCCCTTCCCCCCGC
Seq: AAACCCCCCCTTCCCCCCGC
rCRS: AAACCCCCCCT-CCCCC-GC
Sites: 310.1T 315.1C
"""
a = Polymorphism(310,1,'T')
b = Polymorphism(315,1,'C')
seq = 'AATTTCCACCAAACCCCCCCTTCCCCCCGCTTCTGGCCACAGCACTT'
result = seq2sites(seq)
assert len(result) == 2
assert a in result
assert b in result
def test_example_20():
"""Wilson et al 2002 Example 20
Seq: AAAGACACCCCCCCCCCCCACA
Seq: AAAGACACCCCCCCCCCCCACA
rCRS: AAAGACACCCCCC------ACA
Sites: 573.1C 573.2C 573.3C 573.4C 573.5C 573.6C
"""
a = Polymorphism(573,1,'C')
b = Polymorphism(573,2,'C')
c = Polymorphism(573,3,'C')
d = Polymorphism(573,4,'C')
e = Polymorphism(573,5,'C')
f = Polymorphism(573,6,'C')
seq = 'AACCAAACCCCAAAGACACCCCCCCCCCCCACAGTTTATGTAGCTT'
result = seq2sites(seq)
assert len(result) == 6
assert a in result
assert b in result
assert c in result
assert d in result
assert e in result
assert f in result
def test_example_21():
"""Wilson et al 2002 Example 21
Seq: CTGGAGCACCC
Seq: CTGGAGC------ACCC
rCRS: CTGGAGCCGGAGCACCC
Sites: 105d 106d 107d 108d 109d 110d
"""
a = Polymorphism(105,0,'-')
b = Polymorphism(106,0,'-')
c = Polymorphism(107,0,'-')
d = Polymorphism(108,0,'-')
e = Polymorphism(109,0,'-')
f = Polymorphism(110,0,'-')
seq = 'GCATTGCGAGACGCTGGAGCACCCTATGTCGCAGTATCT'
result = seq2sites(seq)
assert len(result) == 6
assert a in result
assert b in result
assert c in result
assert d in result
assert e in result
assert f in result
def test_example_22():
"""Wilson et al 2002 Example 22
Seq: AGATCCTGGAGCCCCC
Seq: AGATC-CTGGAGCC------CCC
rCRS: AGA-CGCTGGAGCCGGAGCACCC
Sites: 95.1T 97d 106d 107d 108d 109d 110d 111d
"""
a = Polymorphism(95,1,'T')
b = Polymorphism(97,0,'-')
c = Polymorphism(106,0,'-')
d = Polymorphism(107,0,'-')
e = Polymorphism(108,0,'-')
f = Polymorphism(109,0,'-')
g = Polymorphism(110,0,'-')
h = Polymorphism(111,0,'-')
seq = 'TCGTCTGGGGGGTATGCACGCGATAGCATTGCGAGATCCTGGAGCCCCCTATGTCGCAGTATCT'
result = seq2sites(seq)
    print('expected: %s' % [a,b,c,d,e,f,g,h])
    print('actual: %s' % result)
assert len(result) == 8
assert a in result
assert b in result
assert c in result
assert d in result
assert e in result
assert f in result
assert g in result
assert h in result
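# Note on the Polymorphism(position, insert, value) convention used throughout
# the tests above (as illustrated by their docstrings): Polymorphism(499, 0, 'A')
# is the substitution 499A, Polymorphism(498, 0, '-') is the deletion 498d, and
# Polymorphism(16193, 1, 'C') is the insertion 16193.1C.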
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Dirichlet Multinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
_dirichlet_multinomial_prob_note = """
For each batch of counts `[n_1,...,n_k]`, `P[counts]` is the probability
that after sampling `n` draws from this Dirichlet Multinomial
distribution, the number of draws falling in class `j` is `n_j`. Note that
different sequences of draws can result in the same counts, thus the
probability includes a combinatorial coefficient.
Note that input, "counts", must be a non-negative tensor with dtype `dtype`
and whose shape can be broadcast with `self.alpha`. For fixed leading
dimensions, the last dimension represents counts for the corresponding
Dirichlet Multinomial distribution in `self.alpha`. `counts` is only legal if
it sums up to `n` and its components are equal to integer values.
"""
class DirichletMultinomial(distribution.Distribution):
"""DirichletMultinomial mixture distribution.
This distribution is parameterized by a vector `alpha` of concentration
  parameters for `k` classes and `n`, the total number of draws.
#### Mathematical details
The Dirichlet Multinomial is a distribution over k-class count data, meaning
for each k-tuple of non-negative integer `counts = [c_1,...,c_k]`, we have a
probability of these draws being made from the distribution. The distribution
has hyperparameters `alpha = (alpha_1,...,alpha_k)`, and probability mass
function (pmf):
  ```pmf(counts) = N! / (c_1!...c_k!) * Beta(alpha + c) / Beta(alpha)```
  where above `N = sum_j c_j`, `N!` is `N` factorial, and
`Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the multivariate beta
function.
  This is a mixture distribution in that each sample can be produced by:
  1. Choose class probabilities `p = (p_1,...,p_k) ~ Dir(alpha)`
  2. Draw integer counts `c = (c_1,...,c_k) ~ Multinomial(N, p)`
This class provides methods to create indexed batches of Dirichlet
Multinomial distributions. If the provided `alpha` is rank 2 or higher, for
every fixed set of leading dimensions, the last dimension represents one
single Dirichlet Multinomial distribution. When calling distribution
functions (e.g. `dist.pmf(counts)`), `alpha` and `counts` are broadcast to the
same shape (if possible). In all cases, the last dimension of alpha/counts
represents single Dirichlet Multinomial distributions.
#### Examples
```python
alpha = [1, 2, 3]
n = 2
dist = DirichletMultinomial(n, alpha)
```
  Creates a 3-class distribution in which the 3rd class is the most likely to be drawn.
The distribution functions can be evaluated on counts.
```python
# counts same shape as alpha.
counts = [0, 0, 2]
dist.pmf(counts) # Shape []
# alpha will be broadcast to [[1, 2, 3], [1, 2, 3]] to match counts.
counts = [[1, 1, 0], [1, 0, 1]]
dist.pmf(counts) # Shape [2]
# alpha will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.pmf(counts) # Shape [5, 7]
```
Creates a 2-batch of 3-class distributions.
```python
alpha = [[1, 2, 3], [4, 5, 6]] # Shape [2, 3]
n = [3, 3]
dist = DirichletMultinomial(n, alpha)
# counts will be broadcast to [[2, 1, 0], [2, 1, 0]] to match alpha.
counts = [2, 1, 0]
dist.pmf(counts) # Shape [2]
```
"""
# TODO(b/27419586) Change docstring for dtype of alpha once int allowed.
def __init__(self,
n,
alpha,
validate_args=False,
allow_nan_stats=True,
name="DirichletMultinomial"):
"""Initialize a batch of DirichletMultinomial distributions.
Args:
n: Non-negative floating point tensor, whose dtype is the same as
`alpha`. The shape is broadcastable to `[N1,..., Nm]` with `m >= 0`.
Defines this as a batch of `N1 x ... x Nm` different Dirichlet
multinomial distributions. Its components should be equal to integer
values.
alpha: Positive floating point tensor, whose dtype is the same as
`n` with shape broadcastable to `[N1,..., Nm, k]` `m >= 0`. Defines
this as a batch of `N1 x ... x Nm` different `k` class Dirichlet
multinomial distributions.
validate_args: `Boolean`, default `False`. Whether to assert valid
values for parameters `alpha` and `n`, and `x` in `prob` and
`log_prob`. If `False`, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to prefix Ops created by this distribution class.
Examples:
```python
# Define 1-batch of 2-class Dirichlet multinomial distribution,
# also known as a beta-binomial.
dist = DirichletMultinomial(2.0, [1.1, 2.0])
# Define a 2-batch of 3-class distributions.
dist = DirichletMultinomial([3., 4], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
```
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[n, alpha]) as ns:
# Broadcasting works because:
# * The broadcasting convention is to prepend dimensions of size [1], and
      #   we use the last dimension for the distribution, whereas
# the batch dimensions are the leading dimensions, which forces the
# distribution dimension to be defined explicitly (i.e. it cannot be
# created automatically by prepending). This forces enough
      #   explicitness.
# * All calls involving `counts` eventually require a broadcast between
# `counts` and alpha.
self._alpha = self._assert_valid_alpha(alpha, validate_args)
self._n = self._assert_valid_n(n, validate_args)
self._alpha_sum = math_ops.reduce_sum(
self._alpha, reduction_indices=[-1], keep_dims=False)
super(DirichletMultinomial, self).__init__(
dtype=self._alpha.dtype,
is_continuous=False,
is_reparameterized=False,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._alpha, self._n, self._alpha_sum],
name=ns)
@property
def n(self):
"""Parameter defining this distribution."""
return self._n
@property
def alpha(self):
"""Parameter defining this distribution."""
return self._alpha
@property
def alpha_sum(self):
"""Summation of alpha parameter."""
return self._alpha_sum
def _batch_shape(self):
return array_ops.shape(self.alpha_sum)
def _get_batch_shape(self):
return self.alpha_sum.get_shape()
def _event_shape(self):
return array_ops.reverse(array_ops.shape(self.alpha), [True])[0]
def _get_event_shape(self):
# Event shape depends only on alpha, not "n".
return self.alpha.get_shape().with_rank_at_least(1)[-1:]
@distribution_util.AppendDocstring(_dirichlet_multinomial_prob_note)
def _log_prob(self, counts):
counts = self._assert_valid_counts(counts)
ordered_prob = (special_math_ops.lbeta(self.alpha + counts) -
special_math_ops.lbeta(self.alpha))
log_prob = ordered_prob + distribution_util.log_combinations(
self.n, counts)
return log_prob
@distribution_util.AppendDocstring(_dirichlet_multinomial_prob_note)
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
def _mean(self):
normalized_alpha = self.alpha / array_ops.expand_dims(self.alpha_sum, -1)
return array_ops.expand_dims(self.n, -1) * normalized_alpha
@distribution_util.AppendDocstring(
"""The variance for each batch member is defined as the following:
```
Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) *
(n + alpha_0) / (1 + alpha_0)
```
where `alpha_0 = sum_j alpha_j`.
The covariance between elements in a batch is defined as:
```
Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 *
(n + alpha_0) / (1 + alpha_0)
```
""")
def _variance(self):
alpha_sum = array_ops.expand_dims(self.alpha_sum, -1)
normalized_alpha = self.alpha / alpha_sum
variance = -math_ops.matmul(
array_ops.expand_dims(normalized_alpha, -1),
array_ops.expand_dims(normalized_alpha, -2))
variance = array_ops.matrix_set_diag(variance, normalized_alpha *
(1. - normalized_alpha))
shared_factor = (self.n * (alpha_sum + self.n) /
(alpha_sum + 1) * array_ops.ones_like(self.alpha))
variance *= array_ops.expand_dims(shared_factor, -1)
return variance
def _assert_valid_counts(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
counts = ops.convert_to_tensor(counts, name="counts")
if not self.validate_args:
return counts
candidate_n = math_ops.reduce_sum(counts, reduction_indices=[-1])
return control_flow_ops.with_dependencies([
check_ops.assert_non_negative(counts),
check_ops.assert_equal(
self._n, candidate_n,
message="counts do not sum to n"),
distribution_util.assert_integer_form(counts)], counts)
def _assert_valid_alpha(self, alpha, validate_args):
alpha = ops.convert_to_tensor(alpha, name="alpha")
if not validate_args:
return alpha
return control_flow_ops.with_dependencies(
[check_ops.assert_rank_at_least(alpha, 1),
check_ops.assert_positive(alpha)], alpha)
def _assert_valid_n(self, n, validate_args):
n = ops.convert_to_tensor(n, name="n")
if not validate_args:
return n
return control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(n),
distribution_util.assert_integer_form(n)], n)
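# Worked sanity check of the pmf formula in the class docstring (numbers chosen
# purely for illustration): with alpha = [1., 1.] and n = 1, counts = [1., 0.]
# gives N!/(c_1! c_2!) = 1 and Beta([2., 1.])/Beta([1., 1.]) = 0.5/1 = 0.5,
# so dist.pmf([1., 0.]) evaluates to 0.5, matching E[p_1] = alpha_1/sum(alpha).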
|
|
"""
pghoard
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
import datetime
import hashlib
import json
import multiprocessing
import multiprocessing.pool
import os
import shutil
import time
import unittest
from tempfile import mkdtemp
from unittest.mock import MagicMock, Mock, patch
import pytest
from pghoard.common import write_json_file
from pghoard.restore import (BasebackupFetcher, ChunkFetcher, Restore, RestoreError, create_recovery_conf)
from .base import PGHoardTestCase
class TestRecoveryConf(PGHoardTestCase):
def test_recovery_targets(self, tmpdir):
config_file = tmpdir.join("conf.json").strpath
# Instantiate a fake PG data directory
pg_data_directory = os.path.join(str(self.temp_dir), "PG_DATA_DIRECTORY")
os.makedirs(pg_data_directory)
open(os.path.join(pg_data_directory, "PG_VERSION"), "w").write("9.6")
write_json_file(config_file, {"backup_sites": {"test": {"pg_data_directory": pg_data_directory}}})
r = Restore()
r._get_object_storage = Mock() # pylint: disable=protected-access
with pytest.raises(RestoreError) as excinfo:
r.run(
args=[
"get-basebackup",
"--config",
config_file,
"--target-dir",
tmpdir.strpath,
"--site=test",
"--recovery-target-action=promote",
"--recovery-target-name=foobar",
"--recovery-target-xid=42",
]
)
assert "at most one" in str(excinfo.value)
with pytest.raises(RestoreError) as excinfo:
r.run(
args=[
"get-basebackup",
"--config",
config_file,
"--target-dir",
tmpdir.strpath,
"--site=test",
"--recovery-target-action=promote",
"--recovery-target-time=foobar",
]
)
assert "recovery_target_time 'foobar'" in str(excinfo.value)
def test_find_nearest_backup(self):
r = Restore()
r.storage = Mock()
basebackups = [
{
"name": "2015-02-12_0",
"size": 42,
"metadata": {
"start-time": "2015-02-12T14:07:19+00:00"
},
},
{
"name": "2015-02-13_0",
"size": 42 * 1024 * 1024,
"metadata": {
"start-time": "2015-02-13T14:07:19+00:00"
},
},
]
r.storage.list_basebackups = Mock(return_value=basebackups)
assert r._find_nearest_basebackup()["name"] == "2015-02-13_0" # pylint: disable=protected-access
recovery_time = datetime.datetime(2015, 2, 1)
recovery_time = recovery_time.replace(tzinfo=datetime.timezone.utc)
with pytest.raises(RestoreError):
r._find_nearest_basebackup(recovery_time) # pylint: disable=protected-access
recovery_time = datetime.datetime(2015, 2, 12, 14, 20)
recovery_time = recovery_time.replace(tzinfo=datetime.timezone.utc)
assert r._find_nearest_basebackup(recovery_time)["name"] == "2015-02-12_0" # pylint: disable=protected-access
def test_create_recovery_conf(self):
td = self.temp_dir
fn = os.path.join(td, "recovery.conf")
with open(os.path.join(td, "PG_VERSION"), "w") as fp:
fp.write("9.6")
def getdata():
with open(fn, "r") as fp:
return fp.read()
assert not os.path.exists(fn)
create_recovery_conf(td, "dummysite")
assert "primary_conninfo" not in getdata()
create_recovery_conf(td, "dummysite", primary_conninfo="")
assert "primary_conninfo" not in getdata()
create_recovery_conf(td, "dummysite", primary_conninfo="dbname='test'")
assert "primary_conninfo" in getdata() # make sure it's there
assert "''test''" in getdata() # make sure it's quoted
assert "standby_mode = 'on'" in getdata()
content = create_recovery_conf(td, "dummysite", primary_conninfo="dbname='test'", restore_to_master=True)
assert "primary_conninfo" in content
assert "standby_mode = 'on'" not in content
content = create_recovery_conf(
td, "dummysite", recovery_end_command="echo 'done' > /tmp/done", recovery_target_xid="42"
)
assert content == getdata()
assert "primary_conninfo" not in content
assert "recovery_end_command = 'echo ''done'' > /tmp/done'" in content
# NOTE: multiple recovery targets don't really make sense in
# recovery.conf: PostgreSQL just uses the last entry.
# create_recovery_conf doesn't check them as it's called late enough
# for that check to be useless. Let's just make sure we can write
# lines for all of them.
now = datetime.datetime.now()
content = create_recovery_conf(
td,
"dummysite",
recovery_end_command="/bin/false",
recovery_target_action="shutdown",
recovery_target_name="testpoint",
recovery_target_time=now,
recovery_target_xid="42"
)
assert "recovery_target_action" in content
assert "recovery_target_name" in content
assert "recovery_target_time" in content
assert "recovery_target_xid" in content
assert str(now) in content
with open(os.path.join(td, "PG_VERSION"), "w") as fp:
fp.write("9.3")
content = create_recovery_conf(td, "dummysite", recovery_target_action="pause", recovery_target_xid="42")
assert "pause_at_recovery_target" in content
content = create_recovery_conf(td, "dummysite", recovery_target_action="promote", recovery_target_xid="42")
assert "pause_at_recovery_target" not in content
class TestBasebackupFetcher(unittest.TestCase):
def test_progress_tracking_and_error_handling(self):
config = {"restore_process_count": 4}
site = "some-site"
test_output_file_tmp = mkdtemp(suffix="pghoard-test")
status_output_file = os.path.join(test_output_file_tmp, "pghoard-restore-status.json")
pgdata = "/tmp/test_restore"
tablespaces = {"foo": {"oid": 1234, "path": "/tmp/test_restore2"}}
data_files = [("bar1", 1000), ("bar2", 2000), ((b"baz", {}), 0)]
fetcher = BasebackupFetcher(
app_config=config,
data_files=data_files,
debug=True,
status_output_file=status_output_file,
pgdata=pgdata,
site=site,
tablespaces=tablespaces
)
manager, pool, manager_enter = MagicMock(), MagicMock(), MagicMock()
fetcher.manager_class = lambda: manager
def pool_creator(processes=None):
assert processes == 3
return pool
fetcher.pool_class = pool_creator
progress_dict = dict(bar1=0, bar2=0)
manager.__enter__.return_value = manager_enter
manager_enter.dict.return_value = progress_dict
call = [0]
def check_status_output_file(*, expected_progress):
with open(status_output_file) as status_file:
progress_info = json.load(status_file)
assert progress_info["progress_percent"] == expected_progress
def sleep_mock(sleep_time):
assert sleep_time == 1
if call[0] == 0:
check_status_output_file(expected_progress=0)
assert fetcher.current_progress() == (0, 0)
assert fetcher.jobs_in_progress() is True
progress_dict["bar1"] = 1000
fetcher.job_completed(fetcher.data_files[0]["id"])
elif call[0] == 1:
assert fetcher.current_progress() == (1000, 1000 / 3000)
assert fetcher.jobs_in_progress() is True
progress_dict["bar2"] = 1000
fetcher.job_failed(fetcher.data_files[1]["id"], Exception("test exception"))
check_status_output_file(expected_progress=1000 / 3000)
elif call[0] == 2:
assert fetcher.current_progress() == (2000, 2000 / 3000)
assert fetcher.jobs_in_progress() is True
fetcher.job_completed(fetcher.data_files[2]["id"])
check_status_output_file(expected_progress=2000 / 3000)
elif call[0] == 3:
assert False
call[0] += 1
fetcher.sleep_fn = sleep_mock
with self.assertRaises(RestoreError) as context:
fetcher.fetch_all()
assert str(context.exception) == "Backup download/extraction failed with 1 errors"
manager_enter.dict.assert_called_with([["bar1", 0], ["bar2", 0]])
shutil.rmtree(test_output_file_tmp)
# Runs actual sub processes to decrypt and decompress basebackup chunks
def test_real_processing(self):
for tar in ["tar", "pghoard/gnutaremu.py"]:
self.run_restore_test("basebackup", tar, self.real_processing)
def real_processing(self, fetcher, restore_dir):
assert fetcher.pool_class == multiprocessing.Pool # pylint: disable=comparison-with-callable
fetcher.fetch_all()
self.check_sha256(
os.path.join(restore_dir, "base", "1", "2996"),
"214967296374cae6f099e19910b33a0893f0abc62f50601baa2875ab055cd27b"
)
self.check_sha256(
os.path.join(restore_dir, "base", "1", "3381_vm"),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
self.check_sha256(
os.path.join(restore_dir, "base", "1", "3599"),
"58571c0ad459c3be4da0fddbf814b0269be1197eebac43816b0e58da43fe3639"
)
self.check_sha256(
os.path.join(restore_dir, "base", "1", "3608"),
"cd461a152a9259c2d311ee348a4fa6722c119c1ff9a5b3147a86058d76f9bba8"
)
self.check_sha256(
os.path.join(restore_dir, "base", "1", "6104"),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
self.check_sha256(
os.path.join(restore_dir, "pg_notify", "0000"),
"9f1dcbc35c350d6027f98be0f5c8b43b42ca52b7604459c0c42be3aa88913d47"
)
def test_real_processing_with_threading(self):
for tar in ["tar", "pghoard/gnutaremu.py"]:
self.run_restore_test("basebackup", tar, self.real_processing_with_threading, files=["00000001.pghoard"])
def real_processing_with_threading(self, fetcher, restore_dir):
assert fetcher.pool_class == multiprocessing.pool.ThreadPool
fetcher.fetch_all()
self.check_sha256(
os.path.join(restore_dir, "pg_notify", "0000"),
"9f1dcbc35c350d6027f98be0f5c8b43b42ca52b7604459c0c42be3aa88913d47"
)
def test_real_processing_with_threading_retries_on_timeout(self):
for tar in ["tar", "pghoard/gnutaremu.py"]:
self.run_restore_test(
"basebackup",
tar,
lambda fetcher,
rd: self.real_processing_with_threading_retries_on_timeout(fetcher, rd, 2),
files=["00000001.pghoard"],
)
def test_real_processing_with_threading_retries_on_timeout_fails_after_3(self):
for tar in ["tar", "pghoard/gnutaremu.py"]:
self.run_restore_test(
"basebackup",
tar,
lambda fetcher,
rd: self.real_processing_with_threading_retries_on_timeout(fetcher, rd, 3),
files=["00000001.pghoard"],
)
def real_processing_with_threading_retries_on_timeout(self, fetcher, restore_dir, max_fails):
fail_counter = [0]
class FailingChunkFetcher(ChunkFetcher):
def _fetch_and_extract_one_backup(self, metadata, file_size, fetch_fn):
super()._fetch_and_extract_one_backup(metadata, file_size, fetch_fn)
fail_counter[0] += 1
if fail_counter[0] <= max_fails:
# Corrupt the file to test that retrying failed basebackup chunk yields sensible results
with open(os.path.join(restore_dir, "pg_notify", "0000"), "w") as f:
f.write("foo")
time.sleep(4)
fetcher.max_stale_seconds = 2
with patch("pghoard.restore.ChunkFetcher", new=FailingChunkFetcher):
if max_fails <= 2:
fetcher.fetch_all()
self.check_sha256(
os.path.join(restore_dir, "pg_notify", "0000"),
"9f1dcbc35c350d6027f98be0f5c8b43b42ca52b7604459c0c42be3aa88913d47"
)
else:
with pytest.raises(RestoreError):
fetcher.fetch_all()
def test_tablespaces(self):
def rm_tablespace_paths():
shutil.rmtree("/tmp/nsd5b2b8e4978847ef9b3056b7e01c51a8", ignore_errors=True)
shutil.rmtree("/tmp/ns5252b4c03072434691a11a5795b39477", ignore_errors=True)
rm_tablespace_paths()
tablespaces = {
"nstest1": {
"path": "/tmp/nsd5b2b8e4978847ef9b3056b7e01c51a8",
"oid": 16395
},
"nstest2": {
"path": "/tmp/ns5252b4c03072434691a11a5795b39477",
"oid": 16396
}
}
for tar in ["tar", "pghoard/gnutaremu.py"]:
try:
self.run_restore_test("basebackup_with_ts", tar, self.tablespaces, tablespaces=tablespaces)
finally:
rm_tablespace_paths()
def tablespaces(self, fetcher, restore_dir):
fetcher.fetch_all()
assert not os.path.isdir(os.path.join(restore_dir, "pgdata"))
assert not os.path.isdir(os.path.join(restore_dir, "tablespaces"))
self.check_sha256(
"/tmp/ns5252b4c03072434691a11a5795b39477/PG_10_201707211/16384/16400",
"2d6ea9066c3efb3bb7e2938725e31d7f0e4c9b4ac3e30c3091c5b061d3650300"
)
assert os.path.islink(os.path.join(restore_dir, "pg_tblspc", "16396"))
self.check_sha256(
os.path.join(restore_dir, "pg_tblspc", "16396", "PG_10_201707211", "16384", "16400"),
"2d6ea9066c3efb3bb7e2938725e31d7f0e4c9b4ac3e30c3091c5b061d3650300"
)
self.check_sha256(
"/tmp/nsd5b2b8e4978847ef9b3056b7e01c51a8/PG_10_201707211/16384/16397",
"d5d418c8ebd66ca1f26bdda100195146801b9776a3325abc6c548df8696f2649"
)
assert os.path.islink(os.path.join(restore_dir, "pg_tblspc", "16395"))
self.check_sha256(
os.path.join(restore_dir, "pg_tblspc", "16395", "PG_10_201707211", "16384", "16397"),
"d5d418c8ebd66ca1f26bdda100195146801b9776a3325abc6c548df8696f2649"
)
self.check_sha256(
os.path.join(restore_dir, "base", "13968", "13811"),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
self.check_sha256(
os.path.join(restore_dir, "base", "13968", "2619_vm"),
"64e86044d11dc1e1a8a1e3481b7beb0850fdea6b26a749cb610ef85e0e4aa626"
)
self.check_sha256(
os.path.join(restore_dir, "base", "13968", "3440"),
"84e3bda6f1abdd0fb0aff4bc6587ea07b9d8b61c1a0d6bdc4d16d339a761717f"
)
def run_restore_test(self, path, tar_executable, logic, tablespaces=None, files=None):
chunk_dir = os.path.join("test", path, "chunks")
files = [fn for fn in os.listdir(chunk_dir) if ".metadata" not in fn and (not files or fn in files)]
files = [(fn, os.stat(os.path.join(chunk_dir, fn)).st_size) for fn in files]
with open(os.path.join("test", path, "config.json"), "r") as f:
config = json.loads(f.read())
restore_dir = mkdtemp(prefix=self.__class__.__name__)
scratch_dir = mkdtemp(prefix=self.__class__.__name__)
config["backup_location"] = scratch_dir
config["tar_executable"] = tar_executable
site = next(iter(config["backup_sites"]))
fetcher = BasebackupFetcher(
app_config=config, data_files=files, debug=True, pgdata=restore_dir, site=site, tablespaces=tablespaces or {}
)
try:
logic(fetcher, restore_dir)
finally:
shutil.rmtree(restore_dir)
shutil.rmtree(scratch_dir)
@classmethod
def check_sha256(cls, fn, expected_sha256):
actual_sha256 = hashlib.sha256()
with open(fn, "rb") as f:
actual_sha256.update(f.read())
assert actual_sha256.hexdigest() == expected_sha256
|
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import os
import pickle
from datetime import datetime, timedelta
from pyface.action.menu_manager import MenuManager
from traits.api import HasTraits, Str, Int, Any, on_trait_change, List, Event, Button, Date
from traitsui.api import View, UItem, Item, HGroup, VGroup, EnumEditor, spring
from traitsui.editors import DateEditor
from traitsui.handler import Controller, Handler
from traitsui.menu import Action
from traitsui.tabular_adapter import TabularAdapter
from pychron.core.progress import progress_loader
from pychron.core.ui.tabular_editor import myTabularEditor
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.paths import paths
from pychron.pychron_constants import NULL_STR
class TimeViewAdapter(TabularAdapter):
columns = [('Date', 'rundate'),
('RunID', 'record_id'),
('Type', 'analysis_type'),
('Sample', 'sample'),
('Spectrometer', 'mass_spectrometer'),
('Project', 'project'),
('Irrad.', 'irradiation_info'),
('Device', 'extract_device')]
record_id_width = Int(80)
analysis_type_width = Int(80)
sample_width = Int(95)
project_width = Int(95)
rundate_width = Int(95)
irradiation_info_width = Int(60)
mass_spectrometer_width = Int(80)
extract_device_width = Int(95)
font = 'Helvetica 9'
def get_menu(self, obj, trait, row, column):
if obj.context_menu_enabled:
e = obj.append_replace_enabled
actions = [Action(name='Unselect', action='unselect_analyses'),
Action(name='Replace', action='replace_items', enabled=e),
Action(name='Append', action='append_items', enabled=e),
Action(name='Open', action='recall_items'),
Action(name='Open Copy', action='recall_copies')]
return MenuManager(*actions)
class TVHandler(Handler):
def recall_copies(self, info, obj):
if obj.selected:
obj.context_menu_event = ('open', {'open_copy': True})
def recall_items(self, info, obj):
if obj.selected:
obj.context_menu_event = ('open', {'open_copy': False})
def unselect_analyses(self, info, obj):
obj.selected = []
def replace_items(self, info, obj):
if obj.selected:
obj.context_menu_event = ('replace', None)
def append_items(self, info, obj):
if obj.selected:
obj.context_menu_event = ('append', None)
ATimeView = View(VGroup(icon_button_editor('clear_filter_button', 'clear'),
HGroup(UItem('help_str', style='readonly'), label='Help', show_border=True),
VGroup(
HGroup(UItem('mass_spectrometer', editor=EnumEditor(name='available_mass_spectrometers')),
UItem('analysis_type', editor=EnumEditor(name='available_analysis_types')),
UItem('extract_device', editor=EnumEditor(name='available_extract_devices'))),
HGroup(Item('lowdays', label='Greater Than'),
UItem('lowdate', editor=DateEditor(strftime='%m/%d/%Y'),
style='readonly'),
spring,
Item('highdays', label='Less Than'),
UItem('highdate', editor=DateEditor(strftime='%m/%d/%Y'),
style='readonly'),
spring,
Item('limit')),
label='Filter', show_border=True),
UItem('analyses', editor=myTabularEditor(adapter=TimeViewAdapter(),
column_clicked='column_clicked',
selected='selected',
multi_select=True,
refresh='refresh_table_needed',
dclicked='dclicked',
editable=False))))
class TimeViewModel(HasTraits):
db = Any
oanalyses = List
analyses = List
column_clicked = Event
dclicked = Event
selected = List
refresh_table_needed = Event
clear_filter_button = Button
help_str = 'Select an analysis. Click on the column label to filter results by the selected value'
mass_spectrometer = Str
analysis_type = Str
extract_device = Str
available_mass_spectrometers = List
available_analysis_types = List([NULL_STR, 'Unknown', 'Blank', 'Air', 'Cocktail'])
available_extract_devices = List
highdays = Int(0, enter_set=True, auto_set=False)
lowdays = Int(30, enter_set=True, auto_set=False)
lowdate = Date
highdate = Date
limit = Int(500)
# days_spacer = Int(10000)
_suppress_load_analyses = False
context_menu_event = Event
context_menu_enabled = True
append_replace_enabled = True
_active_column = None
def get_analysis_records(self):
if self.selected:
return self.selected
else:
return self.analyses
@on_trait_change('mass_spectrometer, analysis_type, extract_device, lowdate, highdate, limit')
def _handle_filter(self):
ms = self.mass_spectrometer
at = self.analysis_type
ed = self.extract_device
self._load_analyses(mass_spectrometer=ms, analysis_type=at, extract_device=ed)
def _clear_filter_button_fired(self):
self.analyses = self.oanalyses
self.refresh_table_needed = True
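    # Clicking a column header toggles a filter: the first click keeps only the
    # analyses whose value in that column matches one of the selected rows;
    # clicking the same column again restores the unfiltered list.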
def _column_clicked_changed(self, event):
if event and self.selected:
if self._active_column == event.column:
self._active_column = None
self.analyses = self.oanalyses
else:
self._active_column = event.column
name, field = event.editor.adapter.columns[event.column]
sattrs = {getattr(s, field) for s in self.selected}
self.analyses = [ai for ai in self.analyses if getattr(ai, field) in sattrs]
self.refresh_table_needed = True
def _highdays_changed(self):
self.highdate = datetime.now().date() - timedelta(days=self.highdays)
def _lowdays_changed(self):
self.lowdate = datetime.now().date() - timedelta(days=self.lowdays)
def dump_filter(self):
p = os.path.join(paths.hidden_dir, 'time_view.p')
with open(p, 'wb') as wfile:
obj = {k: getattr(self, k) for k in
('mass_spectrometer', 'analysis_type', 'extract_device',
'lowdays', 'highdays', 'limit')}
pickle.dump(obj, wfile)
def load_filter(self):
p = os.path.join(paths.hidden_dir, 'time_view.p')
if os.path.isfile(p):
with open(p, 'rb') as rfile:
obj = pickle.load(rfile)
self._suppress_load_analyses = True
self.trait_set(**obj)
self._suppress_load_analyses = False
self._handle_filter()
return True
def load(self):
"""
get a set of analyses from the database.
load the available mass spectrometers, analysis_types
:return:
"""
self._load_available()
self._suppress_load_analyses = True
self._highdays_changed()
self._lowdays_changed()
self._suppress_load_analyses = False
if not self.load_filter():
self._load_analyses()
def _load_available(self):
db = self.db
for attr in ('mass_spectrometer', 'extract_device'):
func = getattr(db, 'get_{}s'.format(attr))
ms = func()
ms.sort()
setattr(self, 'available_{}s'.format(attr), [NULL_STR] + [mi.name for mi in ms])
def _load_analyses(self, mass_spectrometer=None, analysis_type=None, extract_device=None):
if self._suppress_load_analyses:
return
db = self.db
ma = self.highdate
mi = self.lowdate
if analysis_type == NULL_STR:
analysis_type = None
if mass_spectrometer == NULL_STR:
mass_spectrometer = None
if extract_device == NULL_STR:
extract_device = None
ans = db.get_analyses_by_date_range(mi, ma,
mass_spectrometers=mass_spectrometer,
analysis_types=analysis_type,
extract_devices=extract_device,
limit=self.limit, order='desc')
self.oanalyses = self._make_records(ans)
self.analyses = self.oanalyses[:]
def _make_records(self, ans):
def func(xi, prog, i, n):
if prog:
prog.change_message('Loading {}'.format(xi.record_id))
return xi
return progress_loader(ans, func, threshold=25)
def traits_view(self):
v = ATimeView
v.handler = TVHandler()
return v
class TimeView(Controller):
model = TimeViewModel
def closed(self, info, is_ok):
if is_ok:
self.model.dump_filter()
def traits_view(self):
v = ATimeView
v.trait_set(resizable=True,
width=900,
height=500,
title='Analysis Time View')
return v
# ============= EOF =============================================
|
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import pytest
import sys
from azure.core.exceptions import HttpResponseError, DecodeError, ResourceExistsError
from azure.storage.queue import (
QueueClient,
QueueServiceClient,
TextBase64EncodePolicy,
TextBase64DecodePolicy,
BinaryBase64EncodePolicy,
BinaryBase64DecodePolicy
)
from azure.storage.queue._message_encoding import NoEncodePolicy, NoDecodePolicy
from settings.testcase import QueuePreparer
from devtools_testutils.storage import StorageTestCase
# ------------------------------------------------------------------------------
TEST_QUEUE_PREFIX = 'mytestqueue'
# ------------------------------------------------------------------------------
class StorageQueueEncodingTest(StorageTestCase):
# --Helpers-----------------------------------------------------------------
def _get_queue_reference(self, qsc, prefix=TEST_QUEUE_PREFIX):
queue_name = self.get_resource_name(prefix)
queue = qsc.get_queue_client(queue_name)
return queue
def _create_queue(self, qsc, prefix=TEST_QUEUE_PREFIX):
queue = self._get_queue_reference(qsc, prefix)
try:
created = queue.create_queue()
except ResourceExistsError:
pass
return queue
def _validate_encoding(self, queue, message):
# Arrange
try:
created = queue.create_queue()
except ResourceExistsError:
pass
# Action.
queue.send_message(message)
# Asserts
dequeued = next(queue.receive_messages())
self.assertEqual(message, dequeued.content)
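    # _validate_encoding round-trips a message through the live queue and checks that
    # the dequeued content matches what was sent, so each test below exercises
    # whichever encode/decode policies its client was constructed with.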
# --------------------------------------------------------------------------
@QueuePreparer()
def test_message_text_xml(self, storage_account_name, storage_account_key):
# Arrange.
qsc = QueueServiceClient(self.account_url(storage_account_name, "queue"), storage_account_key)
message = u'<message1>'
queue = qsc.get_queue_client(self.get_resource_name(TEST_QUEUE_PREFIX))
# Asserts
assert isinstance(queue._config.message_encode_policy, NoEncodePolicy)
assert isinstance(queue._config.message_decode_policy, NoDecodePolicy)
self._validate_encoding(queue, message)
@QueuePreparer()
def test_message_text_xml_whitespace(self, storage_account_name, storage_account_key):
# Arrange.
qsc = QueueServiceClient(self.account_url(storage_account_name, "queue"), storage_account_key)
message = u' mess\t age1\n'
queue = qsc.get_queue_client(self.get_resource_name(TEST_QUEUE_PREFIX))
# Asserts
self._validate_encoding(queue, message)
@QueuePreparer()
def test_message_text_xml_invalid_chars(self, storage_account_name, storage_account_key):
# Action.
qsc = QueueServiceClient(self.account_url(storage_account_name, "queue"), storage_account_key)
queue = self._get_queue_reference(qsc)
message = u'\u0001'
# Asserts
with self.assertRaises(HttpResponseError):
queue.send_message(message)
@QueuePreparer()
def test_message_text_base64(self, storage_account_name, storage_account_key):
# Arrange.
qsc = QueueServiceClient(self.account_url(storage_account_name, "queue"), storage_account_key)
queue = QueueClient(
account_url=self.account_url(storage_account_name, "queue"),
queue_name=self.get_resource_name(TEST_QUEUE_PREFIX),
credential=storage_account_key,
message_encode_policy=TextBase64EncodePolicy(),
message_decode_policy=TextBase64DecodePolicy())
message = u'\u0001'
# Asserts
self._validate_encoding(queue, message)
@QueuePreparer()
def test_message_bytes_base64(self, storage_account_name, storage_account_key):
# Arrange.
qsc = QueueServiceClient(self.account_url(storage_account_name, "queue"), storage_account_key)
queue = QueueClient(
account_url=self.account_url(storage_account_name, "queue"),
queue_name=self.get_resource_name(TEST_QUEUE_PREFIX),
credential=storage_account_key,
message_encode_policy=BinaryBase64EncodePolicy(),
message_decode_policy=BinaryBase64DecodePolicy())
message = b'xyz'
# Asserts
self._validate_encoding(queue, message)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="Not applicable on Python 2.7")
@QueuePreparer()
def test_message_bytes_fails(self, storage_account_name, storage_account_key):
# Arrange
qsc = QueueServiceClient(self.account_url(storage_account_name, "queue"), storage_account_key)
queue = qsc.get_queue_client(self.get_resource_name(TEST_QUEUE_PREFIX))
queue.create_queue()
# Action.
with self.assertRaises(TypeError) as e:
message = b'xyz'
queue.send_message(message)
# Asserts
self.assertTrue(str(e.exception).startswith('Message content must not be bytes. Use the BinaryBase64EncodePolicy to send bytes.'))
@QueuePreparer()
def test_message_text_fails(self, storage_account_name, storage_account_key):
# Arrange
qsc = QueueServiceClient(self.account_url(storage_account_name, "queue"), storage_account_key)
queue = QueueClient(
account_url=self.account_url(storage_account_name, "queue"),
queue_name=self.get_resource_name(TEST_QUEUE_PREFIX),
credential=storage_account_key,
message_encode_policy=BinaryBase64EncodePolicy(),
message_decode_policy=BinaryBase64DecodePolicy())
# Action.
with self.assertRaises(TypeError) as e:
message = u'xyz'
queue.send_message(message)
# Asserts
self.assertTrue(str(e.exception).startswith('Message content must be bytes'))
@QueuePreparer()
def test_message_base64_decode_fails(self, storage_account_name, storage_account_key):
# Arrange
qsc = QueueServiceClient(self.account_url(storage_account_name, "queue"), storage_account_key)
queue = QueueClient(
account_url=self.account_url(storage_account_name, "queue"),
queue_name=self.get_resource_name(TEST_QUEUE_PREFIX),
credential=storage_account_key,
message_encode_policy=None,
message_decode_policy=BinaryBase64DecodePolicy())
try:
queue.create_queue()
except ResourceExistsError:
pass
message = u'xyz'
queue.send_message(message)
# Action.
with self.assertRaises(DecodeError) as e:
queue.peek_messages()
# Asserts
self.assertNotEqual(-1, str(e.exception).find('Message content is not valid base 64'))
def test_message_no_encoding(self):
# Arrange
queue = QueueClient(
account_url="https://account.queue.core.windows.net",
queue_name="queue",
credential="account_key",
message_encode_policy=None,
message_decode_policy=None)
# Asserts
assert isinstance(queue._config.message_encode_policy, NoEncodePolicy)
assert isinstance(queue._config.message_decode_policy, NoDecodePolicy)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#----------------------------------------------------------------------
# Copyright (c) 2011-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
""" Acceptance tests for AM API v1, v2, and v3."""
import copy
import datetime
import dateutil.parser
import json
import os
import pprint
import re
import sys
import time
import tempfile
import unittest
import xml.etree.ElementTree as etree
from gcf.geni.util import rspec_util
from gcf.geni.util.rspec_schema import *
from gcf.geni.util import urn_util
from gcf.geni.util import error_util
import gcf.oscript as omni
import omni_unittest as ut
from omni_unittest import NotSuccessError, NotDictAssertionError, NotNoneAssertionError
from omni_unittest import NotXMLAssertionError, NoResourcesAssertionError, WrongRspecType
from gcf.omnilib.util import OmniError, NoSliceCredError, RefusedError, AMAPIError
import gcf.omnilib.util.json_encoding as json_encoding
import gcf.omnilib.util.credparsing as credparsing
import am_api_accept as accept
# Works at PLC
PGV2_RSPEC_NAME = "ProtoGENI"
PGV2_RSPEC_NUM = '2'
RSPEC_NAME = "GENI"
RSPEC_NUM = '3'
TMP_DIR="."
REQ_RSPEC_FILE="request.xml"
REQ_RSPEC_FILE_1="request1.xml"
REQ_RSPEC_FILE_2="request2.xml"
REQ_RSPEC_FILE_3="request3.xml"
BAD_RSPEC_FILE="bad.xml"
SLEEP_TIME=30 # Pause between AM API calls in seconds
SUCCESS = 0
################################################################################
#
# Test AM API calls for accurate and complete functionality.
#
# This script relies on the unittest module.
#
# To run:
# am_api_accept_scaling.py -a eg-bbn -V 2 --rspec-file twoegvmsoneline.rspec --un-bound ScalingTest.test_CreateSliverWorkflow_scalingTest
#
# To add a new test:
# Create a new method with a name starting with 'test_". It will
# automatically be run when am_api_accept.py is called.
#
################################################################################
NUM_SLEEP = 12
MAX_TIME_TO_CREATESLIVER = 3*60 # 3 minutes
NUM_SLICES = 3 # number of slices to create
DEFAULT_SLICE_NAME = "scale" # eg scale01, scale02, etc
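# MAX_TIME_TO_CREATESLIVER bounds the total time subtest_SliverStatus_scaling will
# poll for slivers to become ready; each pass through its loop sleeps roughly
# max_time/NUM_SLEEP seconds plus a short fixed pause.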
class ScalingTest(accept.Test):
def test_CreateSliverWorkflow_scalingTest(self):
"""test_CreateSliverWorkflow_ScalingTest: Do CreateSliver workflow with multiple slices"""
self.logger.info("\n=== Test.test_CreateSliverWorkflow_scalingTest ===")
if self.options_copy.rspeclint:
rspec_util.rspeclint_exists()
rspec_namespace = self.manifest_namespace
rspec_schema = self.manifest_schema
request = []
numslivers = []
manifest = []
slivers = []
manifest2 = []
slicenames = []
num_slices = self.options_copy.num_slices
        # always run --un-bound (since this test assumes that you can
# offer the same rspec to an aggregate multiple times)
self.options_copy.bound = False
for i in xrange(num_slices):
slicenames.append("")
# slicenames[i] = self.create_slice_name()+str(i)
slicenames[i] = self.options_copy.slice_name+str(i)
for i in xrange(num_slices):
# if reusing a slice name, don't create (or delete) the slice
self.subtest_createslice( slicenames[i] )
print "%d: CreateSlice [%s] completed..."%(i, slicenames[i])
time.sleep(self.options_copy.sleep_time)
# in case some slivers were left laying around from last
# time, try to delete them now
for i in xrange(num_slices):
try:
self.subtest_generic_DeleteSliver( slicenames[i] )
time.sleep(self.options_copy.sleep_time)
except:
pass
try:
for i in xrange(num_slices):
                # Check for the existence of the Request RSpec file
# self.assertTrue( os.path.exists(self.options_copy.rspec_file_list[i]),
self.assertTrue( os.path.exists(self.options_copy.rspec_file),
"Request RSpec file, '%s' for 'CreateSliver' call " \
"expected to exist " \
"but does not."
# % self.options_copy.rspec_file_list[i] )
% self.options_copy.rspec_file )
# with open(self.options_copy.rspec_file_list[i]) as f:
with open(self.options_copy.rspec_file) as f:
request.append("")
request[i] = "".join(f.readlines())
numslivers.append(-1)
manifest.append("")
slivers.append("")
# self.options_copy.rspec_file = self.options_copy.rspec_file_list[i]
time.sleep(self.options_copy.sleep_time)
# # False args mean in v3+, don't do Provision or POA
# createReturn = self.subtest_generic_CreateSliver( slicenames[i], False, False )
sliceExpiration = self.getSliceExpiration( slicenames[i] )
createReturn = self.subtest_generic_CreateSliver( slicenames[i], expectedExpiration=sliceExpiration )
print "%d: CreateSliver on slice [%s] completed..."%(i, slicenames[i])
numslivers[i], tmpManifest, slivers[i] = createReturn
manifest[i] = "".join(tmpManifest)
self.assertRspecType( "".join(request[i]), 'request')
self.assertRspecType( "".join(manifest[i]), 'manifest')
# manifest should be valid XML
self.assertIsXML( manifest[i],
"Manifest RSpec returned by 'CreateSliver' on slice '%s' " \
"expected to be wellformed XML file " \
"but was not. Return was: " \
"\n%s\n" \
"... edited for length ..."
% (slicenames[i], manifest[i][:100]))
if self.options_copy.rspeclint:
self.assertTrue(rspec_util.validate_rspec( manifest[i],
namespace=rspec_namespace,
schema=rspec_schema ),
"Return from 'CreateSliver' " \
"expected to pass rspeclint " \
"but did not. Return was: " \
"\n%s\n" \
"... edited for length ..."
% (manifest[i][:100]))
# Make sure the Manifest returned the nodes identified
# in the Request
if rspec_util.has_child_node( manifest[i], self.RSpecVersion()):
if self.options_copy.bound:
self.assertCompIDsEqual( "".join(request[i]),
"".join(manifest[i]),
self.RSpecVersion(),
"Request RSpec and Manifest RSpec " \
"returned by 'ListResources' on slice '%s' " \
"expected to have same component_ids " \
"but did not." % slicenames[i])
self.assertClientIDsEqual( "".join(request[i]),
"".join(manifest[i]),
self.RSpecVersion(),
"Request RSpec and Manifest RSpec " \
"returned by 'ListResources' on slice '%s' " \
"expected to have same client_ids " \
"but did not." % slicenames[i])
else:
# the top level node should have a child
self.assertResourcesExist( "".join(manifest[i]),
"Manifest RSpec returned by 'CreateSliver' on slice '%s' " \
"expected to NOT be empty " \
"but was. Return was: " \
"\n%s\n"
% (slicenames[i], "".join(manifest[i])))
# Separate for loop here guarantees time has passed on each AM since createsliver call
self.subtest_SliverStatus_scaling(slicenames)
except:
raise
finally:
time.sleep(self.options_copy.sleep_time)
for i in xrange(num_slices):
try:
self.subtest_generic_DeleteSliver( slicenames[i] )
print "%d: DeleteSliver on slice [%s] completed..."%(i, slicenames[i])
except:
pass
self.success = True
def subtest_SliverStatus_scaling(self, slicenames):
num_slices = len(slicenames)
have_slept = 0
long_sleep = max( 5, self.options_copy.max_time / NUM_SLEEP )
short_sleep = 30
# before starting check if this is going to fail for unrecoverable reasons having nothing to do with being ready
# maybe get the slice credential
# self.subtest_generic_SliverStatus( slicename )
slices_to_test = set(range(num_slices))
status_ready = {}
while have_slept <= self.options_copy.max_time:
tmp_slices_to_test = copy.deepcopy(slices_to_test)
for i in tmp_slices_to_test:
status_ready[i] = False
try:
# checks geni_operational_status to see if ready
if self.options_copy.api_version >= 3:
geni_status = "geni_ready"
else:
geni_status = "ready"
self.subtest_generic_SliverStatus( slicenames[i], status=geni_status )
status_ready[i]=True
slices_to_test.remove( i )
except Exception, e:
self.logger.info("Waiting for SliverStatus to succeed and return status of '%s'" % geni_status)
self.logger.info("Exception raised: %s" % e)
self.logger.debug("===> Starting to sleep")
self.logger.debug("=== sleep %s seconds ==="%str(long_sleep))
time.sleep( short_sleep )
have_slept += short_sleep
time.sleep( long_sleep )
have_slept += long_sleep
self.logger.debug("<=== Finished sleeping")
for i in set(range(num_slices)):
if status_ready[i]:
print "%d: SliverStatus on slice [%s] completed with status READY"%(i, slicenames[i])
else:
print "%d: SliverStatus on slice [%s] completed WITHOUT status ready."%(i, slicenames[i])
print "%d: Consider setting --max-createsliver-time value to be greater than %s seconds."%(i, self.options_copy.max_time)
for i in set(range(num_slices)):
self.assertTrue( status_ready[i],
"SliverStatus on slice '%s' expected to be '%s' but was not" % (slicenames[i], geni_status))
@classmethod
def getParser( cls, parser=accept.Test.getParser(), usage=None):
parser.add_option( "--max-createsliver-time",
action="store", type='int',
default = MAX_TIME_TO_CREATESLIVER,
dest='max_time',
help="Max number of seconds will attempt to check status of a sliver before failing [default: %default]")
parser.add_option( "--num-slices",
action="store", type='int',
default=NUM_SLICES,
dest='num_slices',
help="Number of slices to create [default: %default]")
parser.add_option( "--slice-name",
action="store", type='string',
default=DEFAULT_SLICE_NAME,
dest='slice_name',
help="Use slice name as base of slice name [default: %default]")
return parser
@classmethod
def scaling_parser( cls, parser=None, usage=None):
if parser is None:
parser = cls.getParser()
argv = ScalingTest.unittest_parser(parser=parser, usage=usage)
return argv
if __name__ == '__main__':
usage = "\n %s -a am-undertest" \
"\n Also try --vv" % sys.argv[0]
argv = ScalingTest.scaling_parser(usage=usage)
unittest.main()
|
|
from pycocotools.coco import COCO
import numpy as np
import logging
import os
import sys
import cv2
import shutil
import json
"""
COCO SUPPORT
"""
def _mapSubject(annotation,mapping):
"""
map annotation category_id to a subject
:param mapping:
:param annotation:
:return:
@type mapping: dict
"""
return mapping[annotation['category_id']] if annotation['category_id'] in mapping else 'man-made object'
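# Example (hypothetical mapping): _mapSubject({'category_id': 3}, {3: 'animal'})
# returns 'animal'; an unmapped category_id falls back to 'man-made object'.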
def createMaskImage(image_array, imagefilename, coco, lookup, subjectMapping={}, areaConstraint=(0,sys.maxint)):
"""
Given an image and its Coco data, pick a mask from the segmented image.
:param image
:param imageData:
:return:
@type imageData: dict
@type image_array: numpy.array
@type coco: COCO
@type lookup: dict
"""
    def defaultMask(image):
        # fall back to masking the center quarter of the image
        h, w = image.shape[0], image.shape[1]
        real_mask = np.zeros((h, w), dtype=np.uint8)
        real_mask[h / 4:3 * h / 4, w / 4:3 * w / 4] = 255
        return 'other', real_mask
imgId = lookup[os.path.split(imagefilename)[1]]
imageData = coco.loadImgs(ids=[imgId])[0]
annIds = coco.getAnnIds(imgIds=[imgId])
annotations = coco.loadAnns(annIds)
logging.getLogger('maskgen').info('Processing image name: {}'.format(imagefilename))
    image_height, image_width = image_array.shape[0], image_array.shape[1]
    factor = float(imageData['width']) / image_width
valid_annotations = [annotation for annotation in annotations
if annotation['area'] * factor >= areaConstraint[0] and annotation['area'] * factor <= areaConstraint[1]]
if len(valid_annotations) > 0:
position = np.random.randint(0, len(valid_annotations))
        annotation = valid_annotations[position]
real_mask = coco.annToMask(annotation)
real_mask = real_mask.astype(np.uint8)
real_mask[real_mask>0] = 255
        if real_mask.shape != (image_height, image_width):
            real_mask = cv2.resize(real_mask, (image_width, image_height))
subject = _mapSubject(annotation,subjectMapping)
return subject,real_mask
    return defaultMask(image_array)
# mask[real_mask > 0] = [color/65536,color%65536/256,color%256]
def loadCoco(annotationsFile):
return COCO(annotationsFile)
def createFileNameToIDLookup(coco,imgIds=[], catIds=[]):
"""
Create an index of file name to coco image ID
:param coco:
:return:
@type coco: COCO
"""
return { image_data['file_name']:image_data['id'] for image_data in coco.loadImgs(coco.getImgIds(imgIds=imgIds,catIds=catIds))}
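# The lookup built above is keyed by file name, e.g. (illustrative values only)
# {'COCO_train2014_000000000009.jpg': 9, ...}; createMaskImage expects it as its
# 'lookup' argument.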
def createMaskImageWithParams(image_array, imagefilename, params, areaConstraint=(0,sys.maxint)):
"""
Using parameters for the coco and coco.index as they would appear in the global state,
create mask using one of the select annotations.
@see createBatchProjectGlobalState.
:param image_array:
:param imagefilename:
:param params:
:param areaConstraint:
:return:
@type image_array: numpy.ndarray
@type params: dict
"""
if 'coco.annotations' in params:
annotationPath = params['coco.annotations']
        if not os.path.exists(annotationPath):
logging.getLogger('maskgen').error(
'Cannot load COCO annotations. Annotation path set to coco.annotations is invalid.')
return None,None
coco = COCO(annotationPath)
else:
if 'coco' not in params:
logging.getLogger('maskgen').error('Cannot build mask. Missing parameter coco.')
return None,None
coco = params['coco']
index = params['coco.index'] if 'coco.index' in params else createFileNameToIDLookup(coco)
return createMaskImage(image_array, imagefilename, coco,index,areaConstraint=areaConstraint)
def createBatchProjectGlobalState(global_state):
"""
Check the global state for a batch project. Initialize coco and return additions to the global state if missing
:param global_state:
:return:
@type global_state: dict
"""
if 'coco.annotations' not in global_state:
logging.getLogger('maskgen').error('Cannot load COCO annotations. Missing parameter coco.annotations.')
return {}
annotationPath = global_state['coco.annotations']
if not os.path.exists(annotationPath):
logging.getLogger('maskgen').error('Cannot load COCO annotations. Annotation path set to coco.annotations is invalid.')
return {}
coco = loadCoco(annotationPath)
return {'coco' : coco, 'coco.index' : createFileNameToIDLookup(coco), 'coco.subject': {}}
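# Typical use (sketch): given global_state containing
# {'coco.annotations': '/path/to/instances.json'}, the returned dict supplies the
# loaded COCO object, its file-name index, and an empty subject cache to be merged
# back into the batch project's global state.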
def moveValidImages(image_dir,target_dir,annotationPath,areaConstraint=(0,sys.maxint),maxCount=None, newAnnotationPath=None):
"""
Move the images from the source folder to the target folder if they represent a valid
image that contains images that meet the area constraints.
Download the image from flickr if missing.
If image_dir and target_dir are the same, images that do not meet the criteria are removed.
:param image_dir:
:param target_dir:
:param annotationPath:
:param areaConstraint:
:param maxCount: maximum number of images to move/download
:param newAnnotationPath: if provided, save the annotations for the select images
:return:
"""
coco = COCO(annotationPath)
keep = []
annotations_to_keep = []
for imgId in coco.getImgIds():
if maxCount is not None and len(keep) >= maxCount:
break
if imgId not in coco.anns:
continue
#this download is broken...downloading invalid 500x500 images!
coco.download(tarDir=image_dir, imgIds=[imgId])
imageData = coco.loadImgs(ids=[imgId])[0]
target_file = os.path.join(target_dir,imageData['file_name'])
source_file = os.path.join(image_dir, imageData['file_name'])
if not os.path.exists(target_file):
logging.getLogger('maskgen').warn('File Not Found: {}'.format(imageData['file_name']))
else:
            annIds = coco.getAnnIds(imgIds=[imgId])
            annotations = coco.loadAnns(ids=annIds)
valid_annotations = [annotation for annotation in annotations
if annotation['area'] >= areaConstraint[0] and annotation['area'] <= areaConstraint[1]]
if len(valid_annotations) > 0:
if source_file != target_file:
shutil.move(source_file,target_file)
keep.append(imgId)
annotations_to_keep.extend(valid_annotations)
elif source_file == target_file:
os.remove(source_file)
if newAnnotationPath is not None:
dataset = {'info': coco.dataset['info'],
'images': coco.loadImgs(ids=keep),
'categories': coco.dataset['categories'],
'annotations': annotations_to_keep}
with open(newAnnotationPath, 'w') as f:
json.dump(dataset, f, indent=2, encoding='utf-8')
def createSubset(annotationPath,filename, areaConstraint=(0,sys.maxint),maxCount=None):
"""
    Write a subset of the COCO annotations containing only the annotations that
    meet the area constraints, keeping at most maxCount images.
    :param annotationPath: path to the source COCO annotation file
    :param filename: path of the subset annotation file to write
    :param areaConstraint: (minimum, maximum) annotation area
    :param maxCount: maximum number of images to keep
:return:
"""
coco = COCO(annotationPath)
keep = []
annotations_to_keep = []
for imgId in coco.getImgIds():
if maxCount is not None and len(keep) >= maxCount:
break
if imgId not in coco.anns:
continue
annIds = coco.getAnnIds(imgIds=[imgId])
annotations = coco.loadAnns(ids=annIds)
valid_annotations = [annotation for annotation in annotations
if annotation['area'] >= areaConstraint[0] and annotation['area'] <= areaConstraint[1]]
if len(valid_annotations) > 0:
keep.append(imgId)
annotations_to_keep.extend(valid_annotations)
dataset = {'info':coco.dataset['info'],
'images':coco.loadImgs(ids=keep),
'categories':coco.dataset['categories'],
'annotations':annotations_to_keep}
with open(filename, 'w') as f:
json.dump(dataset, f, indent=2, encoding='utf-8')
def main(argv=None):
createSubset('/Users/ericrobertson/Downloads/annotations/instances_train2014.json',
'tests/other_plugins/CocoMaskSelector/annotations.json',
maxCount=30)
if __name__ == "__main__":
import sys
sys.exit(main())
|
|
#!/usr/bin/env python
"""Main apt-select script"""
import requests
import re
from sys import exit, stderr, version_info
from os import getcwd
from apt_select.arguments import get_args, DEFAULT_COUNTRY, SKIPPED_FILE_GENERATION
from apt_select.mirrors import Mirrors
from apt_select.apt import System, Sources, SourcesFileError
from apt_select.utils import DEFAULT_REQUEST_HEADERS
# Support input for Python 2 and 3
get_input = input
if version_info[:2] <= (2, 7):
get_input = raw_input
def set_args():
"""Set arguments, disallow bad combination"""
parser = get_args()
args = parser.parse_args()
# Convert status argument to format used by Launchpad
args.min_status = args.min_status.replace('-', ' ')
if not args.ping_only and (args.min_status != 'unknown'):
args.min_status = args.min_status.capitalize()
if args.choose and (not args.top_number or args.top_number < 2):
parser.print_usage()
exit((
"error: -c/--choose option requires -t/--top-number NUMBER "
"where NUMBER is greater than 1."
))
if not args.country:
stderr.write('WARNING: no country code provided. defaulting to US.\n')
args.country = DEFAULT_COUNTRY
elif not re.match(r'^[a-zA-Z]{2}$', args.country):
exit((
"Invalid country. %s is not in ISO 3166-1 alpha-2 "
"format" % args.country
))
return args
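# set_args() enforces the CLI contract described above: -c/--choose requires a
# -t/--top-number greater than 1, the country code must be ISO 3166-1 alpha-2,
# and a missing country falls back to DEFAULT_COUNTRY.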
def get_mirrors(mirrors_url, country):
"""Fetch list of Ubuntu mirrors"""
stderr.write("Getting list of mirrors...")
response = requests.get(mirrors_url, headers=DEFAULT_REQUEST_HEADERS)
if response.status_code == requests.codes.NOT_FOUND:
exit(
"The mirror list for country: %s was not found at %s" % (
country, mirrors_url
)
)
stderr.write("done.\n")
return response.text.splitlines()
def print_status(info, rank):
"""Print full mirror status report for ranked item"""
for key in ("Org", "Speed"):
info.setdefault(key, "N/A")
print((
"%(rank)d. %(mirror)s\n"
"%(tab)sLatency: %(ms).2f ms\n"
"%(tab)sOrg: %(org)s\n"
"%(tab)sStatus: %(status)s\n"
"%(tab)sSpeed: %(speed)s" % {
'tab': ' ',
'rank': rank ,
'mirror': info['Host'],
'ms': info['Latency'],
'org': info['Organisation'],
'status': info['Status'],
'speed': info['Speed']
}
))
def print_latency(info, rank, max_host_len):
"""Print latency information for mirror in ranked report"""
print("%(rank)d. %(mirror)s: %(padding)s%(ms).2f ms" % {
'rank': rank,
'padding': (max_host_len - info.get('host_len', max_host_len)) * ' ',
'mirror': info['Host'],
'ms': info['Latency']
})
def ask(query):
"""Ask for unput from user"""
answer = get_input(query)
return answer
def get_selected_mirror(list_size):
"""Prompt for user input to select desired mirror"""
key = ask("Choose a mirror (1 - %d)\n'q' to quit " % list_size)
while True:
try:
key = int(key)
except ValueError:
if key == 'q':
exit()
else:
if (key >= 1) and (key <= list_size):
break
key = ask("Invalid entry ")
return key
def yes_or_no(query):
"""Get definitive answer"""
opts = ('yes', 'no')
answer = ask(query)
while answer != opts[0]:
if answer == opts[1]:
exit(0)
answer = ask("Please enter '%s' or '%s': " % opts)
def apt_select():
"""Run apt-select: Ubuntu archive mirror reporting tool"""
try:
system = System()
except OSError as err:
exit("Error setting system information:\n\t%s" % err)
try:
sources = Sources(system.codename)
except SourcesFileError as err:
exit("Error with current apt sources:\n\t%s" % err)
args = set_args()
mirrors_loc = "mirrors.ubuntu.com"
mirrors_url = "http://%s/%s.txt" % (mirrors_loc, args.country.upper())
mirrors_list = get_mirrors(mirrors_url, args.country)
archives = Mirrors(mirrors_list, args.ping_only, args.min_status)
archives.get_rtts()
if archives.got["ping"] < args.top_number:
args.top_number = archives.got["ping"]
if args.top_number == 0:
exit("Cannot connect to any mirrors in %s\n." % mirrors_list)
if not args.ping_only:
archives.get_launchpad_urls()
if not archives.abort_launch:
# Mirrors needs a limit to stop launching threads
archives.status_num = args.top_number
stderr.write("Looking up %d status(es)\n" % args.top_number)
archives.lookup_statuses(
system.codename.capitalize(),
system.arch,
args.min_status
)
if args.top_number > 1:
stderr.write('\n')
if args.ping_only or archives.abort_launch:
archives.top_list = archives.ranked[:args.top_number]
sources.set_current_archives()
current_url = sources.urls['current']
if archives.urls.get(current_url):
archives.urls[current_url]['Host'] += " (current)"
show_status = False
max_host_len = 0
if not args.ping_only and not archives.abort_launch:
show_status = True
else:
def set_hostname_len(url, i):
hostname_len = len(str(i) + archives.urls[url]['Host'])
archives.urls[url]['host_len'] = hostname_len
return hostname_len
max_host_len = max([set_hostname_len(url, i+1)
for i, url in enumerate(archives.top_list)])
for i, url in enumerate(archives.top_list):
info = archives.urls[url]
rank = i + 1
if show_status:
print_status(info, rank)
else:
print_latency(info, rank, max_host_len)
key = 0
if args.choose:
key = get_selected_mirror(len(archives.top_list)) - 1
if args.list_only:
exit()
new_mirror = archives.top_list[key]
print("Selecting mirror %(mirror)s ..." % {'mirror': new_mirror})
if current_url == new_mirror:
stderr.write(
"%(url)s is the currently used mirror.\n"
"%(message)s\n" % {
'url': current_url,
'message': sources.skip_gen_msg
})
exit(SKIPPED_FILE_GENERATION)
work_dir = getcwd()
if work_dir == sources.DIRECTORY[0:-1]:
query = (
"'%(dir)s' is the current directory.\n"
"Generating a new '%(apt)s' file will "
"overwrite the current file.\n"
"You should copy or backup '%(apt)s' before replacing it.\n"
"Continue?\n[yes|no] " % {
'dir': sources.DIRECTORY,
'apt': sources.APT_FILE
}
)
yes_or_no(query)
new_mirror = archives.top_list[key]
try:
sources.generate_new_config(work_dir, new_mirror)
except SourcesFileError as err:
exit("Error generating new config file" % err)
else:
print("New config file saved to %s" % sources.new_file_path)
exit()
def main():
try:
apt_select()
except KeyboardInterrupt:
stderr.write("Aborting...\n")
if __name__ == '__main__':
main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# Linear algebra ops.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
slogdet = gen_linalg_ops.log_matrix_determinant
tf_export('linalg.slogdet')(slogdet)
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
logm = gen_linalg_ops.matrix_logarithm
lu = gen_linalg_ops.lu
tf_export('linalg.logm')(logm)
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
sqrtm = linalg_ops.matrix_square_root
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
@tf_export('linalg.logdet')
@dispatch.add_dispatch_support
def logdet(matrix, name=None):
"""Computes log of the determinant of a hermitian positive definite matrix.
```python
  # Compute the determinant of a matrix while reducing the chance of over- or
  # underflow:
A = ... # shape 10 x 10
det = tf.exp(tf.linalg.logdet(A)) # scalar
```
Args:
matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
or `complex128` with shape `[..., M, M]`.
name: A name to give this `Op`. Defaults to `logdet`.
Returns:
The natural log of the determinant of `matrix`.
@compatibility(numpy)
Equivalent to numpy.linalg.slogdet, although no sign is returned since only
hermitian positive definite matrices are supported.
@end_compatibility
"""
# This uses the property that the log det(A) = 2*sum(log(real(diag(C))))
# where C is the cholesky decomposition of A.
with ops.name_scope(name, 'logdet', [matrix]):
chol = gen_linalg_ops.cholesky(matrix)
return 2.0 * math_ops.reduce_sum(
math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),
axis=[-1])
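# Quick check: for the diagonal SPD matrix diag(2., 3.), logdet returns
# log(2.) + log(3.) = log(6.) ~= 1.792, matching the logabsdet term of
# numpy.linalg.slogdet.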
@tf_export('linalg.adjoint')
@dispatch.add_dispatch_support
def adjoint(matrix, name=None):
"""Transposes the last two dimensions of and conjugates tensor `matrix`.
For example:
```python
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.linalg.adjoint(x) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
```
Args:
matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
or `complex128` with shape `[..., M, M]`.
name: A name to give this `Op` (optional).
Returns:
The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
matrix.
"""
with ops.name_scope(name, 'adjoint', [matrix]):
matrix = ops.convert_to_tensor(matrix, name='matrix')
return array_ops.matrix_transpose(matrix, conjugate=True)
# This section is ported nearly verbatim from Eigen's implementation:
# https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html
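# Each _matrix_exp_padeN helper returns a pair (U, V) such that the degree-N Pade
# approximant of exp(A) is (V - U)^{-1} (U + V); U collects the odd powers of A and
# V the even powers.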
def _matrix_exp_pade3(matrix):
"""3rd-order Pade approximant for matrix exponential."""
b = [120.0, 60.0, 12.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
tmp = matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade5(matrix):
"""5th-order Pade approximant for matrix exponential."""
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
tmp = matrix_4 + b[3] * matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade7(matrix):
"""7th-order Pade approximant for matrix exponential."""
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
tmp = matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade9(matrix):
"""9th-order Pade approximant for matrix exponential."""
b = [
17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0,
2162160.0, 110880.0, 3960.0, 90.0
]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
matrix_8 = math_ops.matmul(matrix_6, matrix_2)
tmp = (
matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 +
b[1] * ident)
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = (
b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 +
b[0] * ident)
return matrix_u, matrix_v
def _matrix_exp_pade13(matrix):
"""13th-order Pade approximant for matrix exponential."""
b = [
64764752532480000.0, 32382376266240000.0, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0,
33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0
]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
tmp_u = (
math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) +
b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident)
matrix_u = math_ops.matmul(matrix, tmp_u)
tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2
matrix_v = (
math_ops.matmul(matrix_6, tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 +
b[2] * matrix_2 + b[0] * ident)
return matrix_u, matrix_v
@tf_export('linalg.expm')
def matrix_exponential(input, name=None): # pylint: disable=redefined-builtin
r"""Computes the matrix exponential of one or more square matrices.
exp(A) = \sum_{n=0}^\infty A^n/n!
The exponential is computed using a combination of the scaling and squaring
method and the Pade approximation. Details can be found in:
Nicholas J. Higham, "The scaling and squaring method for the matrix
exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
form square matrices. The output is a tensor of the same shape as the input
containing the exponential for all input submatrices `[..., :, :]`.
Args:
input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or
`complex128` with shape `[..., M, M]`.
name: A name to give this `Op` (optional).
Returns:
the matrix exponential of the input.
Raises:
ValueError: An unsupported type is provided as input.
@compatibility(scipy)
Equivalent to scipy.linalg.expm
@end_compatibility
"""
with ops.name_scope(name, 'matrix_exponential', [input]):
matrix = ops.convert_to_tensor(input, name='input')
if matrix.shape[-2:] == [0, 0]:
return matrix
batch_shape = matrix.shape[:-2]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(matrix)[:-2]
# reshaping the batch makes the where statements work better
matrix = array_ops.reshape(
matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0))
l1_norm = math_ops.reduce_max(
math_ops.reduce_sum(
math_ops.abs(matrix),
axis=array_ops.size(array_ops.shape(matrix)) - 2),
axis=-1)
const = lambda x: constant_op.constant(x, l1_norm.dtype)
def _nest_where(vals, cases):
assert len(vals) == len(cases) - 1
if len(vals) == 1:
return array_ops.where(
math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1])
else:
return array_ops.where(
math_ops.less(l1_norm, const(vals[0])), cases[0],
_nest_where(vals[1:], cases[1:]))
if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]:
maxnorm = const(3.925724783138660)
squarings = math_ops.maximum(
math_ops.floor(
math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
u3, v3 = _matrix_exp_pade3(matrix)
u5, v5 = _matrix_exp_pade5(matrix)
u7, v7 = _matrix_exp_pade7(matrix / math_ops.pow(
constant_op.constant(2.0, dtype=matrix.dtype),
math_ops.cast(
squarings,
matrix.dtype))[..., array_ops.newaxis, array_ops.newaxis])
conds = (4.258730016922831e-001, 1.880152677804762e+000)
u = _nest_where(conds, (u3, u5, u7))
v = _nest_where(conds, (v3, v5, v7))
elif matrix.dtype in [dtypes.float64, dtypes.complex128]:
maxnorm = const(5.371920351148152)
squarings = math_ops.maximum(
math_ops.floor(
math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
u3, v3 = _matrix_exp_pade3(matrix)
u5, v5 = _matrix_exp_pade5(matrix)
u7, v7 = _matrix_exp_pade7(matrix)
u9, v9 = _matrix_exp_pade9(matrix)
u13, v13 = _matrix_exp_pade13(matrix / math_ops.pow(
constant_op.constant(2.0, dtype=matrix.dtype),
math_ops.cast(
squarings,
matrix.dtype))[..., array_ops.newaxis, array_ops.newaxis])
conds = (1.495585217958292e-002, 2.539398330063230e-001,
9.504178996162932e-001, 2.097847961257068e+000)
u = _nest_where(conds, (u3, u5, u7, u9, u13))
v = _nest_where(conds, (v3, v5, v7, v9, v13))
else:
raise ValueError('tf.linalg.expm does not support matrices of type %s' %
matrix.dtype)
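    # Combine the Pade numerator and denominator: exp(A / 2^squarings) is
    # approximated by (V - U)^{-1} (U + V); the while_loop below then squares the
    # result `squarings` times to undo the scaling.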
numer = u + v
denom = -u + v
result = linalg_ops.matrix_solve(denom, numer)
max_squarings = math_ops.reduce_max(squarings)
i = const(0.0)
c = lambda i, r: math_ops.less(i, max_squarings)
def b(i, r):
return i + 1, array_ops.where(
math_ops.less(i, squarings), math_ops.matmul(r, r), r)
_, result = control_flow_ops.while_loop(c, b, [i, result])
if not matrix.shape.is_fully_defined():
return array_ops.reshape(
result,
array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0))
return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:]))
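# Sanity check (sketch): tf.linalg.expm of a zero matrix is the identity, and for a
# diagonal input diag(d) the result has exp(d) on the diagonal.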
@tf_export('linalg.tridiagonal_solve')
def tridiagonal_solve(diagonals,
rhs,
diagonals_format='compact',
transpose_rhs=False,
conjugate_rhs=False,
name=None,
partial_pivoting=True):
r"""Solves tridiagonal systems of equations.
The input can be supplied in various formats: `matrix`, `sequence` and
`compact`, specified by the `diagonals_format` arg.
In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
two inner-most dimensions representing the square tridiagonal matrices.
Elements outside of the three diagonals will be ignored.
In `sequence` format, `diagonals` are supplied as a tuple or list of three
tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing
superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either
`M-1` or `M`; in the latter case, the last element of superdiagonal and the
first element of subdiagonal will be ignored.
In `compact` format the three diagonals are brought together into one tensor
of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
diagonals, and subdiagonals, in order. Similarly to `sequence` format,
elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.
The `compact` format is recommended as the one with best performance. In case
you need to cast a tensor into a compact format manually, use `tf.gather_nd`.
An example for a tensor of shape [m, m]:
```python
rhs = tf.constant([...])
matrix = tf.constant([[...]])
m = matrix.shape[0]
dummy_idx = [0, 0] # An arbitrary element to use as a dummy
indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx], # Superdiagonal
[[i, i] for i in range(m)], # Diagonal
[dummy_idx] + [[i + 1, i] for i in range(m - 1)]] # Subdiagonal
diagonals=tf.gather_nd(matrix, indices)
x = tf.linalg.tridiagonal_solve(diagonals, rhs)
```
Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or
`[..., M, K]`. The latter allows to simultaneously solve K systems with the
same left-hand sides and K different right-hand sides. If `transpose_rhs`
is set to `True` the expected shape is `[..., M]` or `[..., K, M]`.
The batch dimensions, denoted as `...`, must be the same in `diagonals` and
`rhs`.
The output is a tensor of the same shape as `rhs`: either `[..., M]` or
`[..., M, K]`.
The op isn't guaranteed to raise an error if the input matrix is not
invertible. `tf.debugging.check_numerics` can be applied to the output to
detect invertibility problems.
**Note**: with large batch sizes, the computation on the GPU may be slow, if
either `partial_pivoting=True` or there are multiple right-hand sides
(`K > 1`). If this issue arises, consider if it's possible to disable pivoting
and have `K = 1`, or, alternatively, consider using CPU.
On CPU, solution is computed via Gaussian elimination with or without partial
pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE
library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
Args:
diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
shape depends of `diagonals_format`, see description above. Must be
`float32`, `float64`, `complex64`, or `complex128`.
rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as
`diagonals`.
diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
`compact`.
transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect
if the shape of rhs is [..., M]).
conjugate_rhs: If `True`, `rhs` is conjugated before solving.
name: A name to give this `Op` (optional).
partial_pivoting: whether to perform partial pivoting. `True` by default.
Partial pivoting makes the procedure more stable, but slower. Partial
pivoting is unnecessary in some cases, including diagonally dominant and
symmetric positive definite matrices (see e.g. theorem 9.12 in [1]).
Returns:
A `Tensor` of shape [..., M] or [..., M, K] containing the solutions.
Raises:
ValueError: An unsupported type is provided as input, or when the input
tensors have incorrect shapes.
[1] Nicholas J. Higham (2002). Accuracy and Stability of Numerical Algorithms:
Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7.
"""
if diagonals_format == 'compact':
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
name)
if diagonals_format == 'sequence':
if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3:
raise ValueError('Expected diagonals to be a sequence of length 3.')
superdiag, maindiag, subdiag = diagonals
if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or
not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])):
raise ValueError(
          'Tensors representing the three diagonals must have the same shape, '
          'except for the last dimension, got {}, {}, {}'.format(
subdiag.shape, maindiag.shape, superdiag.shape))
m = tensor_shape.dimension_value(maindiag.shape[-1])
def pad_if_necessary(t, name, last_dim_padding):
n = tensor_shape.dimension_value(t.shape[-1])
if not n or n == m:
return t
if n == m - 1:
paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] +
[last_dim_padding])
return array_ops.pad(t, paddings)
    raise ValueError('Expected {} to have length {} or {}, got {}.'.format(
name, m, m - 1, n))
subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0])
superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1])
diagonals = array_ops.stack((superdiag, maindiag, subdiag), axis=-2)
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
name)
if diagonals_format == 'matrix':
m1 = tensor_shape.dimension_value(diagonals.shape[-1])
m2 = tensor_shape.dimension_value(diagonals.shape[-2])
if m1 and m2 and m1 != m2:
raise ValueError(
'Expected last two dimensions of diagonals to be same, got {} and {}'
.format(m1, m2))
m = m1 or m2
if not m:
raise ValueError('The size of the matrix needs to be known for '
'diagonals_format="matrix"')
# Extract diagonals; use input[..., 0, 0] as "dummy" m-th elements of sub-
# and superdiagonal.
# gather_nd slices into first indices, whereas we need to slice into the
# last two, so transposing back and forth is necessary.
dummy_idx = [0, 0]
indices = ([[[1, 0], [0, 0], dummy_idx]] +
[[[i + 1, i], [i, i], [i - 1, i]] for i in range(1, m - 1)] +
[[dummy_idx, [m - 1, m - 1], [m - 2, m - 1]]])
diagonals = array_ops.transpose(
array_ops.gather_nd(array_ops.transpose(diagonals), indices))
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
name)
raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format))
def _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting, name):
"""Helper function used after the input has been cast to compact form."""
diags_rank, rhs_rank = len(diagonals.shape), len(rhs.shape)
if diags_rank < 2:
raise ValueError(
'Expected diagonals to have rank at least 2, got {}'.format(diags_rank))
if rhs_rank != diags_rank and rhs_rank != diags_rank - 1:
raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(
diags_rank - 1, diags_rank, rhs_rank))
if diagonals.shape[-2] and diagonals.shape[-2] != 3:
raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))
if not diagonals.shape[:-2].is_compatible_with(rhs.shape[:diags_rank - 2]):
raise ValueError('Batch shapes {} and {} are incompatible'.format(
diagonals.shape[:-2], rhs.shape[:diags_rank - 2]))
def check_num_lhs_matches_num_rhs():
if (diagonals.shape[-1] and rhs.shape[-2] and
diagonals.shape[-1] != rhs.shape[-2]):
      raise ValueError('Expected number of left-hand sides and right-hand '
'sides to be equal, got {} and {}'.format(
diagonals.shape[-1], rhs.shape[-2]))
if rhs_rank == diags_rank - 1:
# Rhs provided as a vector, ignoring transpose_rhs
if conjugate_rhs:
rhs = math_ops.conj(rhs)
rhs = array_ops.expand_dims(rhs, -1)
check_num_lhs_matches_num_rhs()
return array_ops.squeeze(
linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, name),
-1)
if transpose_rhs:
rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)
elif conjugate_rhs:
rhs = math_ops.conj(rhs)
check_num_lhs_matches_num_rhs()
result = linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, name)
return array_ops.matrix_transpose(result) if transpose_rhs else result
@tf_export('linalg.tridiagonal_matmul')
def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None):
r"""Multiplies tridiagonal matrix by matrix.
`diagonals` is representation of 3-diagonal NxN matrix, which depends on
`diagonals_format`.
In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
two inner-most dimensions representing the square tridiagonal matrices.
Elements outside of the three diagonals will be ignored.
If `sequence` format, `diagonals` is list or tuple of three tensors:
`[superdiag, maindiag, subdiag]`, each having shape [..., M]. Last element
of `superdiag` first element of `subdiag` are ignored.
In `compact` format the three diagonals are brought together into one tensor
of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
diagonals, and subdiagonals, in order. Similarly to `sequence` format,
elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.
The `sequence` format is recommended as the one with the best performance.
`rhs` is matrix to the right of multiplication. It has shape `[..., M, N]`.
Example:
```python
superdiag = tf.constant([-1, -1, 0], dtype=tf.float64)
maindiag = tf.constant([2, 2, 2], dtype=tf.float64)
subdiag = tf.constant([0, -1, -1], dtype=tf.float64)
diagonals = [superdiag, maindiag, subdiag]
rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)
x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence')
```
Args:
diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
      shape depends on `diagonals_format`; see the description above. Must be
`float32`, `float64`, `complex64`, or `complex128`.
rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`.
    diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
      `compact`.
name: A name to give this `Op` (optional).
Returns:
A `Tensor` of shape [..., M, N] containing the result of multiplication.
Raises:
ValueError: An unsupported type is provided as input, or when the input
tensors have incorrect shapes.
"""
if diagonals_format == 'compact':
superdiag = diagonals[..., 0, :]
maindiag = diagonals[..., 1, :]
subdiag = diagonals[..., 2, :]
elif diagonals_format == 'sequence':
superdiag, maindiag, subdiag = diagonals
elif diagonals_format == 'matrix':
m1 = tensor_shape.dimension_value(diagonals.shape[-1])
m2 = tensor_shape.dimension_value(diagonals.shape[-2])
if not m1 or not m2:
raise ValueError('The size of the matrix needs to be known for '
'diagonals_format="matrix"')
if m1 != m2:
raise ValueError(
          'Expected last two dimensions of diagonals to be the same, got {} and {}'
.format(m1, m2))
# TODO(b/131695260): use matrix_diag_part when it supports extracting
# arbitrary diagonals.
maindiag = array_ops.matrix_diag_part(diagonals)
diagonals = array_ops.transpose(diagonals)
dummy_index = [0, 0]
superdiag_indices = [[i + 1, i] for i in range(0, m1 - 1)] + [dummy_index]
subdiag_indices = [dummy_index] + [[i - 1, i] for i in range(1, m1)]
superdiag = array_ops.transpose(
array_ops.gather_nd(diagonals, superdiag_indices))
subdiag = array_ops.transpose(
array_ops.gather_nd(diagonals, subdiag_indices))
else:
raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format)
# C++ backend requires matrices.
# Converting 1-dimensional vectors to matrices with 1 row.
superdiag = array_ops.expand_dims(superdiag, -2)
maindiag = array_ops.expand_dims(maindiag, -2)
subdiag = array_ops.expand_dims(subdiag, -2)
return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name)
def _maybe_validate_matrix(a, validate_args):
"""Checks that input is a `float` matrix."""
assertions = []
if not a.dtype.is_floating:
raise TypeError('Input `a` must have `float`-like `dtype` '
'(saw {}).'.format(a.dtype.name))
if a.shape is not None and a.shape.rank is not None:
if a.shape.rank < 2:
raise ValueError('Input `a` must have at least 2 dimensions '
'(saw: {}).'.format(a.shape.rank))
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least(
a, rank=2, message='Input `a` must have at least 2 dimensions.'))
return assertions
@tf_export('linalg.matrix_rank')
def matrix_rank(a, tol=None, validate_args=False, name=None):
"""Compute the matrix rank of one or more matrices.
Arguments:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) whose rank(s) are to
      be computed.
tol: Threshold below which the singular value is counted as 'zero'.
Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: 'matrix_rank'.
Returns:
matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
singular values.
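  #### Examples

  A minimal usage sketch (the matrix below is illustrative; its second row is
  a multiple of the first, so only two singular values are non-zero):

  ```python
  import tensorflow as tf

  a = tf.constant([[1., 2., 3.],
                   [2., 4., 6.],
                   [0., 0., 1.]])
  tf.linalg.matrix_rank(a)
  # ==> 2
  ```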
"""
with ops.name_scope(name or 'matrix_rank'):
a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')
assertions = _maybe_validate_matrix(a, validate_args)
if assertions:
with ops.control_dependencies(assertions):
a = array_ops.identity(a)
s = svd(a, compute_uv=False)
if tol is None:
if (a.shape[-2:]).is_fully_defined():
m = np.max(a.shape[-2:].as_list())
else:
m = math_ops.reduce_max(array_ops.shape(a)[-2:])
eps = np.finfo(a.dtype.as_numpy_dtype).eps
tol = (
eps * math_ops.cast(m, a.dtype) *
math_ops.reduce_max(s, axis=-1, keepdims=True))
return math_ops.reduce_sum(math_ops.cast(s > tol, dtypes.int32), axis=-1)
@tf_export('linalg.pinv')
def pinv(a, rcond=None, validate_args=False, name=None):
"""Compute the Moore-Penrose pseudo-inverse of one or more matrices.
Calculate the [generalized inverse of a matrix](
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its
singular-value decomposition (SVD) and including all large singular values.
  The pseudo-inverse of a matrix `A` is defined as 'the matrix that "solves"
  [the least-squares problem] `A @ x = b`', i.e., if `x_hat` is a solution, then
  `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if
  `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then
  `A_pinv = V @ inv(Sigma) @ U.T`. [(Strang, 1980)][1]
This function is analogous to [`numpy.linalg.pinv`](
https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).
It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the
default `rcond` is `1e-15`. Here the default is
`10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.
Args:
a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
pseudo-inverted.
rcond: `Tensor` of small singular value cutoffs. Singular values smaller
(in modulus) than `rcond` * largest_singular_value (again, in modulus) are
set to zero. Must broadcast against `tf.shape(a)[:-2]`.
Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: 'pinv'.
Returns:
a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a` except
rightmost two dimensions are transposed.
Raises:
TypeError: if input `a` does not have `float`-like `dtype`.
ValueError: if input `a` has fewer than 2 dimensions.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
a = tf.constant([[1., 0.4, 0.5],
[0.4, 0.2, 0.25],
[0.5, 0.25, 0.35]])
  tf.matmul(tf.linalg.pinv(a), a)
# ==> array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=float32)
a = tf.constant([[1., 0.4, 0.5, 1.],
[0.4, 0.2, 0.25, 2.],
[0.5, 0.25, 0.35, 3.]])
  tf.matmul(tf.linalg.pinv(a), a)
# ==> array([[ 0.76, 0.37, 0.21, -0.02],
[ 0.37, 0.43, -0.33, 0.02],
[ 0.21, -0.33, 0.81, 0.01],
[-0.02, 0.02, 0.01, 1. ]], dtype=float32)
```
#### References
[1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press,
Inc., 1980, pp. 139-142.
"""
with ops.name_scope(name or 'pinv'):
a = ops.convert_to_tensor(a, name='a')
assertions = _maybe_validate_matrix(a, validate_args)
if assertions:
with ops.control_dependencies(assertions):
a = array_ops.identity(a)
dtype = a.dtype.as_numpy_dtype
if rcond is None:
def get_dim_size(dim):
dim_val = tensor_shape.dimension_value(a.shape[dim])
if dim_val is not None:
return dim_val
return array_ops.shape(a)[dim]
num_rows = get_dim_size(-2)
num_cols = get_dim_size(-1)
if isinstance(num_rows, int) and isinstance(num_cols, int):
max_rows_cols = float(max(num_rows, num_cols))
else:
max_rows_cols = math_ops.cast(
math_ops.maximum(num_rows, num_cols), dtype)
rcond = 10. * max_rows_cols * np.finfo(dtype).eps
rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond')
# Calculate pseudo inverse via SVD.
# Note: if a is Hermitian then u == v. (We might observe additional
# performance by explicitly setting `v = u` in such cases.)
[
singular_values, # Sigma
left_singular_vectors, # U
right_singular_vectors, # V
] = svd(
a, full_matrices=False, compute_uv=True)
    # Saturate small singular values to inf. This has the effect of making
    # `1. / s = 0.` while not resulting in `NaN` gradients.
cutoff = rcond * math_ops.reduce_max(singular_values, axis=-1)
singular_values = array_ops.where_v2(
singular_values > array_ops.expand_dims_v2(cutoff, -1), singular_values,
np.array(np.inf, dtype))
# By the definition of the SVD, `a == u @ s @ v^H`, and the pseudo-inverse
# is defined as `pinv(a) == v @ inv(s) @ u^H`.
a_pinv = math_ops.matmul(
right_singular_vectors / array_ops.expand_dims_v2(singular_values, -2),
left_singular_vectors,
adjoint_b=True)
if a.shape is not None and a.shape.rank is not None:
a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))
return a_pinv
@tf_export('linalg.lu_solve')
def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None):
"""Solves systems of linear eqns `A X = RHS`, given LU factorizations.
Note: this function does not verify the implied matrix is actually invertible
nor is this condition checked even when `validate_args=True`.
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
X` then `perm = argmax(P)`.
rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
`A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[...,
tf.newaxis])[..., 0]`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness. Note: this function does not verify the implied matrix is
actually invertible, even when `validate_args=True`.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'lu_solve').
Returns:
x: The `X` in `A @ X = RHS`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[1., 2],
[3, 4]],
[[7, 8],
[3, 4]]]
inv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
tf.assert_near(tf.matrix_inverse(x), inv_x)
# ==> True
```
"""
with ops.name_scope(name or 'lu_solve'):
lower_upper = ops.convert_to_tensor(
lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
rhs = ops.convert_to_tensor(rhs, dtype_hint=lower_upper.dtype, name='rhs')
assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
if assertions:
with ops.control_dependencies(assertions):
lower_upper = array_ops.identity(lower_upper)
perm = array_ops.identity(perm)
rhs = array_ops.identity(rhs)
if (rhs.shape.rank == 2 and perm.shape.rank == 1):
# Both rhs and perm have scalar batch_shape.
permuted_rhs = array_ops.gather(rhs, perm, axis=-2)
else:
# Either rhs or perm have non-scalar batch_shape or we can't determine
# this information statically.
rhs_shape = array_ops.shape(rhs)
broadcast_batch_shape = array_ops.broadcast_dynamic_shape(
rhs_shape[:-2],
array_ops.shape(perm)[:-1])
d, m = rhs_shape[-2], rhs_shape[-1]
rhs_broadcast_shape = array_ops.concat([broadcast_batch_shape, [d, m]],
axis=0)
# Tile out rhs.
broadcast_rhs = array_ops.broadcast_to(rhs, rhs_broadcast_shape)
broadcast_rhs = array_ops.reshape(broadcast_rhs, [-1, d, m])
# Tile out perm and add batch indices.
broadcast_perm = array_ops.broadcast_to(perm, rhs_broadcast_shape[:-1])
broadcast_perm = array_ops.reshape(broadcast_perm, [-1, d])
broadcast_batch_size = math_ops.reduce_prod(broadcast_batch_shape)
broadcast_batch_indices = array_ops.broadcast_to(
math_ops.range(broadcast_batch_size)[:, array_ops.newaxis],
[broadcast_batch_size, d])
broadcast_perm = array_ops.stack(
[broadcast_batch_indices, broadcast_perm], axis=-1)
permuted_rhs = array_ops.gather_nd(broadcast_rhs, broadcast_perm)
permuted_rhs = array_ops.reshape(permuted_rhs, rhs_broadcast_shape)
lower = set_diag(
band_part(lower_upper, num_lower=-1, num_upper=0),
array_ops.ones(
array_ops.shape(lower_upper)[:-1], dtype=lower_upper.dtype))
return linear_operator_util.matrix_triangular_solve_with_broadcast(
lower_upper, # Only upper is accessed.
linear_operator_util.matrix_triangular_solve_with_broadcast(
lower, permuted_rhs),
lower=False)
@tf_export('linalg.lu_matrix_inverse')
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):
"""Computes the inverse given the LU decomposition(s) of one or more matrices.
This op is conceptually identical to,
```python
  inv_X = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(X))
tf.assert_near(tf.matrix_inverse(X), inv_X)
# ==> True
```
Note: this function does not verify the implied matrix is actually invertible
nor is this condition checked even when `validate_args=True`.
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
X` then `perm = argmax(P)`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness. Note: this function does not verify the implied matrix is
actually invertible, even when `validate_args=True`.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'lu_matrix_inverse').
Returns:
inv_x: The matrix_inv, i.e.,
`tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[3., 4], [1, 2]],
[[7., 8], [3, 4]]]
inv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x))
tf.assert_near(tf.matrix_inverse(x), inv_x)
# ==> True
```
"""
with ops.name_scope(name or 'lu_matrix_inverse'):
lower_upper = ops.convert_to_tensor(
lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
if assertions:
with ops.control_dependencies(assertions):
lower_upper = array_ops.identity(lower_upper)
perm = array_ops.identity(perm)
shape = array_ops.shape(lower_upper)
return lu_solve(
lower_upper,
perm,
rhs=eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype),
validate_args=False)
@tf_export('linalg.lu_reconstruct')
def lu_reconstruct(lower_upper, perm, validate_args=False, name=None):
"""The reconstruct one or more matrices from their LU decomposition(s).
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
X` then `perm = argmax(P)`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'lu_reconstruct').
Returns:
x: The original input to `tf.linalg.lu`, i.e., `x` as in,
`lu_reconstruct(*tf.linalg.lu(x))`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[3., 4], [1, 2]],
[[7., 8], [3, 4]]]
x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x))
tf.assert_near(x, x_reconstructed)
# ==> True
```
"""
with ops.name_scope(name or 'lu_reconstruct'):
lower_upper = ops.convert_to_tensor(
lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
if assertions:
with ops.control_dependencies(assertions):
lower_upper = array_ops.identity(lower_upper)
perm = array_ops.identity(perm)
shape = array_ops.shape(lower_upper)
lower = set_diag(
band_part(lower_upper, num_lower=-1, num_upper=0),
array_ops.ones(shape[:-1], dtype=lower_upper.dtype))
upper = band_part(lower_upper, num_lower=0, num_upper=-1)
x = math_ops.matmul(lower, upper)
if (lower_upper.shape is None or lower_upper.shape.rank is None or
lower_upper.shape.rank != 2):
# We either don't know the batch rank or there are >0 batch dims.
batch_size = math_ops.reduce_prod(shape[:-2])
d = shape[-1]
x = array_ops.reshape(x, [batch_size, d, d])
perm = array_ops.reshape(perm, [batch_size, d])
perm = map_fn.map_fn(array_ops.invert_permutation, perm)
batch_indices = array_ops.broadcast_to(
math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d])
x = array_ops.gather_nd(x, array_ops.stack([batch_indices, perm],
axis=-1))
x = array_ops.reshape(x, shape)
else:
x = array_ops.gather(x, array_ops.invert_permutation(perm))
x.set_shape(lower_upper.shape)
return x
def lu_reconstruct_assertions(lower_upper, perm, validate_args):
"""Returns list of assertions related to `lu_reconstruct` assumptions."""
assertions = []
message = 'Input `lower_upper` must have at least 2 dimensions.'
if lower_upper.shape.rank is not None and lower_upper.shape.rank < 2:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least_v2(lower_upper, rank=2, message=message))
message = '`rank(lower_upper)` must equal `rank(perm) + 1`'
if lower_upper.shape.rank is not None and perm.shape.rank is not None:
if lower_upper.shape.rank != perm.shape.rank + 1:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_rank(
lower_upper, rank=array_ops.rank(perm) + 1, message=message))
message = '`lower_upper` must be square.'
if lower_upper.shape[:-2].is_fully_defined():
if lower_upper.shape[-2] != lower_upper.shape[-1]:
raise ValueError(message)
elif validate_args:
m, n = array_ops.split(
array_ops.shape(lower_upper)[-2:], num_or_size_splits=2)
assertions.append(check_ops.assert_equal(m, n, message=message))
return assertions
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
"""Returns list of assertions related to `lu_solve` assumptions."""
assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
message = 'Input `rhs` must have at least 2 dimensions.'
if rhs.shape.ndims is not None:
if rhs.shape.ndims < 2:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least(rhs, rank=2, message=message))
message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'
if (lower_upper.shape[-1] is not None and rhs.shape[-2] is not None):
if lower_upper.shape[-1] != rhs.shape[-2]:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_equal(
array_ops.shape(lower_upper)[-1],
array_ops.shape(rhs)[-2],
message=message))
return assertions
|
|
"""Support for Climate devices of (EMEA/EU-based) Honeywell TCC systems."""
from __future__ import annotations
from datetime import datetime as dt
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_ECO,
PRESET_HOME,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import PRECISION_TENTHS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
from . import (
ATTR_DURATION_DAYS,
ATTR_DURATION_HOURS,
ATTR_DURATION_UNTIL,
ATTR_SYSTEM_MODE,
ATTR_ZONE_TEMP,
CONF_LOCATION_IDX,
SVC_RESET_ZONE_OVERRIDE,
SVC_SET_SYSTEM_MODE,
EvoChild,
EvoDevice,
)
from .const import (
DOMAIN,
EVO_AUTO,
EVO_AUTOECO,
EVO_AWAY,
EVO_CUSTOM,
EVO_DAYOFF,
EVO_FOLLOW,
EVO_HEATOFF,
EVO_PERMOVER,
EVO_RESET,
EVO_TEMPOVER,
)
_LOGGER = logging.getLogger(__name__)
PRESET_RESET = "Reset" # reset all child zones to EVO_FOLLOW
PRESET_CUSTOM = "Custom"
HA_HVAC_TO_TCS = {HVAC_MODE_OFF: EVO_HEATOFF, HVAC_MODE_HEAT: EVO_AUTO}
TCS_PRESET_TO_HA = {
EVO_AWAY: PRESET_AWAY,
EVO_CUSTOM: PRESET_CUSTOM,
EVO_AUTOECO: PRESET_ECO,
EVO_DAYOFF: PRESET_HOME,
EVO_RESET: PRESET_RESET,
} # EVO_AUTO: None,
HA_PRESET_TO_TCS = {v: k for k, v in TCS_PRESET_TO_HA.items()}
EVO_PRESET_TO_HA = {
EVO_FOLLOW: PRESET_NONE,
EVO_TEMPOVER: "temporary",
EVO_PERMOVER: "permanent",
}
HA_PRESET_TO_EVO = {v: k for k, v in EVO_PRESET_TO_HA.items()}
STATE_ATTRS_TCS = ["systemId", "activeFaults", "systemModeStatus"]
STATE_ATTRS_ZONES = ["zoneId", "activeFaults", "setpointStatus", "temperatureStatus"]
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Create the evohome Controller, and its Zones, if any."""
if discovery_info is None:
return
broker = hass.data[DOMAIN]["broker"]
_LOGGER.debug(
"Found the Location/Controller (%s), id=%s, name=%s (location_idx=%s)",
broker.tcs.modelType,
broker.tcs.systemId,
broker.tcs.location.name,
broker.params[CONF_LOCATION_IDX],
)
controller = EvoController(broker, broker.tcs)
zones = []
for zone in broker.tcs.zones.values():
if zone.modelType == "HeatingZone" or zone.zoneType == "Thermostat":
_LOGGER.debug(
"Adding: %s (%s), id=%s, name=%s",
zone.zoneType,
zone.modelType,
zone.zoneId,
zone.name,
)
new_entity = EvoZone(broker, zone)
zones.append(new_entity)
else:
_LOGGER.warning(
"Ignoring: %s (%s), id=%s, name=%s: unknown/invalid zone type, "
"report as an issue if you feel this zone type should be supported",
zone.zoneType,
zone.modelType,
zone.zoneId,
zone.name,
)
async_add_entities([controller] + zones, update_before_add=True)
class EvoClimateEntity(EvoDevice, ClimateEntity):
"""Base for an evohome Climate device."""
def __init__(self, evo_broker, evo_device) -> None:
"""Initialize a Climate device."""
super().__init__(evo_broker, evo_device)
self._preset_modes = None
@property
def hvac_modes(self) -> list[str]:
"""Return a list of available hvac operation modes."""
return list(HA_HVAC_TO_TCS)
@property
def preset_modes(self) -> list[str] | None:
"""Return a list of available preset modes."""
return self._preset_modes
class EvoZone(EvoChild, EvoClimateEntity):
"""Base for a Honeywell TCC Zone."""
def __init__(self, evo_broker, evo_device) -> None:
"""Initialize a Honeywell TCC Zone."""
super().__init__(evo_broker, evo_device)
if evo_device.modelType.startswith("VisionProWifi"):
# this system does not have a distinct ID for the zone
self._unique_id = f"{evo_device.zoneId}z"
else:
self._unique_id = evo_device.zoneId
self._name = evo_device.name
self._icon = "mdi:radiator"
if evo_broker.client_v1:
self._precision = PRECISION_TENTHS
else:
self._precision = self._evo_device.setpointCapabilities["valueResolution"]
self._preset_modes = list(HA_PRESET_TO_EVO)
self._supported_features = SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE
async def async_zone_svc_request(self, service: dict, data: dict) -> None:
"""Process a service request (setpoint override) for a zone."""
if service == SVC_RESET_ZONE_OVERRIDE:
await self._evo_broker.call_client_api(
self._evo_device.cancel_temp_override()
)
return
# otherwise it is SVC_SET_ZONE_OVERRIDE
temperature = max(min(data[ATTR_ZONE_TEMP], self.max_temp), self.min_temp)
if ATTR_DURATION_UNTIL in data:
duration = data[ATTR_DURATION_UNTIL]
if duration.total_seconds() == 0:
await self._update_schedule()
until = dt_util.parse_datetime(self.setpoints.get("next_sp_from", ""))
else:
until = dt_util.now() + data[ATTR_DURATION_UNTIL]
else:
until = None # indefinitely
until = dt_util.as_utc(until) if until else None
await self._evo_broker.call_client_api(
self._evo_device.set_temperature(temperature, until=until)
)
@property
def hvac_mode(self) -> str:
"""Return the current operating mode of a Zone."""
if self._evo_tcs.systemModeStatus["mode"] in (EVO_AWAY, EVO_HEATOFF):
return HVAC_MODE_AUTO
is_off = self.target_temperature <= self.min_temp
return HVAC_MODE_OFF if is_off else HVAC_MODE_HEAT
@property
def target_temperature(self) -> float:
"""Return the target temperature of a Zone."""
return self._evo_device.setpointStatus["targetHeatTemperature"]
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode, e.g., home, away, temp."""
if self._evo_tcs.systemModeStatus["mode"] in (EVO_AWAY, EVO_HEATOFF):
return TCS_PRESET_TO_HA.get(self._evo_tcs.systemModeStatus["mode"])
return EVO_PRESET_TO_HA.get(self._evo_device.setpointStatus["setpointMode"])
@property
def min_temp(self) -> float:
"""Return the minimum target temperature of a Zone.
The default is 5, but is user-configurable within 5-35 (in Celsius).
"""
return self._evo_device.setpointCapabilities["minHeatSetpoint"]
@property
def max_temp(self) -> float:
"""Return the maximum target temperature of a Zone.
The default is 35, but is user-configurable within 5-35 (in Celsius).
"""
return self._evo_device.setpointCapabilities["maxHeatSetpoint"]
async def async_set_temperature(self, **kwargs) -> None:
"""Set a new target temperature."""
temperature = kwargs["temperature"]
if (until := kwargs.get("until")) is None:
if self._evo_device.setpointStatus["setpointMode"] == EVO_FOLLOW:
await self._update_schedule()
until = dt_util.parse_datetime(self.setpoints.get("next_sp_from", ""))
elif self._evo_device.setpointStatus["setpointMode"] == EVO_TEMPOVER:
until = dt_util.parse_datetime(self._evo_device.setpointStatus["until"])
until = dt_util.as_utc(until) if until else None
await self._evo_broker.call_client_api(
self._evo_device.set_temperature(temperature, until=until)
)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set a Zone to one of its native EVO_* operating modes.
Zones inherit their _effective_ operating mode from their Controller.
Usually, Zones are in 'FollowSchedule' mode, where their setpoints are a
function of their own schedule and the Controller's operating mode, e.g.
'AutoWithEco' mode means their setpoint is (by default) 3C less than scheduled.
        However, Zones can _override_ these setpoints, either indefinitely
        ('PermanentOverride' mode) or for a set period of time ('TemporaryOverride'
        mode, after which they revert to 'FollowSchedule' mode).
        Finally, some of the Controller's operating modes are _forced_ upon the Zones,
        regardless of any override mode, e.g. 'HeatingOff' sets Zones to (by default)
        5C, and 'Away' sets Zones to (by default) 12C.
"""
if hvac_mode == HVAC_MODE_OFF:
await self._evo_broker.call_client_api(
self._evo_device.set_temperature(self.min_temp, until=None)
)
else: # HVAC_MODE_HEAT
await self._evo_broker.call_client_api(
self._evo_device.cancel_temp_override()
)
async def async_set_preset_mode(self, preset_mode: str | None) -> None:
"""Set the preset mode; if None, then revert to following the schedule."""
evo_preset_mode = HA_PRESET_TO_EVO.get(preset_mode, EVO_FOLLOW)
if evo_preset_mode == EVO_FOLLOW:
await self._evo_broker.call_client_api(
self._evo_device.cancel_temp_override()
)
return
temperature = self._evo_device.setpointStatus["targetHeatTemperature"]
if evo_preset_mode == EVO_TEMPOVER:
await self._update_schedule()
until = dt_util.parse_datetime(self.setpoints.get("next_sp_from", ""))
else: # EVO_PERMOVER
until = None
until = dt_util.as_utc(until) if until else None
await self._evo_broker.call_client_api(
self._evo_device.set_temperature(temperature, until=until)
)
async def async_update(self) -> None:
"""Get the latest state data for a Zone."""
await super().async_update()
for attr in STATE_ATTRS_ZONES:
self._device_state_attrs[attr] = getattr(self._evo_device, attr)
class EvoController(EvoClimateEntity):
"""Base for a Honeywell TCC Controller/Location.
The Controller (aka TCS, temperature control system) is the parent of all the child
(CH/DHW) devices. It is implemented as a Climate entity to expose the controller's
operating modes to HA.
It is assumed there is only one TCS per location, and they are thus synonymous.
"""
def __init__(self, evo_broker, evo_device) -> None:
"""Initialize a Honeywell TCC Controller/Location."""
super().__init__(evo_broker, evo_device)
self._unique_id = evo_device.systemId
self._name = evo_device.location.name
self._icon = "mdi:thermostat"
self._precision = PRECISION_TENTHS
modes = [m["systemMode"] for m in evo_broker.config["allowedSystemModes"]]
self._preset_modes = [
TCS_PRESET_TO_HA[m] for m in modes if m in list(TCS_PRESET_TO_HA)
]
self._supported_features = SUPPORT_PRESET_MODE if self._preset_modes else 0
async def async_tcs_svc_request(self, service: dict, data: dict) -> None:
"""Process a service request (system mode) for a controller.
        Data validation is not required; it will have been done upstream.
"""
if service == SVC_SET_SYSTEM_MODE:
mode = data[ATTR_SYSTEM_MODE]
else: # otherwise it is SVC_RESET_SYSTEM
mode = EVO_RESET
if ATTR_DURATION_DAYS in data:
until = dt_util.start_of_local_day()
until += data[ATTR_DURATION_DAYS]
elif ATTR_DURATION_HOURS in data:
until = dt_util.now() + data[ATTR_DURATION_HOURS]
else:
until = None
await self._set_tcs_mode(mode, until=until)
async def _set_tcs_mode(self, mode: str, until: dt | None = None) -> None:
"""Set a Controller to any of its native EVO_* operating modes."""
until = dt_util.as_utc(until) if until else None
await self._evo_broker.call_client_api(
self._evo_tcs.set_status(mode, until=until)
)
@property
def hvac_mode(self) -> str:
"""Return the current operating mode of a Controller."""
tcs_mode = self._evo_tcs.systemModeStatus["mode"]
return HVAC_MODE_OFF if tcs_mode == EVO_HEATOFF else HVAC_MODE_HEAT
@property
def current_temperature(self) -> float | None:
"""Return the average current temperature of the heating Zones.
Controllers do not have a current temp, but one is expected by HA.
"""
temps = [
z.temperatureStatus["temperature"]
for z in self._evo_tcs.zones.values()
if z.temperatureStatus["isAvailable"]
]
return round(sum(temps) / len(temps), 1) if temps else None
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode, e.g., home, away, temp."""
return TCS_PRESET_TO_HA.get(self._evo_tcs.systemModeStatus["mode"])
@property
def min_temp(self) -> float:
"""Return None as Controllers don't have a target temperature."""
return None
@property
def max_temp(self) -> float:
"""Return None as Controllers don't have a target temperature."""
return None
async def async_set_temperature(self, **kwargs) -> None:
"""Raise exception as Controllers don't have a target temperature."""
raise NotImplementedError("Evohome Controllers don't have target temperatures.")
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set an operating mode for a Controller."""
await self._set_tcs_mode(HA_HVAC_TO_TCS.get(hvac_mode))
async def async_set_preset_mode(self, preset_mode: str | None) -> None:
"""Set the preset mode; if None, then revert to 'Auto' mode."""
await self._set_tcs_mode(HA_PRESET_TO_TCS.get(preset_mode, EVO_AUTO))
async def async_update(self) -> None:
"""Get the latest state data for a Controller."""
self._device_state_attrs = {}
attrs = self._device_state_attrs
for attr in STATE_ATTRS_TCS:
if attr == "activeFaults":
attrs["activeSystemFaults"] = getattr(self._evo_tcs, attr)
else:
attrs[attr] = getattr(self._evo_tcs, attr)
|
|
# Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Process the output of the power allocator trace in the current
directory's trace.dat"""
from collections import OrderedDict
import pandas as pd
import re
from matplotlib import pyplot as plt
from trappy.base import Base
from trappy.run import Run
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup, plot_hist
class Thermal(Base):
"""Process the thermal framework data in a ftrace dump"""
unique_word = "thermal_temperature:"
name = "thermal"
pivot = "id"
def __init__(self):
super(Thermal, self).__init__(unique_word=self.unique_word)
def plot_temperature(self, control_temperature=None, title="", width=None,
height=None, ylim="range", ax=None, legend_label=""):
"""Plot the temperature.
        If control_temperature is a pd.Series representing the (possible)
        variation of the control temperature during the run, draw it using a
        dashed yellow line. Otherwise, only the temperature is
        plotted.
"""
title = normalize_title("Temperature", title)
if len(self.data_frame) == 0:
raise ValueError("Empty DataFrame")
setup_plot = False
if not ax:
ax = pre_plot_setup(width, height)
setup_plot = True
temp_label = normalize_title("Temperature", legend_label)
(self.data_frame["temp"] / 1000).plot(ax=ax, label=temp_label)
if control_temperature is not None:
ct_label = normalize_title("Control", legend_label)
control_temperature.plot(ax=ax, color="y", linestyle="--",
label=ct_label)
if setup_plot:
post_plot_setup(ax, title=title, ylim=ylim)
plt.legend()
def plot_temperature_hist(self, ax, title):
"""Plot a temperature histogram"""
temps = self.data_frame["temp"] / 1000
title = normalize_title("Temperature", title)
xlim = (0, temps.max())
plot_hist(temps, ax, title, "C", 30, "Temperature", xlim, "default")
Run.register_class(Thermal, "thermal")
class ThermalGovernor(Base):
"""Process the power allocator data in a ftrace dump"""
unique_word = "thermal_power_allocator:"
name = "thermal_governor"
pivot = "thermal_zone_id"
def __init__(self):
super(ThermalGovernor, self).__init__(
unique_word=self.unique_word,
)
def plot_temperature(self, title="", width=None, height=None, ylim="range",
ax=None, legend_label=""):
"""Plot the temperature"""
dfr = self.data_frame
curr_temp = dfr["current_temperature"]
control_temp_series = (curr_temp + dfr["delta_temperature"]) / 1000
title = normalize_title("Temperature", title)
setup_plot = False
if not ax:
ax = pre_plot_setup(width, height)
setup_plot = True
temp_label = normalize_title("Temperature", legend_label)
(curr_temp / 1000).plot(ax=ax, label=temp_label)
control_temp_series.plot(ax=ax, color="y", linestyle="--",
label="control temperature")
if setup_plot:
post_plot_setup(ax, title=title, ylim=ylim)
plt.legend()
def plot_input_power(self, actor_order, title="", width=None, height=None,
ax=None):
"""Plot input power
actor_order is an array with the order in which the actors
were registered.
"""
dfr = self.data_frame
in_cols = [s for s in dfr.columns if re.match("req_power[0-9]+", s)]
plot_dfr = dfr[in_cols]
# Rename the columns from "req_power0" to "A15" or whatever is
# in actor_order. Note that we can do it just with an
# assignment because the columns are already sorted (i.e.:
# req_power0, req_power1...)
plot_dfr.columns = actor_order
title = normalize_title("Input Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_weighted_input_power(self, actor_weights, title="", width=None,
height=None, ax=None):
"""Plot weighted input power
actor_weights is an array of tuples. First element of the
tuple is the name of the actor, the second is the weight. The
array is in the same order as the req_power appear in the
trace.
"""
dfr = self.data_frame
in_cols = [s for s in dfr.columns if re.match(r"req_power\d+", s)]
plot_dfr_dict = OrderedDict()
for in_col, (name, weight) in zip(in_cols, actor_weights):
plot_dfr_dict[name] = dfr[in_col] * weight / 1024
plot_dfr = pd.DataFrame(plot_dfr_dict)
title = normalize_title("Weighted Input Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_output_power(self, actor_order, title="", width=None, height=None,
ax=None):
"""Plot output power
actor_order is an array with the order in which the actors
were registered.
"""
out_cols = [s for s in self.data_frame.columns
if re.match("granted_power[0-9]+", s)]
# See the note in plot_input_power()
plot_dfr = self.data_frame[out_cols]
plot_dfr.columns = actor_order
title = normalize_title("Output Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_inout_power(self, title=""):
"""Make multiple plots showing input and output power for each actor"""
dfr = self.data_frame
actors = []
for col in dfr.columns:
match = re.match("P(.*)_in", col)
if match and col != "Ptot_in":
actors.append(match.group(1))
for actor in actors:
cols = ["P" + actor + "_in", "P" + actor + "_out"]
this_title = normalize_title(actor, title)
dfr[cols].plot(title=this_title)
Run.register_class(ThermalGovernor, "thermal")
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import toscaparser.utils.yamlparser
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import URLException
from toscaparser.common.exception import ValidationError
from toscaparser.utils.gettextutils import _
from translator.common.utils import CompareUtils
from translator.common.utils import YamlUtils
from translator.osc.v1.tests import fakes
from translator.osc.v1.tests import utils
from translator.osc.v1 import translate
class TestTranslateTemplate(testtools.TestCase):
def setUp(self):
super(TestTranslateTemplate, self).setUp()
self.app = fakes.FakeApp()
self.app.client_manager = fakes.FakeClientManager()
self.app.client_manager.translator = None
self.cmd = translate.TranslateTemplate(self.app, None)
def check_parser(self, cmd, args, verify_args):
cmd_parser = cmd.get_parser('check_parser')
try:
parsed_args = cmd_parser.parse_args(args)
except SystemExit:
raise Exception("Argument parse failed")
for av in verify_args:
attr, value = av
if attr:
self.assertIn(attr, parsed_args)
self.assertEqual(getattr(parsed_args, attr), value)
return parsed_args
def _check_error(self, tosca_file, hot_file, params, assert_error,
expected_msg, c_error):
arglist = ["--template-file", tosca_file,
"--template-type", "tosca"]
parsed_args = self.check_parser(self.cmd, arglist, [])
parsed_args.parameter = params
self.assertRaises(assert_error, self.cmd.take_action,
parsed_args)
ExceptionCollector.assertExceptionMessage(c_error, expected_msg)
@mock.patch('sys.stdout', new_callable=StringIO)
def _check_success(self, tosca_file, hot_file, params, mock_stdout):
arglist = ["--template-file", tosca_file,
"--template-type", "tosca"]
parsed_args = self.check_parser(self.cmd, arglist, [])
parsed_args.parameter = params
self.cmd.take_action(parsed_args)
expected_output = YamlUtils.get_dict(hot_file)
mock_stdout_yaml = "\n".join(mock_stdout.getvalue().split("\n"))
actual_output = toscaparser.utils.yamlparser.simple_parse(
mock_stdout_yaml)
self.assertEqual({}, CompareUtils.diff_dicts(
actual_output, expected_output))
def test_osc_translate_single_server(self):
tosca_file = utils.get_template_path("tosca_single_server.yaml")
hot_file = utils.get_template_path("hot_output/hot_single_server.yaml")
params = {'cpus': 1}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_single_server_defaults_with_input(self):
tosca_file = utils.get_template_path(
"tosca_single_server_with_defaults.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_single_server_with_defaults_with_input.yaml")
params = {'cpus': '1'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_single_server_defaults_without_input(self):
tosca_file = utils.get_template_path(
"tosca_single_server_with_defaults.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_single_server_with_defaults_without_input.yaml")
self._check_success(tosca_file, hot_file, {})
def test_osc_translate_wordpress_single_instance(self):
tosca_file = utils.get_template_path(
"tosca_single_instance_wordpress.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_single_instance_wordpress.yaml")
params = {'db_name': 'wordpress',
'db_user': 'wp_user',
'db_pwd': 'wp_pass',
'db_root_pwd': 'passw0rd',
'db_port': 3366,
'cpus': 8}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_helloworld(self):
tosca_file = utils.get_template_path(
"tosca_helloworld.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_hello_world.yaml")
self._check_success(tosca_file, hot_file, {})
def test_osc_translate_host_assignment(self):
tosca_file = utils.get_template_path(
"test_host_assignment.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_host_assignment.yaml")
self._check_success(tosca_file, hot_file, {})
def test_osc_translate_elk(self):
tosca_file = utils.get_template_path(
"tosca_elk.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_elk.yaml")
params = {'github_url':
'http://github.com/paypal/rest-api-sample-app-nodejs.git',
'my_cpus': 4}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_nodejs_mongodb_two_instances(self):
tosca_file = utils.get_template_path(
"tosca_nodejs_mongodb_two_instances.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_nodejs_mongodb_two_instances.yaml")
params = {'github_url':
'http://github.com/paypal/rest-api-sample-app-nodejs.git',
'my_cpus': 4}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_blockstorage_with_attachment(self):
tosca_file = utils.get_template_path(
"storage/tosca_blockstorage_with_attachment.yaml")
hot_file = utils.get_template_path(
"hot_output/storage/hot_blockstorage_with_attachment.yaml")
params = {'cpus': 1,
'storage_location': '/dev/vdc',
'storage_size': '2000 MB',
'storage_snapshot_id': 'ssid'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_blockstorage_with_custom_relationship_type(self):
tosca_file = utils.get_template_path(
"storage/tosca_blockstorage_with_custom_relationship_type.yaml")
hot_file = utils.get_template_path(
"hot_output/storage/"
"hot_blockstorage_with_custom_relationship_type.yaml")
params = {'cpus': 1,
'storage_location': '/dev/vdc',
'storage_size': '1 GB',
'storage_snapshot_id': 'ssid'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_blockstorage_with_relationship_template(self):
tosca_file = utils.get_template_path(
"storage/" +
"tosca_blockstorage_with_relationship_template.yaml")
hot_file = utils.get_template_path(
"hot_output/storage/" +
"hot_blockstorage_with_relationship_template.yaml")
params = {'cpus': 1,
'storage_location': '/dev/vdc',
'storage_size': '1 GB'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_blockstorage_with_attachment_notation1(self):
tosca_file = utils.get_template_path(
"storage/" +
"tosca_blockstorage_with_attachment_notation1.yaml")
hot_file1 = utils.get_template_path(
"hot_output/storage/" +
"hot_blockstorage_with_attachment_notation1_alt1.yaml")
hot_file2 = utils.get_template_path(
"hot_output/storage/" +
"hot_blockstorage_with_attachment_notation1_alt2.yaml")
params = {'cpus': 1,
'storage_location': 'some_folder',
'storage_size': '1 GB',
'storage_snapshot_id': 'ssid'}
try:
self._check_success(tosca_file, hot_file1, params)
except Exception:
self._check_success(tosca_file, hot_file2, params)
def test_osc_translate_blockstorage_with_attachment_notation2(self):
tosca_file = utils.get_template_path(
"storage/" +
"tosca_blockstorage_with_attachment_notation2.yaml")
hot_file1 = utils.get_template_path(
"hot_output/storage/" +
"hot_blockstorage_with_attachment_notation2_alt1.yaml")
hot_file2 = utils.get_template_path(
"hot_output/storage/" +
"hot_blockstorage_with_attachment_notation2_alt2.yaml")
params = {'cpus': 1,
'storage_location': '/dev/vdc',
'storage_size': '1 GB',
'storage_snapshot_id': 'ssid'}
try:
self._check_success(tosca_file, hot_file1, params)
except Exception:
self._check_success(tosca_file, hot_file2, params)
def test_osc_translate_multiple_blockstorage_with_attachment(self):
tosca_file = utils.get_template_path(
"storage/" +
"tosca_multiple_blockstorage_with_attachment.yaml")
hot_file1 = utils.get_template_path(
"hot_output/storage/" +
"hot_multiple_blockstorage_with_attachment_alt1.yaml")
hot_file2 = utils.get_template_path(
"hot_output/storage/" +
"hot_multiple_blockstorage_with_attachment_alt2.yaml")
params = {'cpus': 1,
'storage_location': '/dev/vdc',
'storage_size': '1 GB',
'storage_snapshot_id': 'ssid'}
try:
self._check_success(tosca_file, hot_file1, params)
except Exception:
self._check_success(tosca_file, hot_file2, params)
def test_osc_translate_single_object_store(self):
tosca_file = utils.get_template_path(
"storage/tosca_single_object_store.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_single_object_store.yaml")
params = {'objectstore_name': 'myobjstore'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_one_server_one_network(self):
tosca_file = utils.get_template_path(
"network/tosca_one_server_one_network.yaml")
hot_file = utils.get_template_path(
"hot_output/network/" +
"hot_one_server_one_network.yaml")
params = {'network_name': 'private_net'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_server_on_existing_network(self):
tosca_file = utils.get_template_path(
"network/" +
"tosca_server_on_existing_network.yaml")
hot_file = utils.get_template_path(
"hot_output/network/" +
"hot_server_on_existing_network.yaml")
params = {'network_name': 'private_net'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_two_servers_one_network(self):
tosca_file = utils.get_template_path(
"network/tosca_two_servers_one_network.yaml")
hot_file = utils.get_template_path(
"hot_output/network/" +
"hot_two_servers_one_network.yaml")
params = {'network_name': 'my_private_net',
'network_cidr': '10.0.0.0/24',
'network_start_ip': '10.0.0.100',
'network_end_ip': '10.0.0.150'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_one_server_three_networks(self):
tosca_file = utils.get_template_path(
"network/" +
"tosca_one_server_three_networks.yaml")
hot_file = utils.get_template_path(
"hot_output/network/" +
"hot_one_server_three_networks.yaml")
self._check_success(tosca_file, hot_file, {})
def test_osc_translate_software_component(self):
tosca_file = utils.get_template_path("tosca_software_component.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_software_component.yaml")
params = {'cpus': '1',
'download_url': 'http://www.software.com/download'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_web_application(self):
tosca_file = utils.get_template_path("tosca_web_application.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_web_application.yaml")
params = {'cpus': '2', 'context_root': 'my_web_app'}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_template_with_url_import(self):
tosca_file = utils.get_template_path(
"tosca_single_instance_wordpress_with_url_import.yaml")
hot_file = utils.get_template_path(
"hot_output/hot_single_instance_wordpress.yaml")
params = {'db_name': 'wordpress',
'db_user': 'wp_user',
'db_pwd': 'wp_pass',
'db_root_pwd': 'passw0rd',
'db_port': 3366,
'cpus': 8}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_template_by_url_with_local_import(self):
tosca_file = ("https://raw.githubusercontent.com/openstack/" +
"heat-translator/master/translator/tests/data/" +
"tosca_single_instance_wordpress.yaml")
hot_file = utils.get_template_path(
"hot_output/" +
"hot_single_instance_wordpress.yaml")
params = {'db_name': 'wordpress',
'db_user': 'wp_user',
'db_pwd': 'wp_pass',
'db_root_pwd': 'passw0rd',
'db_port': 3366,
'cpus': 8}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_template_by_url_with_local_abspath_import(self):
tosca_file = ("https://raw.githubusercontent.com/openstack/" +
"heat-translator/master/translator/tests/data/" +
"tosca_single_instance_wordpress_with_local_abspath" +
"_import.yaml")
hot_file = utils.get_template_path(
"hot_output/" +
"hot_single_instance_wordpress.yaml")
params = {'db_name': 'wordpress',
'db_user': 'wp_user',
'db_pwd': 'wp_pass',
'db_root_pwd': 'passw0rd',
'db_port': 3366,
'cpus': 8}
expected_msg = _('Absolute file name "/tmp/wordpress.yaml" cannot be '
'used in a URL-based input template "https://raw.'
'githubusercontent.com/openstack/heat-translator/'
'master/translator/tests/data/tosca_single_instance_'
'wordpress_with_local_abspath_import.yaml".')
self._check_error(tosca_file, hot_file, params, ValidationError,
expected_msg, ImportError)
def test_osc_translate_template_by_url_with_url_import(self):
tosca_url = ("https://raw.githubusercontent.com/openstack/" +
"heat-translator/master/translator/tests/data/" +
"tosca_single_instance_wordpress_with_url_import.yaml")
hot_file = utils.get_template_path(
"hot_output/" +
"hot_single_instance_wordpress.yaml")
params = {'db_name': 'wordpress',
'db_user': 'wp_user',
'db_pwd': 'wp_pass',
'db_root_pwd': 'passw0rd',
'db_port': 3366,
'cpus': 8}
self._check_success(tosca_url, hot_file, params)
def test_osc_translate_hello_world_csar(self):
tosca_file = utils.get_template_path("csar_hello_world.zip")
hot_file = utils.get_template_path(
"hot_output/hot_hello_world.yaml")
self._check_success(tosca_file, hot_file, {})
def test_osc_single_instance_wordpress_csar(self):
tosca_file = utils.get_template_path(
"csar_single_instance_wordpress.zip")
hot_file = utils.get_template_path(
"hot_output/" +
"hot_single_instance_wordpress_from_csar.yaml")
params = {'db_name': 'wordpress',
'db_user': 'wp_user',
'db_pwd': 'wp_pass',
'db_root_pwd': 'passw0rd',
'db_port': 3366,
'cpus': 8}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_elk_csar_from_url(self):
tosca_file = ("https://github.com/openstack/heat-translator/raw/" +
"master/translator/tests/data/csar_elk.zip")
hot_file = utils.get_template_path(
"hot_output/hot_elk_from_csar.yaml")
params = {'github_url':
'http://github.com/paypal/rest-api-sample-app-nodejs.git',
'my_cpus': 4}
self._check_success(tosca_file, hot_file, params)
def test_osc_translate_csar_not_zip(self):
tosca_file = utils.get_template_path("csar_not_zip.zip")
hot_file = ''
expected_msg = _('"%s" is not a valid zip file.') % tosca_file
self._check_error(tosca_file, hot_file, {}, ValidationError,
expected_msg, ValidationError)
def test_osc_translate_csar_metadata_not_yaml(self):
tosca_file = utils.get_template_path("csar_metadata_not_yaml.zip")
hot_file = ''
expected_msg = _('The file "TOSCA-Metadata/TOSCA.meta" in the CSAR '
'"%s" does not contain valid YAML'
' content.') % tosca_file
self._check_error(tosca_file, hot_file, {}, ValidationError,
expected_msg, ValidationError)
def test_osc_translate_csar_wrong_metadata_file(self):
tosca_file = utils.get_template_path("csar_wrong_metadata_file.zip")
hot_file = ''
expected_msg = _('"%s" is not a valid CSAR as it does not contain the '
'required file "TOSCA.meta" in the folder '
'"TOSCA-Metadata".') % tosca_file
self._check_error(tosca_file, hot_file, {}, ValidationError,
expected_msg, ValidationError)
def test_osc_translate_csar_wordpress_invalid_import_path(self):
tosca_file = utils.get_template_path(
"csar_wordpress_invalid_import_path.zip")
hot_file = ''
expected_msg = _('Import '
'"Invalid_import_path/wordpress.yaml" is not valid.')
self._check_error(tosca_file, hot_file, {}, ValidationError,
expected_msg, ImportError)
def test_osc_translate_csar_wordpress_invalid_script_url(self):
tosca_file = utils.get_template_path(
"csar_wordpress_invalid_script_url.zip")
hot_file = ''
expected_msg = _('The resource at '
'"https://raw.githubusercontent.com/openstack/'
'heat-translator/master/translator/tests/data/'
'custom_types/wordpress1.yaml" cannot be accessed.')
self._check_error(tosca_file, hot_file, {}, ValidationError,
expected_msg, URLException)
|
|
#!/usr/bin/env python
'''
An abstraction of the IPython Parallel task interface.
Given a PBS_NODEFILE, this class launches the controller and engines via ssh
using a temporary profile.
Author: Monte Lunacek, [email protected]
'''
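# Typical usage (a sketch; assumes the script runs inside a PBS job so that
# PBS_NODEFILE is set, and that ipcontroller/ipengine are on the PATH):
#
#   cluster = Cluster(ppn=12, debug=True)
#   rc = cluster.client()                      # IPython.parallel Client
#   results = rc[:].map_sync(lambda x: x * x, range(16))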
import os
import subprocess
import time
import socket
import signal
import shutil
import sys
import argparse
import logging
import uuid
import jinja2 as jin
import datetime
import json
from IPython import parallel
# Template constants
ipcontroller = jin.Template('''
c = get_config()
c.HubFactory.ip = '*'
''')
ipengine = jin.Template('''
c = get_config()
c.EngineFactory.timeout = 300
c.IPEngineApp.log_to_file = True
c.IPEngineApp.log_level = 30
c.EngineFactory.ip = '*'
''')
class ClusterFormatter(logging.Formatter):
def format(self, record):
a = "{0}: {1}".format(datetime.date.today(), str(record.lineno).rjust(4))
return "{0} {1}".format(a, record.msg)
def get_logger(debug):
logger = logging.getLogger('ipcluster')
logger.setLevel(logging.CRITICAL)
    if debug:
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
formatter = ClusterFormatter()
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
class Cluster:
def __init__(self, **kwargs):
"""Creates a profile, logger, starts engines and controllers"""
self.args = {}
self.args['ppn'] = kwargs.get('ppn', 12)
self.args['debug'] = kwargs.get('debug', False)
self.args['terminate'] = kwargs.get('terminate', True)
self.args['profile'] = kwargs.get('profile', None)
self.cwd = os.getcwd()
self.directory = os.getcwd()
self.set_ppn(self.args['ppn'])
self.node_list = self.pbs_nodes()
self.logger = get_logger(self.args['debug'])
# Create the profile
self.profile = 'temp_' + str(uuid.uuid1())
if self.args['profile'] is not None:
self.profile = self.args['profile']
self.logger.debug(self.profile)
self.logger.debug(self.args['profile'])
self.ipengine_path()
self.create_profile()
self.start_controller()
self.start_engines()
self.save()
self.logger.debug('Engines have started')
def set_ppn(self,ppn):
"""Environment variable override"""
try:
ppn = os.environ['PPN']
except KeyError, e:
pass
self.ppn = int(ppn)
def ipengine_path(self):
"""Find the full path for ipengine"""
p = subprocess.Popen(['which','ipengine'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
res = p.stdout.readlines()
if len(res) == 0:
exit(1)
self.ipengine = res[0].strip('\n')
def pbs_nodes(self):
"""Returns an array of nodes from the PBS_NODEFILE"""
nodes = []
try:
filename = os.environ['PBS_NODEFILE']
except KeyError, e:
exit(1)
with open(filename,'r') as file:
for line in file:
node_name = line.split()[0]
if node_name not in nodes:
nodes.append(node_name)
#TODO add self.args['nodes'] as an option
return nodes
def create_profile(self):
"""Calls the ipython profile create command"""
msg = 'creating profile {0}'.format(self.profile)
self.logger.debug(msg)
cmd = subprocess.Popen(['ipython','profile','create','--parallel','--profile='+self.profile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid)
cmd.wait()
# Append settings
self.profile_directory = os.path.join(os.path.join(os.environ['HOME'],'.ipython'),'profile_'+ self.profile)
tmp = ipcontroller.render({})
with open(os.path.join(self.profile_directory,'ipcontroller_config.py'),'w') as f:
f.write(tmp)
tmp = ipengine.render({})
with open(os.path.join(self.profile_directory,'ipengine_config.py'),'w') as f:
f.write(tmp)
def start_controller(self):
"""Starts the ipcontroller"""
self.logger.debug('starting controller')
cmd = ['ipcontroller']
cmd.append('--profile='+self.profile)
cmd.append('--log-to-file')
cmd.append('--log-level=50')
cmd.append("--ip='*'")
self.controller = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid)
time.sleep(1)
self.wait_for_controller()
def wait_for_controller(self):
"""Loops until the controller is ready"""
tic = time.time()
while True:
if time.time() - tic > 30:
break
try:
rc = parallel.Client(profile=self.profile)
return True
except ValueError, e:
time.sleep(2)
except IOError, e:
time.sleep(2)
except:
time.sleep(2)
def start_engines(self):
        """Starts and waits for the engines"""
        msg = 'starting {0} engines'.format(len(self.node_list)*self.ppn)
        self.logger.debug(msg)
self.engines = []
self.hostname = socket.gethostname()
for node in self.node_list:
for i in xrange(self.ppn):
if self.hostname != node:
cmd = ['ssh']
cmd.append(node)
cmd.append(self.ipengine)
else:
cmd = [self.ipengine]
cmd.append('--profile='+self.profile)
cmd.append('--log-to-file')
cmd.append('--log-level=20')
cmd.append('--work-dir={0}'.format(self.cwd))
# print ' '.join(cmd)
tmp = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid)
self.engines.append(tmp)
time.sleep(0.1)
self.wait_for_engines()
def wait_for_engines(self):
"""Loops until engies have started"""
tic = time.time()
        while time.time() - tic < 120:
try:
rc = parallel.Client(profile=self.profile)
                msg = 'Waiting for engines: {0} of {1}'
msg = msg.format(len(rc), len(self.engines))
self.logger.debug(msg)
if len(rc.ids) == len(self.engines):
return True
else:
time.sleep(2)
except ValueError, e:
time.sleep(2)
except IOError, e:
time.sleep(2)
    # NOTE: profile removal is handled by the static remove_profile() below,
    # which also works with the dict loaded from profile.json, so an
    # instance-level variant is not needed (it would be shadowed anyway).
def client(self):
return parallel.Client(profile=self.profile)
@staticmethod
def remove_profile(tmp):
"""Removes the profile directory"""
count = 0
        while count < 20:
try:
shutil.rmtree(tmp['profile_directory'])
count += 1
return True
except OSError:
time.sleep(1)
return False
@staticmethod
def terminate_cluster(tmp):
try:
for engine in tmp['engines']:
os.killpg( engine, signal.SIGINT)
except OSError:
pass
try:
os.killpg( tmp['controller'], signal.SIGINT)
        except (AttributeError, OSError):
pass
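    # Note: the controller and engines are launched with preexec_fn=os.setsid,
    # so each child leads its own process group; os.killpg(pid, SIGINT) above
    # therefore signals the whole local process group rather than just the
    # immediate child.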
def save(self):
tmp = {}
tmp['profile'] = self.profile
tmp['profile_directory'] = self.profile_directory
tmp['engines'] = [ x.pid for x in self.engines]
tmp['controller'] = self.controller.pid
with open('profile.json','w') as outfile:
outfile.write(json.dumps(tmp))
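    # Sketch of the profile.json written above (values are illustrative):
    #   {"profile": "temp_<uuid>",
    #    "profile_directory": "/home/user/.ipython/profile_temp_<uuid>",
    #    "engines": [12345, 12346],
    #    "controller": 12300}
    # The module-level read_profile()/client()/delete() helpers below use this
    # file to reconnect to, or tear down, a previously started cluster.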
def __del__(self):
''' Either delete the cluster or write the profile
information to profile.json'''
tmp = {}
tmp['profile'] = self.profile
tmp['profile_directory'] = self.profile_directory
tmp['engines'] = [ x.pid for x in self.engines]
tmp['controller'] = self.controller.pid
if self.args['terminate'] == True:
self.logger.debug('terminating cluster')
self.terminate_cluster(tmp)
self.remove_profile(tmp)
def read_profile():
with open('profile.json','r') as infile:
data = json.loads(infile.read())
return data
def client():
'''return the client using profile.json'''
data = read_profile()
return parallel.Client(profile=data['profile'])
def delete():
'''Delete cluster using profile.json'''
data = read_profile()
Cluster.terminate_cluster(data)
Cluster.remove_profile(data)
def get_args(argv):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help=None, dest='command')
start = subparsers.add_parser('start', help='start a cluster.\n')
start.add_argument('--ppn', help='processors per node', dest='ppn')
start.add_argument('--debug', help='print debug messages', dest='debug')
start.add_argument('--profile', help='name of profile', dest='profile')
start.set_defaults(ppn=12)
start.set_defaults(debug=True)
start.set_defaults(profile=None)
stop = subparsers.add_parser('stop', help='stop the cluster.\n')
return parser.parse_args(argv)
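# Example command lines based on the parser above (script name is hypothetical):
#   python cluster.py start --ppn 12 --profile myprofile
#   python cluster.py stop
# 'start' builds a Cluster with terminate=False so the controller and engines
# outlive this process; 'stop' tears down whatever profile.json describes.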
if __name__ == '__main__':
args = get_args(sys.argv[1:])
if args.command == 'start':
opts = dict()
opts['ppn'] = args.ppn
opts['debug'] = args.debug
opts['terminate'] = False
opts['profile'] = args.profile
c = Cluster(**opts)
elif args.command == 'stop':
delete()
else:
print 'Not a valid command', args.command
|
|
"""The tests for numeric state automation."""
from datetime import timedelta
import logging
from unittest.mock import patch
import pytest
import voluptuous as vol
import homeassistant.components.automation as automation
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
)
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component,
async_fire_time_changed,
async_mock_service,
mock_component,
)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
async def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
await async_setup_component(
hass,
"input_number",
{
"input_number": {
"value_3": {"min": 0, "max": 255, "initial": 3},
"value_5": {"min": 0, "max": 255, "initial": 5},
"value_8": {"min": 0, "max": 255, "initial": 8},
"value_10": {"min": 0, "max": 255, "initial": 10},
"value_12": {"min": 0, "max": 255, "initial": 12},
"value_100": {"min": 0, "max": 255, "initial": 100},
}
},
)
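# Note: the input_number entities created above (input_number.value_3 ...
# input_number.value_100) back the parametrized thresholds below, so each test
# runs once with a literal number (e.g. below=10) and once with an entity
# reference (e.g. below="input_number.value_10").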
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_removal(hass, calls, below):
"""Test the firing with removed entity."""
hass.states.async_set("test.entity", 11)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# Entity disappears
hass.states.async_remove("test.entity")
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_below(hass, calls, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
context = Context()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9, context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
# Set above 12 so the automation will fire again
hass.states.async_set("test.entity", 12)
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_over_to_below(hass, calls, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_entities_change_over_to_below(hass, calls, below):
"""Test the firing with changed entities."""
hass.states.async_set("test.entity_1", 11)
hass.states.async_set("test.entity_2", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 2
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_below_to_below(hass, calls, below):
"""Test the firing with changed entity."""
context = Context()
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10 so this should fire
hass.states.async_set("test.entity", 9, context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
# already below so should not fire again
hass.states.async_set("test.entity", 5)
await hass.async_block_till_done()
assert len(calls) == 1
# still below so should not fire again
hass.states.async_set("test.entity", 3)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_below_fires_on_entity_change_to_equal(hass, calls, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 10 is not below 10 so this should not fire again
hass.states.async_set("test.entity", 10)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_initial_entity_below(hass, calls, below):
"""Test the firing when starting with a match."""
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# Fire on first update even if initial state was already below
hass.states.async_set("test.entity", 8)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_fires_on_initial_entity_above(hass, calls, above):
"""Test the firing when starting with a match."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# Fire on first update even if initial state was already above
hass.states.async_set("test.entity", 12)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_above(hass, calls, above):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is above 10
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_below_to_above(hass, calls, above):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is above 10 and 9 is below
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_above_to_above(hass, calls, above):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 12 is above 10 so this should fire
hass.states.async_set("test.entity", 12)
await hass.async_block_till_done()
assert len(calls) == 1
# already above, should not fire again
hass.states.async_set("test.entity", 15)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_not_above_fires_on_entity_change_to_equal(hass, calls, above):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 10 is not above 10 so this should not fire again
hass.states.async_set("test.entity", 10)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_below_range(hass, calls, above, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_below_above_range(hass, calls, above, below):
"""Test the firing with changed entity."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
    # 4 is below the lower bound of 5, so it is outside the range and should not fire
hass.states.async_set("test.entity", 4)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_over_to_below_range(hass, calls, above, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_over_to_below_above_range(
hass, calls, above, below
):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": above,
"above": below,
},
"action": {"service": "test.automation"},
}
},
)
# 4 is below 5 so it should not fire
hass.states.async_set("test.entity", 4)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (100, "input_number.value_100"))
async def test_if_not_fires_if_entity_not_match(hass, calls, below):
"""Test if not fired with non matching entity."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.another_entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_and_warns_if_below_entity_unknown(hass, caplog, calls):
"""Test if warns with unknown below entity."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": "input_number.unknown",
},
"action": {"service": "test.automation"},
}
},
)
caplog.clear()
caplog.set_level(logging.WARNING)
hass.states.async_set("test.entity", 1)
await hass.async_block_till_done()
assert len(calls) == 0
assert len(caplog.record_tuples) == 1
assert caplog.record_tuples[0][1] == logging.WARNING
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_below_with_attribute(hass, calls, below):
"""Test attributes change."""
hass.states.async_set("test.entity", 11, {"test_attribute": 11})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9, {"test_attribute": 11})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_not_below_with_attribute(
hass, calls, below
):
"""Test attributes."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10
hass.states.async_set("test.entity", 11, {"test_attribute": 9})
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_attribute_change_with_attribute_below(hass, calls, below):
"""Test attributes change."""
hass.states.async_set("test.entity", "entity", {"test_attribute": 11})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": 9})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_attribute_change_with_attribute_not_below(
hass, calls, below
):
"""Test attributes change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": 11})
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_with_attribute_below(hass, calls, below):
"""Test attributes change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10, entity state value should not be tested
hass.states.async_set("test.entity", "9", {"test_attribute": 11})
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_with_not_attribute_below(
hass, calls, below
):
"""Test attributes change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
    # No test_attribute is set, so the value_template has nothing to evaluate
    # and the trigger should not fire
hass.states.async_set("test.entity", "entity")
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_fires_on_attr_change_with_attribute_below_and_multiple_attr(
hass, calls, below
):
"""Test attributes change."""
hass.states.async_set(
"test.entity", "entity", {"test_attribute": 11, "not_test_attribute": 11}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
    # 9 is below 10
hass.states.async_set(
"test.entity", "entity", {"test_attribute": 9, "not_test_attribute": 11}
)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_template_list(hass, calls, below):
"""Test template list."""
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 11]})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute[2] }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 3 is below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 3]})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10.0, "input_number.value_10"))
async def test_template_string(hass, calls, below):
"""Test template string."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute | multiply(10) }}",
"below": below,
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"below",
"above",
"from_state.state",
"to_state.state",
)
)
},
},
}
},
)
hass.states.async_set("test.entity", "test state 1", {"test_attribute": "1.2"})
await hass.async_block_till_done()
hass.states.async_set("test.entity", "test state 2", {"test_attribute": "0.9"})
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"]
== f"numeric_state - test.entity - {below} - None - test state 1 - test state 2"
)
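# The "%"-join construction above expands to a single Jinja2 template, roughly:
#   "{{ trigger.platform }} - {{ trigger.entity_id }} - {{ trigger.below }}"
#   " - {{ trigger.above }} - {{ trigger.from_state.state }} - {{ trigger.to_state.state }}"
# which is why the assertion compares against the dash-separated values.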
async def test_not_fires_on_attr_change_with_attr_not_below_multiple_attr(hass, calls):
"""Test if not fired changed attributes."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": 10,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10
hass.states.async_set(
"test.entity", "entity", {"test_attribute": 11, "not_test_attribute": 9}
)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_action(hass, calls, above, below):
"""Test if action."""
entity_id = "domain.test_entity"
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "numeric_state",
"entity_id": entity_id,
"above": above,
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(entity_id, 10)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(entity_id, 8)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(entity_id, 9)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fails_setup_bad_for(hass, calls, above, below):
"""Test for setup failure for bad for."""
hass.states.async_set("test.entity", 5)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"invalid": 5},
},
"action": {"service": "homeassistant.turn_on"},
}
},
)
with patch.object(numeric_state_trigger, "_LOGGER") as mock_logger:
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert mock_logger.error.called
async def test_if_fails_setup_for_without_above_below(hass, calls):
"""Test for setup failures for missing above or below."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"for": {"seconds": 5},
},
"action": {"service": "homeassistant.turn_on"},
}
},
)
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_not_fires_on_entity_change_with_for(hass, calls, above, below):
"""Test for not firing on entity change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
hass.states.async_set("test.entity", 15)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_not_fires_on_entities_change_with_for_after_stop(
hass, calls, above, below
):
"""Test for not firing on entities change with for after stop."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity_1", 9)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity_1", 15)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
hass.states.async_set("test.entity_1", 9)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entity_change_with_for_attribute_change(
hass, calls, above, below
):
"""Test for firing on entity change with for and attribute change."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=4)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity", 9, attributes={"mock_attr": "attr_change"})
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=4)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
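# The test above depends on attribute-only changes (state value still 9) not
# restarting the `for: 5` window, so the trigger still fires once the original
# five seconds have elapsed.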
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entity_change_with_for(hass, calls, above, below):
"""Test for firing on entity change with for."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_wait_template_with_trigger(hass, calls, above):
"""Test using wait template with 'trigger.entity_id'."""
hass.states.async_set("test.entity", "0")
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": [
{"wait_template": "{{ states(trigger.entity_id) | int < 10 }}"},
{
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
("platform", "entity_id", "to_state.state")
)
},
},
],
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "12")
hass.states.async_set("test.entity", "8")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "numeric_state - test.entity - 12"
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entities_change_no_overlap(hass, calls, above, below):
"""Test for firing on entities change with no overlap."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=10)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1"
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=10)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2"
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entities_change_overlap(hass, calls, above, below):
"""Test for firing on entities change with overlap."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1"
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2"
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_change_with_for_template_1(hass, calls, above, below):
"""Test for firing on change with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": "{{ 5 }}"},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_change_with_for_template_2(hass, calls, above, below):
"""Test for firing on change with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": "{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_change_with_for_template_3(hass, calls, above, below):
"""Test for firing on change with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": "00:00:{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_invalid_for_template(hass, calls, above, below):
"""Test for invalid for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": "{{ five }}",
},
"action": {"service": "test.automation"},
}
},
)
with patch.object(numeric_state_trigger, "_LOGGER") as mock_logger:
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert mock_logger.error.called
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entities_change_overlap_for_template(
hass, calls, above, below
):
"""Test for firing on entities change with overlap and for template."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": '{{ 5 if trigger.entity_id == "test.entity_1"'
" else 10 }}",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.entity_id }} - {{ trigger.for }}"
},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1 - 0:00:05"
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
mock_utcnow.return_value += timedelta(seconds=5)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2 - 0:00:10"
def test_below_above():
"""Test above cannot be above below."""
with pytest.raises(vol.Invalid):
numeric_state_trigger.TRIGGER_SCHEMA(
{"platform": "numeric_state", "above": 1200, "below": 1000}
)
def test_schema_input_number():
"""Test input_number only is accepted for above/below."""
with pytest.raises(vol.Invalid):
numeric_state_trigger.TRIGGER_SCHEMA(
{
"platform": "numeric_state",
"above": "input_datetime.some_input",
"below": 1000,
}
)
with pytest.raises(vol.Invalid):
numeric_state_trigger.TRIGGER_SCHEMA(
{
"platform": "numeric_state",
"below": "input_datetime.some_input",
"above": 1200,
}
)
@pytest.mark.parametrize("above", (3, "input_number.value_3"))
async def test_attribute_if_fires_on_entity_change_with_both_filters(
hass, calls, above
):
"""Test for firing if both filters are match attribute."""
hass.states.async_set("test.entity", "bla", {"test-measurement": 1})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"attribute": "test-measurement",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "bla", {"test-measurement": 4})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (3, "input_number.value_3"))
async def test_attribute_if_not_fires_on_entities_change_with_for_after_stop(
hass, calls, above
):
"""Test for not firing on entity change with for after stop trigger."""
hass.states.async_set("test.entity", "bla", {"test-measurement": 1})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"attribute": "test-measurement",
"for": 5,
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "bla", {"test-measurement": 4})
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
((8, 12),),
)
async def test_variables_priority(hass, calls, above, below):
"""Test an externally defined trigger variable is overridden."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger_variables": {"trigger": "illegal"},
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": '{{ 5 if trigger.entity_id == "test.entity_1"'
" else 10 }}",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.entity_id }} - {{ trigger.for }}"
},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1 - 0:00:05"
@pytest.mark.parametrize("multiplier", (1, 5))
async def test_template_variable(hass, calls, multiplier):
"""Test template variable."""
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 11]})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger_variables": {"multiplier": multiplier},
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute[2] * multiplier}}",
"below": 10,
},
"action": {"service": "test.automation"},
}
},
)
# 3 is below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 3]})
await hass.async_block_till_done()
if multiplier * 3 < 10:
assert len(calls) == 1
else:
assert len(calls) == 0
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definitions for simple speech recognition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import math
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
MAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M
SILENCE_LABEL = '_silence_'
SILENCE_INDEX = 0
UNKNOWN_WORD_LABEL = '_unknown_'
UNKNOWN_WORD_INDEX = 1
BACKGROUND_NOISE_DIR_NAME = '_background_noise_'
RANDOM_SEED = 59185
def prepare_words_list(wanted_words):
"""Prepends common tokens to the custom word list.
Args:
wanted_words: List of strings containing the custom words.
Returns:
List with the standard silence and unknown tokens added.
"""
return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words
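# For example, prepare_words_list(['yes', 'no']) returns
# ['_silence_', '_unknown_', 'yes', 'no'], which is why wanted words are
# assigned indices starting at 2 in prepare_data_index() below.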
def which_set(filename, validation_percentage, testing_percentage):
"""Determines which data partition the file should belong to.
We want to keep files in the same training, validation, or testing sets even
if new ones are added over time. This makes it less likely that testing
samples will accidentally be reused in training when long runs are restarted
for example. To keep this stability, a hash of the filename is taken and used
to determine which set it should belong to. This determination only depends on
the name and the set proportions, so it won't change as other files are added.
It's also useful to associate particular files as related (for example words
spoken by the same person), so anything after '_nohash_' in a filename is
ignored for set determination. This ensures that 'bobby_nohash_0.wav' and
'bobby_nohash_1.wav' are always in the same set, for example.
Args:
filename: File path of the data sample.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
String, one of 'training', 'validation', or 'testing'.
"""
base_name = os.path.basename(filename)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put a wav in, so the data set creator has a way of
# grouping wavs that are close variations of each other.
hash_name = re.sub(r'_nohash_.*$', '', base_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_WAVS_PER_CLASS + 1)) *
(100.0 / MAX_NUM_WAVS_PER_CLASS))
if percentage_hash < validation_percentage:
result = 'validation'
elif percentage_hash < (testing_percentage + validation_percentage):
result = 'testing'
else:
result = 'training'
return result
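# Illustrative sketch (hypothetical filename): for 'data/yes/bobby_nohash_0.wav'
# the hash is computed over 'bobby', so 'bobby_nohash_0.wav' and
# 'bobby_nohash_1.wav' always land in the same partition. With
# validation_percentage=10 and testing_percentage=10, the SHA-1-derived
# percentage_hash selects 'validation' (< 10), 'testing' (< 20) or 'training'
# (otherwise).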
def load_wav_file(filename):
"""Loads an audio file and returns a float PCM-encoded array of samples.
Args:
filename: Path to the .wav file to load.
Returns:
Numpy array holding the sample data as floats between -1.0 and 1.0.
"""
with tf.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1)
return sess.run(
wav_decoder,
feed_dict={wav_filename_placeholder: filename}).audio.flatten()
def save_wav_file(filename, wav_data, sample_rate):
"""Saves audio sample data to a .wav audio file.
Args:
filename: Path to save the file to.
wav_data: 2D array of float PCM-encoded audio data.
sample_rate: Samples per second to encode in the file.
"""
with tf.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.placeholder(tf.string, [])
sample_rate_placeholder = tf.placeholder(tf.int32, [])
wav_data_placeholder = tf.placeholder(tf.float32, [None, 1])
wav_encoder = contrib_audio.encode_wav(wav_data_placeholder,
sample_rate_placeholder)
wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder)
sess.run(
wav_saver,
feed_dict={
wav_filename_placeholder: filename,
sample_rate_placeholder: sample_rate,
wav_data_placeholder: np.reshape(wav_data, (-1, 1))
})
class AudioProcessor(object):
"""Handles loading, partitioning, and preparing audio training data."""
def __init__(self, data_url, data_dir, silence_percentage, unknown_percentage,
wanted_words, validation_percentage, testing_percentage,
model_settings):
self.data_dir = data_dir
self.maybe_download_and_extract_dataset(data_url, data_dir)
self.prepare_data_index(silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage)
self.prepare_background_data()
self.prepare_processing_graph(model_settings)
def maybe_download_and_extract_dataset(self, data_url, dest_directory):
"""Download and extract data set tar file.
If the data set we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a
directory.
If the data_url is none, don't download anything and expect the data
directory to contain the correct files already.
Args:
data_url: Web location of the tar file containing the data set.
dest_directory: File path to extract data to.
"""
if not data_url:
return
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
try:
filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
except:
tf.logging.error('Failed to download URL: %s to folder: %s', data_url,
filepath)
tf.logging.error('Please make sure you have enough free space and'
' an internet connection')
raise
print()
statinfo = os.stat(filepath)
tf.logging.info('Successfully downloaded %s (%d bytes)', filename,
statinfo.st_size)
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def prepare_data_index(self, silence_percentage, unknown_percentage,
wanted_words, validation_percentage,
testing_percentage):
"""Prepares a list of the samples organized by set and label.
The training loop needs a list of all the available data, organized by
which partition it should belong to, and with ground truth labels attached.
    This function analyzes the folders below the `data_dir`, figures out the
    right labels for each file based on the name of the subdirectory it belongs
    to, and uses a stable hash to assign it to a data set partition.
Args:
silence_percentage: How much of the resulting data should be background.
unknown_percentage: How much should be audio outside the wanted classes.
wanted_words: Labels of the classes we want to be able to recognize.
validation_percentage: How much of the data set to use for validation.
testing_percentage: How much of the data set to use for testing.
Returns:
Dictionary containing a list of file information for each set partition,
and a lookup map for each class to determine its numeric index.
Raises:
Exception: If expected files are not found.
"""
# Make sure the shuffling and picking of unknowns is deterministic.
random.seed(RANDOM_SEED)
wanted_words_index = {}
for index, wanted_word in enumerate(wanted_words):
wanted_words_index[wanted_word] = index + 2
self.data_index = {'validation': [], 'testing': [], 'training': []}
unknown_index = {'validation': [], 'testing': [], 'training': []}
all_words = {}
# Look through all the subfolders to find audio samples
search_path = os.path.join(self.data_dir, '*', '*.wav')
for wav_path in gfile.Glob(search_path):
word = re.search('.*/([^/]+)/.*.wav', wav_path).group(1).lower()
# Treat the '_background_noise_' folder as a special case, since we expect
# it to contain long audio samples we mix in to improve training.
if word == BACKGROUND_NOISE_DIR_NAME:
continue
all_words[word] = True
set_index = which_set(wav_path, validation_percentage, testing_percentage)
# If it's a known class, store its detail, otherwise add it to the list
# we'll use to train the unknown label.
if word in wanted_words_index:
self.data_index[set_index].append({'label': word, 'file': wav_path})
else:
unknown_index[set_index].append({'label': word, 'file': wav_path})
if not all_words:
raise Exception('No .wavs found at ' + search_path)
for index, wanted_word in enumerate(wanted_words):
if wanted_word not in all_words:
raise Exception('Expected to find ' + wanted_word +
' in labels but only found ' +
', '.join(all_words.keys()))
# We need an arbitrary file to load as the input for the silence samples.
# It's multiplied by zero later, so the content doesn't matter.
silence_wav_path = self.data_index['training'][0]['file']
for set_index in ['validation', 'testing', 'training']:
set_size = len(self.data_index[set_index])
silence_size = int(math.ceil(set_size * silence_percentage / 100))
for _ in range(silence_size):
self.data_index[set_index].append({
'label': SILENCE_LABEL,
'file': silence_wav_path
})
# Pick some unknowns to add to each partition of the data set.
random.shuffle(unknown_index[set_index])
unknown_size = int(math.ceil(set_size * unknown_percentage / 100))
self.data_index[set_index].extend(unknown_index[set_index][:unknown_size])
# Make sure the ordering is random.
for set_index in ['validation', 'testing', 'training']:
random.shuffle(self.data_index[set_index])
# Prepare the rest of the result data structure.
self.words_list = prepare_words_list(wanted_words)
self.word_to_index = {}
for word in all_words:
if word in wanted_words_index:
self.word_to_index[word] = wanted_words_index[word]
else:
self.word_to_index[word] = UNKNOWN_WORD_INDEX
self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX
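  # Sketch of the structures built above (paths are hypothetical):
  #   self.data_index  == {'training': [{'label': 'yes', 'file': '.../yes/a.wav'}, ...],
  #                        'validation': [...], 'testing': [...]}
  #   self.words_list  == ['_silence_', '_unknown_', <wanted words...>]
  #   self.word_to_index maps wanted words to 2.., other words to
  #   UNKNOWN_WORD_INDEX (1), and '_silence_' to SILENCE_INDEX (0).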
def prepare_background_data(self):
"""Searches a folder for background noise audio, and loads it into memory.
It's expected that the background audio samples will be in a subdirectory
named '_background_noise_' inside the 'data_dir' folder, as .wavs that match
the sample rate of the training data, but can be much longer in duration.
If the '_background_noise_' folder doesn't exist at all, this isn't an
error, it's just taken to mean that no background noise augmentation should
be used. If the folder does exist, but it's empty, that's treated as an
error.
Returns:
List of raw PCM-encoded audio samples of background noise.
Raises:
Exception: If files aren't found in the folder.
"""
self.background_data = []
background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)
if not os.path.exists(background_dir):
return self.background_data
with tf.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1)
search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME,
'*.wav')
for wav_path in gfile.Glob(search_path):
wav_data = sess.run(
wav_decoder,
feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()
self.background_data.append(wav_data)
if not self.background_data:
raise Exception('No background wav files were found in ' + search_path)
def prepare_processing_graph(self, model_settings):
"""Builds a TensorFlow graph to apply the input distortions.
Creates a graph that loads a WAVE file, decodes it, scales the volume,
shifts it in time, adds in background noise, calculates a spectrogram, and
then builds an MFCC fingerprint from that.
This must be called with an active TensorFlow session running, and it
creates multiple placeholder inputs, and one output:
- wav_filename_placeholder_: Filename of the WAV to load.
- foreground_volume_placeholder_: How loud the main clip should be.
- time_shift_padding_placeholder_: Where to pad the clip.
- time_shift_offset_placeholder_: How much to move the clip in time.
- background_data_placeholder_: PCM sample data for background noise.
- background_volume_placeholder_: Loudness of mixed-in background.
- mfcc_: Output 2D fingerprint of processed audio.
Args:
model_settings: Information about the current model being trained.
"""
desired_samples = model_settings['desired_samples']
self.wav_filename_placeholder_ = tf.placeholder(tf.string, [])
wav_loader = io_ops.read_file(self.wav_filename_placeholder_)
wav_decoder = contrib_audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
# Allow the audio sample's volume to be adjusted.
self.foreground_volume_placeholder_ = tf.placeholder(tf.float32, [])
scaled_foreground = tf.multiply(wav_decoder.audio,
self.foreground_volume_placeholder_)
# Shift the sample's start position, and pad any gaps with zeros.
self.time_shift_padding_placeholder_ = tf.placeholder(tf.int32, [2, 2])
self.time_shift_offset_placeholder_ = tf.placeholder(tf.int32, [2])
padded_foreground = tf.pad(
scaled_foreground,
self.time_shift_padding_placeholder_,
mode='CONSTANT')
sliced_foreground = tf.slice(padded_foreground,
self.time_shift_offset_placeholder_,
[desired_samples, -1])
# Mix in background noise.
self.background_data_placeholder_ = tf.placeholder(tf.float32,
[desired_samples, 1])
self.background_volume_placeholder_ = tf.placeholder(tf.float32, [])
background_mul = tf.multiply(self.background_data_placeholder_,
self.background_volume_placeholder_)
background_add = tf.add(background_mul, sliced_foreground)
background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)
# Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.
spectrogram = contrib_audio.audio_spectrogram(
background_clamp,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
self.mfcc_ = contrib_audio.mfcc(
spectrogram,
wav_decoder.sample_rate,
dct_coefficient_count=model_settings['dct_coefficient_count'])
def set_size(self, mode):
"""Calculates the number of samples in the dataset partition.
Args:
mode: Which partition, must be 'training', 'validation', or 'testing'.
Returns:
Number of samples in the partition.
"""
return len(self.data_index[mode])
def get_data(self, how_many, offset, model_settings, background_frequency,
background_volume_range, time_shift, mode, sess):
"""Gather samples from the data set, applying transformations as needed.
When the mode is 'training', a random selection of samples will be returned,
otherwise the first N clips in the partition will be used. This ensures that
validation always uses the same samples, reducing noise in the metrics.
Args:
how_many: Desired number of samples to return. -1 means the entire
contents of this partition.
offset: Where to start when fetching deterministically.
model_settings: Information about the current model being trained.
background_frequency: How many clips will have background noise, 0.0 to
1.0.
background_volume_range: How loud the background noise will be.
time_shift: How much to randomly shift the clips by in time.
mode: Which partition to use, must be 'training', 'validation', or
'testing'.
sess: TensorFlow session that was active when processor was created.
Returns:
List of sample data for the transformed samples, and list of labels in
one-hot form.
"""
# Pick one of the partitions to choose samples from.
candidates = self.data_index[mode]
if how_many == -1:
sample_count = len(candidates)
else:
sample_count = max(0, min(how_many, len(candidates) - offset))
# Data and labels will be populated and returned.
data = np.zeros((sample_count, model_settings['fingerprint_size']))
labels = np.zeros((sample_count, model_settings['label_count']))
desired_samples = model_settings['desired_samples']
use_background = self.background_data and (mode == 'training')
pick_deterministically = (mode != 'training')
    # Use the processing graph we created earlier to repeatedly generate the
    # final output sample data we'll use in training.
for i in xrange(offset, offset + sample_count):
# Pick which audio sample to use.
if how_many == -1 or pick_deterministically:
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
# If we're time shifting, set up the offset for this sample.
if time_shift > 0:
time_shift_amount = np.random.randint(-time_shift, time_shift)
else:
time_shift_amount = 0
if time_shift_amount > 0:
time_shift_padding = [[time_shift_amount, 0], [0, 0]]
time_shift_offset = [0, 0]
else:
time_shift_padding = [[0, -time_shift_amount], [0, 0]]
time_shift_offset = [-time_shift_amount, 0]
input_dict = {
self.wav_filename_placeholder_: sample['file'],
self.time_shift_padding_placeholder_: time_shift_padding,
self.time_shift_offset_placeholder_: time_shift_offset,
}
# Choose a section of background noise to mix in.
if use_background:
background_index = np.random.randint(len(self.background_data))
background_samples = self.background_data[background_index]
background_offset = np.random.randint(
0, len(background_samples) - model_settings['desired_samples'])
background_clipped = background_samples[background_offset:(
background_offset + desired_samples)]
background_reshaped = background_clipped.reshape([desired_samples, 1])
if np.random.uniform(0, 1) < background_frequency:
background_volume = np.random.uniform(0, background_volume_range)
else:
background_volume = 0
else:
background_reshaped = np.zeros([desired_samples, 1])
background_volume = 0
input_dict[self.background_data_placeholder_] = background_reshaped
input_dict[self.background_volume_placeholder_] = background_volume
# If we want silence, mute out the main sample but leave the background.
if sample['label'] == SILENCE_LABEL:
input_dict[self.foreground_volume_placeholder_] = 0
else:
input_dict[self.foreground_volume_placeholder_] = 1
# Run the graph to produce the output audio.
data[i - offset, :] = sess.run(self.mfcc_, feed_dict=input_dict).flatten()
label_index = self.word_to_index[sample['label']]
labels[i - offset, label_index] = 1
return data, labels
def get_unprocessed_data(self, how_many, model_settings, mode):
"""Retrieve sample data for the given partition, with no transformations.
Args:
how_many: Desired number of samples to return. -1 means the entire
contents of this partition.
model_settings: Information about the current model being trained.
mode: Which partition to use, must be 'training', 'validation', or
'testing'.
Returns:
List of sample data for the samples, and list of labels in one-hot form.
"""
candidates = self.data_index[mode]
if how_many == -1:
sample_count = len(candidates)
else:
sample_count = how_many
desired_samples = model_settings['desired_samples']
words_list = self.words_list
data = np.zeros((sample_count, desired_samples))
labels = []
with tf.Session(graph=tf.Graph()) as sess:
wav_filename_placeholder = tf.placeholder(tf.string, [])
wav_loader = io_ops.read_file(wav_filename_placeholder)
wav_decoder = contrib_audio.decode_wav(
wav_loader, desired_channels=1, desired_samples=desired_samples)
foreground_volume_placeholder = tf.placeholder(tf.float32, [])
scaled_foreground = tf.multiply(wav_decoder.audio,
foreground_volume_placeholder)
for i in range(sample_count):
if how_many == -1:
sample_index = i
else:
sample_index = np.random.randint(len(candidates))
sample = candidates[sample_index]
input_dict = {wav_filename_placeholder: sample['file']}
if sample['label'] == SILENCE_LABEL:
input_dict[foreground_volume_placeholder] = 0
else:
input_dict[foreground_volume_placeholder] = 1
data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()
label_index = self.word_to_index[sample['label']]
labels.append(words_list[label_index])
return data, labels
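# A minimal, hypothetical driver for the class above. The names
# `audio_processor` (an instance of the class defined in this file) and
# `model_settings` (a dict with the keys used above, e.g. 'desired_samples',
# 'fingerprint_size', 'label_count') are assumed to be supplied by the caller.
def _example_fetch_training_batch(audio_processor, model_settings, sess):
  """Fetches one augmented batch of 100 samples using get_data() above."""
  return audio_processor.get_data(
      how_many=100, offset=0, model_settings=model_settings,
      background_frequency=0.8, background_volume_range=0.1,
      time_shift=100, mode='training', sess=sess)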
|
|
from PySide import QtGui, QtOpenGL, QtCore
from OpenGL import GL
from OpenGL.GL import shaders
from OpenGL.arrays import vbo
from OpenGL.GL.ARB import texture_rg
from OpenGL.GL.ARB import half_float_vertex
from ctypes import c_void_p
import numpy
import math
vert_src = """#version 120
attribute vec2 positionIn;
attribute vec2 texIn;
varying vec2 texcoord;
void main()
{
gl_Position= vec4(positionIn * 2.0 - vec2(1),0,1);
texcoord = texIn;
}
"""
frag_src = """#version 120
uniform sampler2D texture;
uniform int hdr_mode;
uniform float g;
uniform float m;
uniform float s;
varying vec2 texcoord;
float convert(float x)
{
return clamp(pow(x*m,g) *s, 0.0, 1.0);
}
void main()
{
vec2 coords = vec2(texcoord.x, 1.0 - texcoord.y);
vec3 tex = texture2D(texture, coords).xyz;
if (hdr_mode == 1) {
gl_FragColor = vec4(convert(tex.x), convert(tex.y), convert(tex.z), 1);
} else {
gl_FragColor = vec4(tex,1);
}
}
"""
class DisplayWidget(QtGui.QWidget):
def __init__(self, parent):
super(DisplayWidget, self).__init__(parent)
self.buffers = None
self.glWidget = GLWidget(self)
self.bufferComboBox = QtGui.QComboBox(self)
policy = QtGui.QSizePolicy()
policy.setHorizontalPolicy(QtGui.QSizePolicy.Expanding)
self.bufferComboBox.setSizePolicy(policy)
label = QtGui.QLabel("Buffers:")
label.setBuddy(self.bufferComboBox)
self.bufferComboBox.currentIndexChanged["QString"].connect(
self.onBufferSelectChange)
self.interactiveCheckBox = QtGui.QCheckBox("Interactive", self)
midLayout = QtGui.QHBoxLayout()
midLayout.addWidget(label)
midLayout.addWidget(self.bufferComboBox)
midLayout.addWidget(self.interactiveCheckBox)
self.label = QtGui.QLabel("Exposure: 0", self)
self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.slider.setRange(-100,100)
self.slider.setValue(0)
self.slider.valueChanged.connect(self.onExposureChange)
bottomLayout = QtGui.QHBoxLayout()
bottomLayout.addWidget(self.label)
bottomLayout.addWidget(self.slider)
bottomLayout.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.glWidget)
layout.addLayout(midLayout)
layout.addLayout(bottomLayout)
self.setLayout(layout)
def setBuffers(self, buffers):
for i in xrange(self.bufferComboBox.count()):
self.bufferComboBox.removeItem(0)
self.buffers = buffers
buffersList = buffers.keys()
buffersList.sort()
self.bufferComboBox.addItems(buffersList)
def setActiveBuffer(self, bufferName):
idx = self.bufferComboBox.findText(bufferName)
if idx == self.bufferComboBox.currentIndex():
self.refreshDisplay()
else:
self.bufferComboBox.setCurrentIndex(idx)
def onBufferSelectChange(self, value):
if str(value) in self.buffers:
ndarray = self.buffers[str(value)].data
self.glWidget.copyDataToTexture(ndarray)
if ndarray.dtype == numpy.float32 or ndarray.dtype == numpy.float16:
self.slider.setEnabled(True)
else:
self.slider.setEnabled(False)
self.glWidget.glDraw()
def onExposureChange(self):
value = 0.1 * self.slider.value()
self.glWidget.exposure = value
self.label.setText("Exposure: " + str(value))
self.glWidget.glDraw()
def refreshDisplay(self):
self.onBufferSelectChange(self.bufferComboBox.currentText())
def sizeHint(self):
return QtCore.QSize(400,400)
class GLWidget(QtOpenGL.QGLWidget):
def __init__(self, parent):
super(GLWidget, self).__init__(parent)
self.w = 440
self.h = 440
self.rightBtnDown = False
self.texture = None
self.texturedata = None
self.shader = None
self.hdr_mode = 0
self.vbo = None
self.scale = 0.5
self.steps = 0
self.cx = 0.5
self.cy = 0.5
self.gamma = 1.0/2.2
self.exposure = 0
self.zoomFactor = 1.35
self.panFactor = 0.002
def initializeGL(self):
pass
def copyDataToTexture(self, ndarray):
# Update dimensions of widget
self.texturedata = ndarray
self.w = ndarray.shape[0]
self.h = ndarray.shape[1]
self.updateGeometry()
# Generate new texture
if not self.texture:
try:
self.texture = GL.glGenTextures(1)
except Exception:
return
target = GL.GL_TEXTURE_2D
GL.glBindTexture(target, self.texture)
GL.glTexParameterf(target, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
GL.glTexParameterf(target, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
GL.glTexParameterf(target, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
GL.glTexParameterf(target, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)
GL.glTexParameteri(target, GL.GL_GENERATE_MIPMAP, GL.GL_FALSE);
# Get texture format
channels = ndarray.shape[2] if ndarray.ndim == 3 else 1
if channels == 1:
glFormat = GL.GL_RED
elif channels == 2:
glFormat = GL.GL_RG
elif channels == 3:
glFormat = GL.GL_RGB
elif channels == 4:
glFormat = GL.GL_RGBA
glInternalFormat = glFormat
# Get texture type
if ndarray.dtype == numpy.float32:
glType = GL.GL_FLOAT
# Need to use the exposure shader if floating point
self.hdr_mode = 1
# The internal format changes with floating point textures
if channels == 1:
glInternalFormat = texture_rg.GL_R32F
elif channels == 2:
glInternalFormat = texture_rg.GL_RG32F
elif channels == 3:
glInternalFormat = GL.GL_RGB32F
elif channels == 4:
glInternalFormat = GL.GL_RGBA32F
elif ndarray.dtype == numpy.float16:
glType = GL.GL_FLOAT
# Need to use the exposure shader if floating point
self.hdr_mode = 1
# The internal format changes with floating point textures
if channels == 1:
glInternalFormat = texture_rg.GL_R16F
elif channels == 2:
glInternalFormat = texture_rg.GL_RG16F
elif channels == 3:
glInternalFormat = GL.GL_RGB16F
elif channels == 4:
glInternalFormat = GL.GL_RGBA16F
else:
glType = GL.GL_UNSIGNED_BYTE
self.hdr_mode = 0
# Copy data to texture
GL.glTexImage2D(target, 0, glInternalFormat, self.w, self.h,
0, glFormat, glType, ndarray)
GL.glBindTexture(target, 0)
def resizeGL(self, width, height):
GL.glViewport(0,0,width,height)
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
GL.glOrtho(0,1,0,1,0,1)
GL.glMatrixMode(GL.GL_MODELVIEW)
def compileShaders(self):
# Build shaders
vert_shader = shaders.compileShader(vert_src, GL.GL_VERTEX_SHADER)
frag_shader = shaders.compileShader(frag_src, GL.GL_FRAGMENT_SHADER)
self.shader = shaders.compileProgram(vert_shader, frag_shader)
def paintGL(self):
        if GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == 33305:  # GL_FRAMEBUFFER_UNDEFINED (0x8219)
return
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
if not self.texture:
return
if not self.shader:
self.compileShaders()
if not self.vbo:
self.vbo = GL.glGenBuffers(1)
shaders.glUseProgram(self.shader)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vbo)
vertices = numpy.array(
[-self.scale + self.cx, -self.scale + self.cy ,
self.scale + self.cx, -self.scale + self.cy,
self.scale + self.cx, self.scale + self.cy,
-self.scale + self.cx, self.scale + self.cy,
0,0,1,0,1,1,0,1], dtype = numpy.float32)
GL.glBufferData(GL.GL_ARRAY_BUFFER, 64, vertices, GL.GL_STATIC_DRAW)
loc = GL.glGetAttribLocation(self.shader, "positionIn")
GL.glEnableVertexAttribArray(loc)
GL.glVertexAttribPointer(loc, 2, GL.GL_FLOAT, 0, 8, c_void_p(0))
loc = GL.glGetAttribLocation(self.shader, "texIn")
GL.glEnableVertexAttribArray(loc)
GL.glVertexAttribPointer(loc, 2, GL.GL_FLOAT, 0, 8, c_void_p(32))
def _uniformLoc(name):
return GL.glGetUniformLocation(self.shader,name)
GL.glUniform1f(_uniformLoc("g"), self.gamma);
GL.glUniform1f(_uniformLoc("m"), math.pow(2, self.exposure + 2.47393))
GL.glUniform1f(_uniformLoc("s"), math.pow(2, -3.5 * self.gamma))
GL.glUniform1i(_uniformLoc("hdr_mode"), self.hdr_mode);
GL.glUniform1i(_uniformLoc("texture"), 0);
GL.glActiveTexture(GL.GL_TEXTURE0);
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture)
GL.glDrawArrays(GL.GL_QUADS, 0, 4);
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
loc = GL.glGetAttribLocation(self.shader, "positionIn")
GL.glDisableVertexAttribArray(loc)
loc = GL.glGetAttribLocation(self.shader, "texIn")
GL.glDisableVertexAttribArray(loc)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
shaders.glUseProgram(0)
if self.rightBtnDown:
self.renderPixelInfo()
def mousePressEvent(self, event):
self.lastPos = event.pos()
if event.button()== QtCore.Qt.RightButton:
self.rightBtnDown = True
self.glDraw()
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.RightButton:
self.rightBtnDown = False
self.glDraw()
def mouseMoveEvent(self, event):
dx = event.x() - self.lastPos.x()
dy = event.y() - self.lastPos.y()
if event.buttons() & QtCore.Qt.LeftButton:
self.cx += self.panFactor*dx
self.cy -= self.panFactor*dy
self.correctCenterCoordinates()
self.lastPos = event.pos()
self.glDraw()
def wheelEvent(self, event):
if event.delta() > 0:
self.steps += 1
else:
self.steps -= 1
        # Only allow the initial zoom level or larger (never smaller)
if self.steps < 0:
self.steps = 0
self.scale = 0.5 * math.pow(self.zoomFactor, self.steps)
self.correctCenterCoordinates()
self.glDraw()
def correctCenterCoordinates(self):
if -self.scale + self.cx > 0:
self.cx = self.scale
if self.scale + self.cx < 1:
self.cx = 1 - self.scale
if -self.scale + self.cy > 0:
self.cy = self.scale
if self.scale + self.cy < 1:
self.cy = 1 - self.scale
def sizeHint(self):
return QtCore.QSize(self.w,self.h)
def renderPixelInfo(self):
# Get pixel positions px and py
size = 2.0*(self.scale)
offx = self.w * (self.scale - self.cx) / size
offy = self.h * (self.scale - self.cy) / size
px = int(offx + (self.lastPos.x() * self.w) / (self.width() * size))
py = int(offy + (self.lastPos.y() * self.h) / (self.height()* size))
py = self.h - py
px = min(max(px,0), self.w - 1)
py = min(max(py,0), self.h - 1)
val = [None, None, None, None]
for i in xrange(self.texturedata.shape[2]):
val[i] = self.texturedata[px][py][i]
texts = ["x:%i y:%i" % (px,py),
"R:%f" % val[0] if val[0] else "n/a",
"G:%f" % val[1] if val[1] else "n/a",
"B:%f" % val[2] if val[2] else "n/a"]
font = QtGui.QFont()
font.setFamily("Monospace")
#font.setFixedPitch(True);
metrics = QtGui.QFontMetrics(font)
sx = 20 # spacing variable
w,h = metrics.width(texts[1]), metrics.height()
metrics.width(" ")
x,y = self.lastPos.x(), self.height() - self.lastPos.y() - sx
dx,dy = 1.0/self.width(), 1.0/self.height()
# Calculate pixel info position
# Swap position if outside screen
if x + 1.5*sx + w < self.width():
x0 = x + 0.75*sx
x1 = x + 1.5*sx + w + 10
tx = x + sx
else:
x0 = x - 0.75*sx
x1 = x - 1.5*sx - w
tx = x - sx - w
if y + sx - 5 * h > 0:
y0 = y + sx
y1 = y + sx - 5 * h
ty = self.height()-y
else:
y0 = y - sx + 3 * h
y1 = y - sx + 8 * h
ty = self.height()-y - 5 * h - 0.5*sx
# Draw transparent quad
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA);
GL.glEnable(GL.GL_BLEND)
GL.glBegin(GL.GL_QUADS)
GL.glColor4f(0,0,0,0.8)
for x,y in zip([x0,x1,x1,x0],[y0,y0,y1,y1]):
GL.glVertex2f(x * dx, y * dy)
GL.glEnd()
GL.glDisable(GL.GL_BLEND)
# Render text
GL.glColor4f(1,1,1,1)
for i,text in enumerate(texts):
self.renderText(tx, ty + i*h, text, font)
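# A minimal, hypothetical wiring sketch for DisplayWidget. ExampleBuffer is an
# assumption: the widget only requires each value passed to setBuffers() to
# expose a numpy array through a `.data` attribute (see onBufferSelectChange).
# setBuffers() is deferred with a zero-timeout timer so the GL widget has a
# context by the time the texture upload in copyDataToTexture() runs.
if __name__ == "__main__":
    import sys
    class ExampleBuffer(object):
        def __init__(self, data):
            self.data = data
    app = QtGui.QApplication(sys.argv)
    widget = DisplayWidget(None)
    widget.show()
    image = numpy.random.rand(256, 256, 3).astype(numpy.float32)
    QtCore.QTimer.singleShot(
        0, lambda: widget.setBuffers({"beauty": ExampleBuffer(image)}))
    sys.exit(app.exec_())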
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
import os
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.script import Script
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
import status_params
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
hostname = config['hostname']
metron_home = status_params.metron_home
parsers = status_params.parsers
parser_error_topic = config['configurations']['metron-parsers-env']['parser_error_topic']
geoip_hdfs_dir = "/apps/metron/geo/default/"
metron_user = status_params.metron_user
metron_group = config['configurations']['metron-env']['metron_group']
metron_log_dir = config['configurations']['metron-env']['metron_log_dir']
metron_pid_dir = config['configurations']['metron-env']['metron_pid_dir']
metron_rest_port = status_params.metron_rest_port
metron_management_ui_host = status_params.metron_management_ui_host
metron_management_ui_port = status_params.metron_management_ui_port
metron_alerts_ui_host = status_params.metron_alerts_ui_host
metron_alerts_ui_port = status_params.metron_alerts_ui_port
metron_jvm_flags = config['configurations']['metron-rest-env']['metron_jvm_flags']
metron_spring_profiles_active = config['configurations']['metron-rest-env']['metron_spring_profiles_active']
metron_jdbc_driver = config['configurations']['metron-rest-env']['metron_jdbc_driver']
metron_jdbc_url = config['configurations']['metron-rest-env']['metron_jdbc_url']
metron_jdbc_username = config['configurations']['metron-rest-env']['metron_jdbc_username']
metron_jdbc_password = config['configurations']['metron-rest-env']['metron_jdbc_password']
metron_jdbc_platform = config['configurations']['metron-rest-env']['metron_jdbc_platform']
metron_jdbc_client_path = config['configurations']['metron-rest-env']['metron_jdbc_client_path']
metron_spring_options = config['configurations']['metron-rest-env']['metron_spring_options']
metron_escalation_topic = config['configurations']['metron-rest-env']['metron_escalation_topic']
metron_config_path = metron_home + '/config'
metron_zookeeper_config_dir = status_params.metron_zookeeper_config_dir
metron_zookeeper_config_path = status_params.metron_zookeeper_config_path
# indicates if zk_load_configs.sh --mode PUSH has been executed
zk_configured_flag_file = status_params.zk_configured_flag_file
parsers_configured_flag_file = status_params.parsers_configured_flag_file
parsers_acl_configured_flag_file = status_params.parsers_acl_configured_flag_file
enrichment_kafka_configured_flag_file = status_params.enrichment_kafka_configured_flag_file
enrichment_kafka_acl_configured_flag_file = status_params.enrichment_kafka_acl_configured_flag_file
enrichment_hbase_configured_flag_file = status_params.enrichment_hbase_configured_flag_file
enrichment_hbase_acl_configured_flag_file = status_params.enrichment_hbase_acl_configured_flag_file
enrichment_geo_configured_flag_file = status_params.enrichment_geo_configured_flag_file
indexing_configured_flag_file = status_params.indexing_configured_flag_file
indexing_acl_configured_flag_file = status_params.indexing_acl_configured_flag_file
indexing_hbase_configured_flag_file = status_params.indexing_hbase_configured_flag_file
indexing_hbase_acl_configured_flag_file = status_params.indexing_hbase_acl_configured_flag_file
indexing_hdfs_perm_configured_flag_file = status_params.indexing_hdfs_perm_configured_flag_file
elasticsearch_template_installed_flag_file = status_params.elasticsearch_template_installed_flag_file
rest_kafka_configured_flag_file = status_params.rest_kafka_configured_flag_file
rest_kafka_acl_configured_flag_file = status_params.rest_kafka_acl_configured_flag_file
rest_hbase_configured_flag_file = status_params.rest_hbase_configured_flag_file
rest_hbase_acl_configured_flag_file = status_params.rest_hbase_acl_configured_flag_file
global_properties_template = config['configurations']['metron-env']['elasticsearch-properties']
# Elasticsearch hosts and port management
es_cluster_name = config['configurations']['metron-env']['es_cluster_name']
es_hosts = config['configurations']['metron-env']['es_hosts']
es_host_list = es_hosts.split(",")
es_binary_port = config['configurations']['metron-env']['es_binary_port']
es_url = ",".join([host + ":" + es_binary_port for host in es_host_list])
es_http_port = config['configurations']['metron-env']['es_http_port']
es_http_url = es_host_list[0] + ":" + es_http_port
es_date_format = config['configurations']['metron-env']['es_date_format']
# hadoop params
stack_root = Script.get_stack_root()
# This is the cluster group named 'hadoop'. Its members are the stack process user ids, not individual users.
# The config name 'user_group' is out of our control and a bit misleading, so it is renamed to 'hadoop_group'.
hadoop_group = config['configurations']['cluster-env']['user_group']
hadoop_home_dir = stack_select.get_hadoop_dir("home")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
kafka_home = os.path.join(stack_root, "current", "kafka-broker")
kafka_bin_dir = os.path.join(kafka_home, "bin")
# zookeeper
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
has_zk_host = len(zk_hosts) > 0
zookeeper_quorum = None
if has_zk_host:
if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
else:
zookeeper_clientPort = '2181'
zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
  # append the client port for the last host as well
zookeeper_quorum += ':' + zookeeper_clientPort
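# e.g. with two hypothetical hosts ['zk1', 'zk2'] and the default client port,
# zookeeper_quorum becomes 'zk1:2181,zk2:2181'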
# Storm
storm_rest_addr = status_params.storm_rest_addr
# Zeppelin
zeppelin_server_url = status_params.zeppelin_server_url
# Kafka
kafka_hosts = default("/clusterHostInfo/kafka_broker_hosts", [])
has_kafka_host = len(kafka_hosts) > 0
kafka_brokers = None
if has_kafka_host:
if 'port' in config['configurations']['kafka-broker']:
kafka_broker_port = config['configurations']['kafka-broker']['port']
else:
kafka_broker_port = '6667'
kafka_brokers = (':' + kafka_broker_port + ',').join(config['clusterHostInfo']['kafka_broker_hosts'])
kafka_brokers += ':' + kafka_broker_port
metron_apps_hdfs_dir = config['configurations']['metron-env']['metron_apps_hdfs_dir']
# the double "format" is not an error - we are pulling in a jinja-templated param. This is a bit of a hack, but works
# well enough until we find a better way via Ambari
metron_temp_grok_path = format(format(config['configurations']['metron-rest-env']['metron_temp_grok_path']))
metron_topic_retention = config['configurations']['metron-env']['metron_topic_retention']
local_grok_patterns_dir = format("{metron_home}/patterns")
hdfs_grok_patterns_dir = format("{metron_apps_hdfs_dir}/patterns")
# for create_hdfs_directory
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
# create partial functions with common arguments for every HdfsResource call
# to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled=security_enabled,
keytab=hdfs_user_keytab,
kinit_path_local=kinit_path_local,
hadoop_bin_dir=hadoop_bin_dir,
hadoop_conf_dir=hadoop_conf_dir,
principal_name=hdfs_principal_name,
hdfs_site=hdfs_site,
default_fs=default_fs,
immutable_paths=get_not_managed_resources(),
dfs_type=dfs_type
)
# Metron HBase configuration
enrichment_hbase_provider_impl = 'org.apache.metron.hbase.HTableProvider'
enrichment_hbase_table = status_params.enrichment_hbase_table
enrichment_hbase_cf = status_params.enrichment_hbase_cf
update_hbase_table = status_params.update_hbase_table
update_hbase_cf = status_params.update_hbase_cf
threatintel_hbase_table = status_params.threatintel_hbase_table
threatintel_hbase_cf = status_params.threatintel_hbase_cf
# Kafka Topics
ambari_kafka_service_check_topic = 'ambari_kafka_service_check'
consumer_offsets_topic = '__consumer_offsets'
# ES Templates
bro_index_path = tmp_dir + "/bro_index.template"
snort_index_path = tmp_dir + "/snort_index.template"
yaf_index_path = tmp_dir + "/yaf_index.template"
error_index_path = tmp_dir + "/error_index.template"
meta_index_path = tmp_dir + "/metaalert_index.template"
# Zeppelin Notebooks
metron_config_zeppelin_path = format("{metron_config_path}/zeppelin")
zeppelin_shiro_ini_content = status_params.zeppelin_shiro_ini_content
# kafka_security
kafka_security_protocol = config['configurations']['kafka-broker'].get('security.inter.broker.protocol', 'PLAINTEXT')
kafka_user = config['configurations']['kafka-env']['kafka_user']
storm_user = config['configurations']['storm-env']['storm_user']
# HBase user table creation and ACLs
hbase_user = config['configurations']['hbase-env']['hbase_user']
# Security
security_enabled = status_params.security_enabled
client_jaas_path = metron_home + '/client_jaas.conf'
client_jaas_arg = '-Djava.security.auth.login.config=' + metron_home + '/client_jaas.conf'
enrichment_topology_worker_childopts = client_jaas_arg if security_enabled else ''
profiler_topology_worker_childopts = client_jaas_arg if security_enabled else ''
indexing_topology_worker_childopts = client_jaas_arg if security_enabled else ''
metron_jvm_flags += (' ' + client_jaas_arg) if security_enabled else ''
topology_auto_credentials = config['configurations']['storm-site'].get('nimbus.credential.renewers.classes', [])
# Needed for storm.config, because it needs Java String
topology_auto_credentials_double_quotes = str(topology_auto_credentials).replace("'", '"')
if security_enabled:
hostname_lowercase = config['hostname'].lower()
metron_principal_name = status_params.metron_principal_name
metron_keytab_path = status_params.metron_keytab_path
kinit_path_local = status_params.kinit_path_local
hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
hbase_keytab_path = config['configurations']['hbase-env']['hbase_user_keytab']
kafka_principal_raw = config['configurations']['kafka-env']['kafka_principal_name']
kafka_principal_name = kafka_principal_raw.replace('_HOST', hostname_lowercase)
kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
nimbus_seeds = config['configurations']['storm-site']['nimbus.seeds']
# Management UI
metron_rest_host = default("/clusterHostInfo/metron_rest_hosts", [hostname])[0]
# REST
metron_rest_pid_dir = config['configurations']['metron-rest-env']['metron_rest_pid_dir']
metron_rest_pid = 'metron-rest.pid'
metron_indexing_classpath = config['configurations']['metron-rest-env']['metron_indexing_classpath']
metron_rest_classpath = config['configurations']['metron-rest-env']['metron_rest_classpath']
metron_sysconfig = config['configurations']['metron-rest-env']['metron_sysconfig']
user_settings_hbase_table = status_params.user_settings_hbase_table
user_settings_hbase_cf = status_params.user_settings_hbase_cf
# Enrichment
metron_enrichment_topology = status_params.metron_enrichment_topology
geoip_url = config['configurations']['metron-enrichment-env']['geoip_url']
enrichment_host_known_hosts = config['configurations']['metron-enrichment-env']['enrichment_host_known_hosts']
# Enrichment - Kafka
enrichment_kafka_start = config['configurations']['metron-enrichment-env']['enrichment_kafka_start']
enrichment_input_topic = status_params.enrichment_input_topic
enrichment_output_topic = config['configurations']['metron-enrichment-env']['enrichment_output_topic']
enrichment_error_topic = config['configurations']['metron-enrichment-env']['enrichment_error_topic']
threatintel_error_topic = config['configurations']['metron-enrichment-env']['threatintel_error_topic']
# Enrichment - Storm common parameters
enrichment_workers = config['configurations']['metron-enrichment-env']['enrichment_workers']
enrichment_acker_executors = config['configurations']['metron-enrichment-env']['enrichment_acker_executors']
if enrichment_topology_worker_childopts:
enrichment_topology_worker_childopts += ' '
enrichment_topology_worker_childopts += config['configurations']['metron-enrichment-env']['enrichment_topology_worker_childopts']
enrichment_topology_max_spout_pending = config['configurations']['metron-enrichment-env']['enrichment_topology_max_spout_pending']
enrichment_topology = config['configurations']['metron-enrichment-env']['enrichment_topology']
# Enrichment - Split Join topology
enrichment_join_cache_size = config['configurations']['metron-enrichment-env']['enrichment_join_cache_size']
threatintel_join_cache_size = config['configurations']['metron-enrichment-env']['threatintel_join_cache_size']
enrichment_kafka_spout_parallelism = config['configurations']['metron-enrichment-env']['enrichment_kafka_spout_parallelism']
enrichment_split_parallelism = config['configurations']['metron-enrichment-env']['enrichment_split_parallelism']
enrichment_stellar_parallelism = config['configurations']['metron-enrichment-env']['enrichment_stellar_parallelism']
enrichment_join_parallelism = config['configurations']['metron-enrichment-env']['enrichment_join_parallelism']
threat_intel_split_parallelism = config['configurations']['metron-enrichment-env']['threat_intel_split_parallelism']
threat_intel_stellar_parallelism = config['configurations']['metron-enrichment-env']['threat_intel_stellar_parallelism']
threat_intel_join_parallelism = config['configurations']['metron-enrichment-env']['threat_intel_join_parallelism']
kafka_writer_parallelism = config['configurations']['metron-enrichment-env']['kafka_writer_parallelism']
# Enrichment - Unified topology
unified_kafka_spout_parallelism = config['configurations']['metron-enrichment-env']['unified_kafka_spout_parallelism']
unified_enrichment_parallelism = config['configurations']['metron-enrichment-env']['unified_enrichment_parallelism']
unified_threat_intel_parallelism = config['configurations']['metron-enrichment-env']['unified_threat_intel_parallelism']
unified_kafka_writer_parallelism = config['configurations']['metron-enrichment-env']['unified_kafka_writer_parallelism']
unified_enrichment_cache_size = config['configurations']['metron-enrichment-env']['unified_enrichment_cache_size']
unified_threat_intel_cache_size = config['configurations']['metron-enrichment-env']['unified_threat_intel_cache_size']
unified_enrichment_threadpool_size = config['configurations']['metron-enrichment-env']['unified_enrichment_threadpool_size']
unified_enrichment_threadpool_type = config['configurations']['metron-enrichment-env']['unified_enrichment_threadpool_type']
# Profiler
metron_profiler_topology = 'profiler'
profiler_input_topic = config['configurations']['metron-enrichment-env']['enrichment_output_topic']
profiler_kafka_start = config['configurations']['metron-profiler-env']['profiler_kafka_start']
profiler_period_duration = config['configurations']['metron-profiler-env']['profiler_period_duration']
profiler_period_units = config['configurations']['metron-profiler-env']['profiler_period_units']
profiler_window_duration = config['configurations']['metron-profiler-env']['profiler_window_duration']
profiler_window_units = config['configurations']['metron-profiler-env']['profiler_window_units']
profiler_ttl = config['configurations']['metron-profiler-env']['profiler_ttl']
profiler_ttl_units = config['configurations']['metron-profiler-env']['profiler_ttl_units']
profiler_hbase_batch = config['configurations']['metron-profiler-env']['profiler_hbase_batch']
profiler_hbase_flush_interval = config['configurations']['metron-profiler-env']['profiler_hbase_flush_interval']
profiler_topology_workers = config['configurations']['metron-profiler-env']['profiler_topology_workers']
profiler_acker_executors = config['configurations']['metron-profiler-env']['profiler_acker_executors']
profiler_hbase_table = config['configurations']['metron-profiler-env']['profiler_hbase_table']
profiler_hbase_cf = config['configurations']['metron-profiler-env']['profiler_hbase_cf']
profiler_configured_flag_file = status_params.profiler_configured_flag_file
profiler_acl_configured_flag_file = status_params.profiler_acl_configured_flag_file
profiler_hbase_configured_flag_file = status_params.profiler_hbase_configured_flag_file
profiler_hbase_acl_configured_flag_file = status_params.profiler_hbase_acl_configured_flag_file
if profiler_topology_worker_childopts:
profiler_topology_worker_childopts += ' '
profiler_topology_worker_childopts += config['configurations']['metron-profiler-env']['profiler_topology_worker_childopts']
profiler_max_routes_per_bolt=config['configurations']['metron-profiler-env']['profiler_max_routes_per_bolt']
profiler_window_lag=config['configurations']['metron-profiler-env']['profiler_window_lag']
profiler_window_lag_units=config['configurations']['metron-profiler-env']['profiler_window_lag_units']
profiler_topology_message_timeout_secs=config['configurations']['metron-profiler-env']['profiler_topology_message_timeout_secs']
profiler_topology_max_spout_pending=config['configurations']['metron-profiler-env']['profiler_topology_max_spout_pending']
# Indexing
ra_indexing_kafka_start = config['configurations']['metron-indexing-env']['ra_indexing_kafka_start']
batch_indexing_kafka_start = config['configurations']['metron-indexing-env']['batch_indexing_kafka_start']
indexing_input_topic = status_params.indexing_input_topic
indexing_error_topic = config['configurations']['metron-indexing-env']['indexing_error_topic']
metron_random_access_indexing_topology = status_params.metron_random_access_indexing_topology
metron_batch_indexing_topology = status_params.metron_batch_indexing_topology
ra_indexing_writer_class_name = config['configurations']['metron-indexing-env']['ra_indexing_writer_class_name']
batch_indexing_writer_class_name = config['configurations']['metron-indexing-env']['batch_indexing_writer_class_name']
ra_indexing_workers = config['configurations']['metron-indexing-env']['ra_indexing_workers']
batch_indexing_workers = config['configurations']['metron-indexing-env']['batch_indexing_workers']
ra_indexing_acker_executors = config['configurations']['metron-indexing-env']['ra_indexing_acker_executors']
batch_indexing_acker_executors = config['configurations']['metron-indexing-env']['batch_indexing_acker_executors']
if indexing_topology_worker_childopts:
indexing_topology_worker_childopts += ' '
indexing_topology_worker_childopts += config['configurations']['metron-indexing-env']['indexing_topology_worker_childopts']
ra_indexing_topology_max_spout_pending = config['configurations']['metron-indexing-env']['ra_indexing_topology_max_spout_pending']
batch_indexing_topology_max_spout_pending = config['configurations']['metron-indexing-env']['batch_indexing_topology_max_spout_pending']
ra_indexing_kafka_spout_parallelism = config['configurations']['metron-indexing-env']['ra_indexing_kafka_spout_parallelism']
batch_indexing_kafka_spout_parallelism = config['configurations']['metron-indexing-env']['batch_indexing_kafka_spout_parallelism']
ra_indexing_writer_parallelism = config['configurations']['metron-indexing-env']['ra_indexing_writer_parallelism']
hdfs_writer_parallelism = config['configurations']['metron-indexing-env']['hdfs_writer_parallelism']
# the double "format" is not an error - we are pulling in a jinja-templated param. This is a bit of a hack, but works
# well enough until we find a better way via Ambari
metron_apps_indexed_hdfs_dir = format(format(config['configurations']['metron-indexing-env']['metron_apps_indexed_hdfs_dir']))
bolt_hdfs_rotation_policy = config['configurations']['metron-indexing-env']['bolt_hdfs_rotation_policy']
bolt_hdfs_rotation_policy_units = config['configurations']['metron-indexing-env']['bolt_hdfs_rotation_policy_units']
bolt_hdfs_rotation_policy_count = config['configurations']['metron-indexing-env']['bolt_hdfs_rotation_policy_count']
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF Utilities.
@since: 0.1.0
"""
import struct
import calendar
import datetime
import types
import inspect
import pyamf
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
set
except NameError:
from sets import Set as set
#: XML types.
xml_types = None
ET = None
#: On some Python versions retrieving a negative timestamp, like
#: C{datetime.datetime.utcfromtimestamp(-31536000.0)} is broken.
negative_timestamp_broken = False
int_types = [int]
str_types = [str]
# py3k support
try:
int_types.append(long)
except NameError:
pass
try:
str_types.append(unicode)
except NameError:
pass
#: Numeric types.
int_types = tuple(int_types)
#: String types.
str_types = tuple(str_types)
PosInf = 1e300000
NegInf = -1e300000
# we do this instead of float('nan') because windows throws a wobbler.
NaN = PosInf / PosInf
class StringIOProxy(object):
"""
I am a C{StringIO} type object containing byte data from the AMF stream.
@see: U{ByteArray on OSFlash (external)
<http://osflash.org/documentation/amf3#x0c_-_bytearray>}
@see: U{Parsing ByteArrays on OSFlash (external)
<http://osflash.org/documentation/amf3/parsing_byte_arrays>}
"""
_wrapped_class = StringIO
def __init__(self, buf=None):
"""
@raise TypeError: Unable to coerce C{buf} to C{StringIO}.
"""
self._buffer = StringIOProxy._wrapped_class()
if isinstance(buf, (str, unicode)):
self._buffer.write(buf)
elif hasattr(buf, 'getvalue'):
self._buffer.write(buf.getvalue())
elif hasattr(buf, 'read') and hasattr(buf, 'seek') and hasattr(buf, 'tell'):
old_pos = buf.tell()
buf.seek(0)
self._buffer.write(buf.read())
buf.seek(old_pos)
elif buf is None:
pass
else:
raise TypeError("Unable to coerce buf->StringIO")
self._get_len()
self._len_changed = False
self._buffer.seek(0, 0)
def getvalue(self):
"""
Get raw data from buffer.
"""
return self._buffer.getvalue()
def read(self, n=-1):
"""
Reads C{n} bytes from the stream.
"""
bytes = self._buffer.read(n)
return bytes
def seek(self, pos, mode=0):
"""
        Sets the file-pointer offset at which the next read or write operation
        will occur; C{mode} follows the usual C{seek} whence semantics
        (0 = absolute, 1 = relative to the current position, 2 = from the end).
@param pos:
@type pos: C{int}
@param mode:
@type mode: C{int}
"""
return self._buffer.seek(pos, mode)
def tell(self):
"""
Returns the position of the stream pointer.
"""
return self._buffer.tell()
def truncate(self, size=0):
"""
Truncates the stream to the specified length.
@param size: The length of the stream, in bytes.
@type size: C{int}
"""
if size == 0:
self._buffer = StringIOProxy._wrapped_class()
self._len_changed = True
return
cur_pos = self.tell()
self.seek(0)
buf = self.read(size)
self._buffer = StringIOProxy._wrapped_class()
self._buffer.write(buf)
self.seek(cur_pos)
self._len_changed = True
def write(self, s):
"""
Writes the content of the specified C{s} into this buffer.
@param s:
@type s:
"""
self._buffer.write(s)
self._len_changed = True
def _get_len(self):
"""
Return total number of bytes in buffer.
"""
if hasattr(self._buffer, 'len'):
self._len = self._buffer.len
return
old_pos = self._buffer.tell()
self._buffer.seek(0, 2)
self._len = self._buffer.tell()
self._buffer.seek(old_pos)
def __len__(self):
if not self._len_changed:
return self._len
self._get_len()
self._len_changed = False
return self._len
def consume(self):
"""
        Discards everything in the stream from position 0 up to C{tell()},
        keeping only the remaining tail. The stream pointer is set to 0 at the
        end of this function.
@since: 0.4
"""
try:
bytes = self.read()
except IOError:
bytes = ''
self.truncate()
if len(bytes) > 0:
self.write(bytes)
self.seek(0)
class DataTypeMixIn(object):
"""
Provides methods for reading and writing basic data types for file-like
objects.
@ivar endian: Byte ordering used to represent the data. Default byte order
is L{ENDIAN_NETWORK}.
@type endian: C{str}
"""
#: Network byte order
ENDIAN_NETWORK = "!"
#: Native byte order
ENDIAN_NATIVE = "@"
#: Little endian
ENDIAN_LITTLE = "<"
#: Big endian
ENDIAN_BIG = ">"
endian = ENDIAN_NETWORK
def _read(self, length):
"""
Reads C{length} bytes from the stream. If an attempt to read past the
end of the buffer is made, L{IOError} is raised.
"""
bytes = self.read(length)
if len(bytes) != length:
self.seek(0 - len(bytes), 1)
raise IOError("Tried to read %d byte(s) from the stream" % length)
return bytes
def _is_big_endian(self):
"""
Whether this system is big endian or not.
@rtype: C{bool}
"""
if self.endian == DataTypeMixIn.ENDIAN_NATIVE:
return DataTypeMixIn._system_endian == DataTypeMixIn.ENDIAN_BIG
return self.endian in (DataTypeMixIn.ENDIAN_BIG, DataTypeMixIn.ENDIAN_NETWORK)
def read_uchar(self):
"""
Reads an C{unsigned char} from the stream.
"""
return struct.unpack("B", self._read(1))[0]
def write_uchar(self, c):
"""
Writes an C{unsigned char} to the stream.
@param c: Unsigned char
@type c: C{int}
@raise TypeError: Unexpected type for int C{c}.
@raise OverflowError: Not in range.
"""
if type(c) not in int_types:
raise TypeError('expected an int (got:%r)' % (type(c),))
if not 0 <= c <= 255:
raise OverflowError("Not in range, %d" % c)
self.write(struct.pack("B", c))
def read_char(self):
"""
Reads a C{char} from the stream.
"""
return struct.unpack("b", self._read(1))[0]
def write_char(self, c):
"""
Write a C{char} to the stream.
@param c: char
@type c: C{int}
@raise TypeError: Unexpected type for int C{c}.
@raise OverflowError: Not in range.
"""
if type(c) not in int_types:
raise TypeError('expected an int (got:%r)' % (type(c),))
if not -128 <= c <= 127:
raise OverflowError("Not in range, %d" % c)
self.write(struct.pack("b", c))
def read_ushort(self):
"""
Reads a 2 byte unsigned integer from the stream.
"""
return struct.unpack("%sH" % self.endian, self._read(2))[0]
def write_ushort(self, s):
"""
Writes a 2 byte unsigned integer to the stream.
@param s: 2 byte unsigned integer
@type s: C{int}
@raise TypeError: Unexpected type for int C{s}.
@raise OverflowError: Not in range.
"""
if type(s) not in int_types:
raise TypeError('expected an int (got:%r)' % (type(s),))
if not 0 <= s <= 65535:
raise OverflowError("Not in range, %d" % s)
self.write(struct.pack("%sH" % self.endian, s))
def read_short(self):
"""
Reads a 2 byte integer from the stream.
"""
return struct.unpack("%sh" % self.endian, self._read(2))[0]
def write_short(self, s):
"""
Writes a 2 byte integer to the stream.
@param s: 2 byte integer
@type s: C{int}
@raise TypeError: Unexpected type for int C{s}.
@raise OverflowError: Not in range.
"""
if type(s) not in int_types:
raise TypeError('expected an int (got:%r)' % (type(s),))
if not -32768 <= s <= 32767:
raise OverflowError("Not in range, %d" % s)
self.write(struct.pack("%sh" % self.endian, s))
def read_ulong(self):
"""
Reads a 4 byte unsigned integer from the stream.
"""
return struct.unpack("%sL" % self.endian, self._read(4))[0]
def write_ulong(self, l):
"""
Writes a 4 byte unsigned integer to the stream.
@param l: 4 byte unsigned integer
@type l: C{int}
@raise TypeError: Unexpected type for int C{l}.
@raise OverflowError: Not in range.
"""
if type(l) not in int_types:
raise TypeError('expected an int (got:%r)' % (type(l),))
if not 0 <= l <= 4294967295:
raise OverflowError("Not in range, %d" % l)
self.write(struct.pack("%sL" % self.endian, l))
def read_long(self):
"""
Reads a 4 byte integer from the stream.
"""
return struct.unpack("%sl" % self.endian, self._read(4))[0]
def write_long(self, l):
"""
Writes a 4 byte integer to the stream.
@param l: 4 byte integer
@type l: C{int}
@raise TypeError: Unexpected type for int C{l}.
@raise OverflowError: Not in range.
"""
if type(l) not in int_types:
raise TypeError('expected an int (got:%r)' % (type(l),))
if not -2147483648 <= l <= 2147483647:
raise OverflowError("Not in range, %d" % l)
self.write(struct.pack("%sl" % self.endian, l))
def read_24bit_uint(self):
"""
Reads a 24 bit unsigned integer from the stream.
@since: 0.4
"""
order = None
if not self._is_big_endian():
order = [0, 8, 16]
else:
order = [16, 8, 0]
n = 0
for x in order:
n += (self.read_uchar() << x)
return n
def write_24bit_uint(self, n):
"""
Writes a 24 bit unsigned integer to the stream.
@since: 0.4
@param n: 24 bit unsigned integer
@type n: C{int}
@raise TypeError: Unexpected type for int C{n}.
@raise OverflowError: Not in range.
"""
if type(n) not in int_types:
raise TypeError('expected an int (got:%r)' % (type(n),))
if not 0 <= n <= 0xffffff:
raise OverflowError("n is out of range")
order = None
if not self._is_big_endian():
order = [0, 8, 16]
else:
order = [16, 8, 0]
for x in order:
self.write_uchar((n >> x) & 0xff)
def read_24bit_int(self):
"""
Reads a 24 bit integer from the stream.
@since: 0.4
"""
n = self.read_24bit_uint()
if n & 0x800000 != 0:
# the int is signed
n -= 0x1000000
return n
def write_24bit_int(self, n):
"""
Writes a 24 bit integer to the stream.
@since: 0.4
@param n: 24 bit integer
@type n: C{int}
@raise TypeError: Unexpected type for int C{n}.
@raise OverflowError: Not in range.
"""
if type(n) not in int_types:
raise TypeError('expected an int (got:%r)' % (type(n),))
if not -8388608 <= n <= 8388607:
raise OverflowError("n is out of range")
order = None
if not self._is_big_endian():
order = [0, 8, 16]
else:
order = [16, 8, 0]
if n < 0:
n += 0x1000000
for x in order:
self.write_uchar((n >> x) & 0xff)
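    # Worked example for the pair above: write_24bit_int(-1) adds 0x1000000 to
    # obtain 0xffffff and emits the bytes ff ff ff; read_24bit_int() sees the
    # sign bit 0x800000 set and subtracts 0x1000000, recovering -1.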
def read_double(self):
"""
Reads an 8 byte float from the stream.
"""
return struct.unpack("%sd" % self.endian, self._read(8))[0]
def write_double(self, d):
"""
Writes an 8 byte float to the stream.
@param d: 8 byte float
@type d: C{float}
@raise TypeError: Unexpected type for float C{d}.
"""
if not type(d) is float:
raise TypeError('expected a float (got:%r)' % (type(d),))
self.write(struct.pack("%sd" % self.endian, d))
def read_float(self):
"""
Reads a 4 byte float from the stream.
"""
return struct.unpack("%sf" % self.endian, self._read(4))[0]
def write_float(self, f):
"""
Writes a 4 byte float to the stream.
@param f: 4 byte float
@type f: C{float}
@raise TypeError: Unexpected type for float C{f}.
"""
if type(f) is not float:
raise TypeError('expected a float (got:%r)' % (type(f),))
self.write(struct.pack("%sf" % self.endian, f))
def read_utf8_string(self, length):
"""
Reads a UTF-8 string from the stream.
@rtype: C{unicode}
"""
str = struct.unpack("%s%ds" % (self.endian, length), self.read(length))[0]
return unicode(str, "utf8")
def write_utf8_string(self, u):
"""
Writes a unicode object to the stream in UTF-8.
@param u: unicode object
@raise TypeError: Unexpected type for str C{u}.
"""
if type(u) not in str_types:
raise TypeError('expected a str (got:%r)' % (type(u),))
bytes = u.encode("utf8")
self.write(struct.pack("%s%ds" % (self.endian, len(bytes)), bytes))
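# Detect the machine's native byte order once at import time: pack 1 as a
# native-order unsigned short and check whether the low byte comes first.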
if struct.pack('@H', 1)[0] == '\x01':
DataTypeMixIn._system_endian = DataTypeMixIn.ENDIAN_LITTLE
else:
DataTypeMixIn._system_endian = DataTypeMixIn.ENDIAN_BIG
class BufferedByteStream(StringIOProxy, DataTypeMixIn):
"""
An extension of C{StringIO}.
Features:
- Raises L{IOError} if reading past end.
- Allows you to C{peek()} at the next byte.
@see: L{cBufferedByteStream<cpyamf.util.cBufferedByteStream>}
"""
def __init__(self, buf=None):
"""
@param buf: Initial byte stream.
@type buf: C{str} or C{StringIO} instance
"""
StringIOProxy.__init__(self, buf=buf)
self.seek(0)
def read(self, length=-1):
"""
        Reads up to C{length} bytes from the stream.
@raise IOError: Attempted to read past the end of the buffer.
"""
if length == -1 and self.at_eof():
raise IOError('Attempted to read from the buffer but already at '
'the end')
elif length > 0 and self.tell() + length > len(self):
raise IOError('Attempted to read %d bytes from the buffer but '
'only %d remain' % (length, len(self) - self.tell()))
return StringIOProxy.read(self, length)
def peek(self, size=1):
"""
        Looks C{size} bytes ahead in the stream, returning what it finds and
        restoring the stream pointer to its initial position.
@param size: Default is 1.
@type size: C{int}
@raise ValueError: Trying to peek backwards.
@rtype:
@return: Bytes.
"""
if size == -1:
return self.peek(len(self) - self.tell())
if size < -1:
raise ValueError("Cannot peek backwards")
bytes = ''
pos = self.tell()
while not self.at_eof() and len(bytes) != size:
bytes += self.read(1)
self.seek(pos)
return bytes
def remaining(self):
"""
Returns number of remaining bytes.
@rtype: C{number}
@return: Number of remaining bytes.
"""
return len(self) - self.tell()
def at_eof(self):
"""
Returns C{True} if the internal pointer is at the end of the stream.
@rtype: C{bool}
"""
return self.tell() == len(self)
def append(self, data):
"""
Append data to the end of the stream. The pointer will not move if
this operation is successful.
@param data: The data to append to the stream.
@type data: C{str} or C{unicode}
@raise TypeError: data is not C{str} or C{unicode}
"""
t = self.tell()
# seek to the end of the stream
self.seek(0, 2)
if hasattr(data, 'getvalue'):
self.write_utf8_string(data.getvalue())
else:
self.write_utf8_string(data)
self.seek(t)
def __add__(self, other):
old_pos = self.tell()
old_other_pos = other.tell()
new = BufferedByteStream(self)
other.seek(0)
new.seek(0, 2)
new.write(other.read())
self.seek(old_pos)
other.seek(old_other_pos)
new.seek(0)
return new
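# A small, hypothetical round-trip sketch of the BufferedByteStream API defined
# above: write a couple of values, rewind and read them back.
def _example_buffered_byte_stream():
    stream = BufferedByteStream()
    stream.write_ushort(1025)
    stream.write_utf8_string(u'pyamf')
    stream.seek(0)
    assert stream.read_ushort() == 1025
    assert stream.peek(1) == 'p'
    assert stream.read_utf8_string(5) == u'pyamf'
    assert stream.at_eof()
    return stream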
class IndexedCollection(object):
"""
A class that provides a quick and clean way to store references and
referenced objects.
@note: All attributes on the instance are private.
"""
def __init__(self, use_hash=False):
if use_hash is True:
self.func = hash
else:
self.func = id
self.clear()
def clear(self):
"""
Clears the index.
"""
self.list = []
self.dict = {}
def getByReference(self, ref):
"""
Returns an object based on the reference.
If the reference is not found, C{None} will be returned.
@raise TypeError: Bad reference type.
"""
if not isinstance(ref, int_types):
raise TypeError("Bad reference type, got %s" % (type(ref),))
try:
return self.list[ref]
except IndexError:
return None
def getReferenceTo(self, obj):
"""
Returns a reference to C{obj} if it is contained within this index.
If the object is not contained within the collection, C{None} will be
returned.
@param obj: The object to find the reference to
        @return: An C{int} representing the reference or C{None} if the object
            is not contained within the collection.
"""
return self.dict.get(self.func(obj), None)
def append(self, obj):
"""
Appends C{obj} to this index.
@note: Uniqueness is not checked
@return: The reference to C{obj} in this index.
"""
h = self.func(obj)
self.list.append(obj)
idx = len(self.list) - 1
self.dict[h] = idx
return idx
def __eq__(self, other):
if isinstance(other, list):
return self.list == other
elif isinstance(other, dict):
return self.dict == other
return False
def __len__(self):
return len(self.list)
def __getitem__(self, idx):
return self.getByReference(idx)
def __contains__(self, obj):
r = self.getReferenceTo(obj)
return r is not None
def __repr__(self):
return '<%s list=%r dict=%r>' % (self.__class__.__name__, self.list, self.dict)
def __iter__(self):
return iter(self.list)
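# A minimal, hypothetical sketch of IndexedCollection: append an object, then
# look it up both by reference index and by identity.
def _example_indexed_collection():
    collection = IndexedCollection()
    obj = object()
    ref = collection.append(obj)
    assert collection.getReferenceTo(obj) == ref
    assert collection.getByReference(ref) is obj
    assert obj in collection
    return collection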
def find_xml_lib():
"""
    Run through a predefined order of the various C{ElementTree}
    implementations so that any type can be encoded, but PyAMF will return
    elements of the first implementation found.
    We work through the C implementations first, then the pure Python
    versions. The downside is that up to three libraries that are never used
    may be loaded into memory, but the libs are (relatively) small and the
    flexibility this gives seems to outweigh the cost. Time will tell.
@since: 0.4
"""
global xml_types, ET
xml_types = []
try:
import xml.etree.cElementTree as cET
ET = cET
xml_types.append(type(cET.Element('foo')))
except ImportError:
pass
try:
import cElementTree as cET
if ET is None:
ET = cET
xml_types.append(type(cET.Element('foo')))
except ImportError:
pass
try:
import xml.etree.ElementTree as pET
if ET is None:
ET = pET
xml_types.append(pET._ElementInterface)
except ImportError:
pass
try:
import elementtree.ElementTree as pET
if ET is None:
ET = pET
xml_types.append(pET._ElementInterface)
except ImportError:
pass
for x in xml_types[:]:
# hack for jython
if x.__name__ == 'instance':
xml_types.remove(x)
xml_types = tuple(xml_types)
return xml_types
def hexdump(data):
"""
    Get hexadecimal representation of C{StringIO} data.
    @type data: C{str}
    @param data: The binary data to dump.
    @rtype: C{str}
    @return: Hexadecimal string.
"""
import string
hex = ascii = buf = ""
index = 0
for c in data:
hex += "%02x " % ord(c)
if c in string.printable and c not in string.whitespace:
ascii += c
else:
ascii += "."
if len(ascii) == 16:
buf += "%04x: %s %s %s\n" % (index, hex[:24], hex[24:], ascii)
hex = ascii = ""
index += 16
if len(ascii):
buf += "%04x: %-24s %-24s %s\n" % (index, hex[:24], hex[24:], ascii)
return buf
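# Illustrative sketch (not part of the original module): hexdump renders rows of
# up to 16 bytes as "<offset>: <hex bytes>  <printable ASCII>", with
# non-printable bytes shown as '.'. For example, hexdump('spam and eggs')
# produces a single line roughly like:
#   0000: 73 70 61 6d 20 61 6e 64  20 65 67 67 73           spam.and.eggs
# (the exact spacing depends on the padding applied to the final, partial row).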
def get_timestamp(d):
"""
Returns a UTC timestamp for a C{datetime.datetime} object.
@type d: C{datetime.datetime}
@param d: The date object.
    @return: UTC timestamp in seconds since the epoch.
    @rtype: C{float}
@note: Inspiration taken from the U{Intertwingly blog
<http://intertwingly.net/blog/2007/09/02/Dealing-With-Dates>}.
"""
if isinstance(d, datetime.date) and not isinstance(d, datetime.datetime):
d = datetime.datetime.combine(d, datetime.time(0, 0, 0, 0))
msec = str(d.microsecond).rjust(6).replace(' ', '0')
return float('%s.%s' % (calendar.timegm(d.utctimetuple()), msec))
def get_datetime(secs):
"""
Return a UTC date from a timestamp.
@type secs: C{long}
@param secs: Seconds since 1970.
    @return: UTC date/time.
@rtype: C{datetime.datetime}
"""
if secs < 0 and negative_timestamp_broken:
return datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=secs)
return datetime.datetime.utcfromtimestamp(secs)
def get_properties(obj):
"""
@since: 0.5
"""
if hasattr(obj, 'keys'):
return set(obj.keys())
elif hasattr(obj, '__dict__'):
return set(obj.__dict__.keys())
return set()
def set_attrs(obj, attrs):
"""
A generic function which applies a collection of attributes C{attrs} to
object C{obj}.
@param obj: An instance implementing the C{__setattr__} function
@param attrs: A collection implementing the C{iteritems} function
@type attrs: Usually a dict
"""
if isinstance(obj, (list, dict)):
for k, v in attrs.iteritems():
obj[k] = v
return
for k, v in attrs.iteritems():
setattr(obj, k, v)
def get_class_alias(klass):
"""
    Returns an alias class suitable for C{klass}. Defaults to L{pyamf.ClassAlias}.
"""
for k, v in pyamf.ALIAS_TYPES.iteritems():
for kl in v:
if isinstance(kl, types.FunctionType):
if kl(klass) is True:
return k
            elif isinstance(kl, (type, types.ClassType, types.ObjectType)):
if issubclass(klass, kl):
return k
return pyamf.ClassAlias
def is_class_sealed(klass):
"""
Returns a C{boolean} whether or not the supplied class can accept dynamic
properties.
@rtype: C{bool}
@since: 0.5
"""
mro = inspect.getmro(klass)
new = False
if mro[-1] is object:
mro = mro[:-1]
new = True
for kls in mro:
if new and '__dict__' in kls.__dict__:
return False
if not hasattr(kls, '__slots__'):
return False
return True
def get_class_meta(klass):
"""
Returns a C{dict} containing meta data based on the supplied class, useful
for class aliasing.
@rtype: C{dict}
@since: 0.5
"""
if not isinstance(klass, (type, types.ClassType)) or klass is object:
raise TypeError('klass must be a class object, got %r' % type(klass))
meta = {
'static_attrs': None,
'exclude_attrs': None,
'readonly_attrs': None,
'proxy_attrs': None,
'amf3': None,
'dynamic': None,
'alias': None,
'external': None
}
if not hasattr(klass, '__amf__'):
return meta
a = klass.__amf__
if type(a) is dict:
in_func = lambda x: x in a
get_func = a.__getitem__
else:
in_func = lambda x: hasattr(a, x)
get_func = lambda x: getattr(a, x)
for prop in ['alias', 'amf3', 'dynamic', 'external']:
if in_func(prop):
meta[prop] = get_func(prop)
for prop in ['static', 'exclude', 'readonly', 'proxy']:
if in_func(prop):
meta[prop + '_attrs'] = list(get_func(prop))
return meta
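# Illustrative sketch (not part of the original module): a class carrying an
# __amf__ configuration and the metadata that get_class_meta() extracts from
# it. The class and attribute names are assumptions for demonstration only.
class _ExampleUser(object):
    class __amf__:
        alias = 'example.User'
        static = ('name', 'email')
        readonly = ('id',)
# get_class_meta(_ExampleUser) returns a dict with 'alias' set to
# 'example.User', 'static_attrs' set to ['name', 'email'], 'readonly_attrs'
# set to ['id'] and every other key left as None.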
def is_ET_element(obj):
"""
Determines if the supplied C{obj} param is a valid ElementTree element.
"""
return isinstance(obj, xml_types)
def is_float_broken():
"""
Older versions of Python (<=2.5) and the Windows platform are renowned for
mixing up 'special' floats. This function determines whether this is the
case.
@since: 0.4
@rtype: C{bool}
"""
global NaN
return str(NaN) != str(struct.unpack("!d", '\xff\xf8\x00\x00\x00\x00\x00\x00')[0])
def isNaN(val):
"""
@since: 0.5
"""
return str(float(val)) == str(NaN)
def isPosInf(val):
"""
@since: 0.5
"""
return str(float(val)) == str(PosInf)
def isNegInf(val):
"""
@since: 0.5
"""
return str(float(val)) == str(NegInf)
# init the module from here ..
find_xml_lib()
try:
datetime.datetime.utcfromtimestamp(-31536000.0)
except ValueError:
negative_timestamp_broken = True
if is_float_broken():
def read_double_workaround(self):
        """
        Override the L{DataTypeMixIn.read_double} method to fix problems
        with doubles by using the third-party C{fpconst} library.
        """
        global PosInf, NegInf, NaN
bytes = self.read(8)
if self._is_big_endian():
if bytes == '\xff\xf8\x00\x00\x00\x00\x00\x00':
return NaN
if bytes == '\xff\xf0\x00\x00\x00\x00\x00\x00':
return NegInf
if bytes == '\x7f\xf0\x00\x00\x00\x00\x00\x00':
return PosInf
else:
if bytes == '\x00\x00\x00\x00\x00\x00\xf8\xff':
return NaN
if bytes == '\x00\x00\x00\x00\x00\x00\xf0\xff':
return NegInf
if bytes == '\x00\x00\x00\x00\x00\x00\xf0\x7f':
return PosInf
return struct.unpack("%sd" % self.endian, bytes)[0]
DataTypeMixIn.read_double = read_double_workaround
def write_double_workaround(self, d):
"""
Override the L{DataTypeMixIn.write_double} method to fix problems
with doubles by using the third-party C{fpconst} library.
"""
if type(d) is not float:
raise TypeError('expected a float (got:%r)' % (type(d),))
if isNaN(d):
if self._is_big_endian():
self.write('\xff\xf8\x00\x00\x00\x00\x00\x00')
else:
self.write('\x00\x00\x00\x00\x00\x00\xf8\xff')
elif isNegInf(d):
if self._is_big_endian():
self.write('\xff\xf0\x00\x00\x00\x00\x00\x00')
else:
self.write('\x00\x00\x00\x00\x00\x00\xf0\xff')
elif isPosInf(d):
if self._is_big_endian():
self.write('\x7f\xf0\x00\x00\x00\x00\x00\x00')
else:
self.write('\x00\x00\x00\x00\x00\x00\xf0\x7f')
else:
write_double_workaround.old_func(self, d)
x = DataTypeMixIn.write_double
DataTypeMixIn.write_double = write_double_workaround
write_double_workaround.old_func = x
try:
from cpyamf.util import BufferedByteStream, IndexedCollection, IndexedMap
class StringIOProxy(BufferedByteStream):
_wrapped_class = None
def __init__(self, *args, **kwargs):
BufferedByteStream.__init__(self, *args, **kwargs)
self._buffer = self
class DataTypeMixIn(BufferedByteStream):
#: Network byte order
ENDIAN_NETWORK = "!"
#: Native byte order
ENDIAN_NATIVE = "@"
#: Little endian
ENDIAN_LITTLE = "<"
#: Big endian
ENDIAN_BIG = ">"
except ImportError:
pass
|
|
#!/usr/bin/env python
import os
from os.path import join as pjoin, dirname, basename
import cPickle as pickle
import argparse
import luigi
import numpy
import pandas
import ogr
from eotools.vector import retrieve_attribute_table
CONFIG = luigi.configuration.get_config()
def combine_all_cells():
# Config params
base_out_dir = CONFIG.get('work', 'output_directory')
base_out_fname = CONFIG.get('outputs', 'final_output_filename')
cells_list_fname = pjoin(base_out_dir,
CONFIG.get('outputs', 'cells_list'))
combined_cell_stats_fname = CONFIG.get('outputs',
'combined_cell_stats_filename')
vector_fname = CONFIG.get('work', 'vector_filename')
ngroups = int(CONFIG.get('internals', 'pandas_groups'))
chunksize = int(CONFIG.get('internals', 'pandas_chunksize'))
with open(cells_list_fname, 'r') as infile:
cells = pickle.load(infile)
headings = ['SID', 'Timestamp', 'Band', 'Observed_Count', 'Min', 'Max',
'Sum', 'Sum_of_Squares']
items = ['Timestamp', 'SID', 'Band']
tmp1_fname = pjoin(base_out_dir, 'tmp_combined_results.h5')
tmp1_store = pandas.HDFStore(tmp1_fname)
for cell in cells:
cell_dir = pjoin(base_out_dir, cell)
cell_stats_fname = pjoin(cell_dir, combined_cell_stats_fname)
# Open the current cells WC result file
try:
store = pandas.HDFStore(cell_stats_fname, 'r')
except IOError:
print "No stats result for cell: {}".format(cell)
continue
if '/data' in store.keys():
# We have data to retrieve
df = store['data']
df.drop('index', 1, inplace=True)
tmp1_store.append('data', df, index=False)
tmp1_store.flush()
store.close()
tmp1_store.close()
# Chunking method
# http://stackoverflow.com/questions/25459982/trouble-with-grouby-on-millions-of-keys-on-a-chunked-file-in-python-pandas/25471765#25471765
# http://stackoverflow.com/questions/15798209/pandas-group-by-query-on-large-data-in-hdfstore/15800314#15800314
    # We need to group as many records as we can into more memory-manageable
    # chunks that also balance well for I/O
tmp1_store = pandas.HDFStore(tmp1_fname)
tmp2_fname = pjoin(base_out_dir, 'tmp_grouped_results.h5')
tmp2_store = pandas.HDFStore(tmp2_fname)
for chunk in tmp1_store.select('data', chunksize=chunksize):
g = chunk.groupby(chunk['SID'] % ngroups)
for grp, grouped in g:
tmp2_store.append('group_{}'.format(int(grp)), grouped,
data_columns=headings, index=False)
tmp2_store.flush()
tmp1_store.close()
tmp2_store.close()
# Define the output file
out_fname = pjoin(base_out_dir, base_out_fname)
combined_store = pandas.HDFStore(out_fname, 'w')
new_headings = ['FID', 'Timestamp', 'Band', 'Observed_Count', 'Min', 'Max',
'Sum', 'Sum_of_Squares', 'Mean', 'Variance', 'StdDev']
# Now read the grouped data and write to disk
tmp2_store = pandas.HDFStore(tmp2_fname)
for key in tmp2_store.keys():
grouped = tmp2_store[key].groupby(items, as_index=False)
df = grouped.agg({'Observed_Count': numpy.sum, 'Min': numpy.min,
'Max': numpy.max, 'Sum': numpy.sum,
'Sum_of_Squares': numpy.sum})
        # Account for the offset between the feature and segment IDs
df['SID'] = df['SID'] - 1
# Change the segment id column name to feature id
df.rename(columns={'SID': 'FID'}, inplace=True)
# Calculate the mean, variance, stddev
df['Mean'] = df['Sum'] / df['Observed_Count']
df['Variance'] = ((df['Sum_of_Squares'] - (df['Observed_Count'] *
df['Mean']**2)) /
(df['Observed_Count'] - 1))
df['StdDev'] = numpy.sqrt(df['Variance'].values)
# Write the group to disk
combined_store.append('data', df, data_columns=new_headings)
combined_store.flush()
# Add metadata
metadata_group = 'Metadata'
metadata = {'Vector_Filename': basename(vector_fname)}
metadata = pandas.DataFrame(metadata, index=[0])
combined_store[metadata_group] = metadata
# Add the vector attribute table
attribute_table = retrieve_attribute_table(vector_fname)
combined_store['attribute_table'] = pandas.DataFrame(attribute_table)
# Save and close the file
combined_store.close()
tmp1_store.close()
tmp2_store.close()
# Clean up temporary files
os.remove(tmp1_fname)
os.remove(tmp2_fname)
def combine_all_cells_distribution():
"""
"""
# Config params
base_out_dir = CONFIG.get('work', 'output_directory')
base_out_fname = CONFIG.get('outputs', 'final_output_filename')
cells_list_fname = pjoin(base_out_dir,
CONFIG.get('outputs', 'cells_list'))
combined_cell_stats_fname = CONFIG.get('outputs',
'combined_cell_stats_filename')
vector_fname = CONFIG.get('work', 'vector_filename')
ngroups = int(CONFIG.get('internals', 'pandas_groups'))
chunksize = int(CONFIG.get('internals', 'pandas_chunksize'))
with open(cells_list_fname, 'r') as infile:
cells = pickle.load(infile)
# 1st stage, combine all the results from each cell into a single file
tmp1_fname = pjoin(base_out_dir, 'tmp_combined_results.h5')
tmp1_store = pandas.HDFStore(tmp1_fname, 'w')
for cell in cells:
cell_dir = pjoin(base_out_dir, cell)
cell_stats_fname = pjoin(cell_dir, combined_cell_stats_fname)
# Open the current cells WC result file
store = pandas.HDFStore(cell_stats_fname, 'r')
if '/data' in store.keys():
# We have data to retrieve
df = store['data']
headings = df.columns.values.tolist()
df.drop('index', 1, inplace=True)
tmp1_store.append('data', df, index=False)
tmp1_store.flush()
store.close()
tmp1_store.close()
# Chunking method
# http://stackoverflow.com/questions/25459982/trouble-with-grouby-on-millions-of-keys-on-a-chunked-file-in-python-pandas/25471765#25471765
# http://stackoverflow.com/questions/15798209/pandas-group-by-query-on-large-data-in-hdfstore/15800314#15800314
    # We need to group as many records as we can into more memory-manageable
    # chunks that also balance well for I/O
tmp1_store = pandas.HDFStore(tmp1_fname)
tmp2_fname = pjoin(base_out_dir, 'tmp_grouped_results.h5')
tmp2_store = pandas.HDFStore(tmp2_fname, 'w')
for chunk in tmp1_store.select('data', chunksize=chunksize):
g = chunk.groupby(chunk['SID'] % ngroups)
for grp, grouped in g:
tmp2_store.append('group_{}'.format(int(grp)), grouped,
data_columns=headings, index=False)
tmp2_store.flush()
tmp1_store.close()
tmp2_store.close()
    # TODO We need a generic way of allowing a user to insert custom
    #      classification headings as opposed to the numeric code,
    #      without being specific like we did for WOfS.
new_headings = ['FID' if x == 'SID' else x for x in headings]
# items = ['Timestamp', 'SID']
items = ['SID', 'Year', 'Month']
# Define the output file
out_fname = pjoin(base_out_dir, base_out_fname)
combined_store = pandas.HDFStore(out_fname, 'w')
# Now read the grouped data and write to disk
tmp2_store = pandas.HDFStore(tmp2_fname)
for key in tmp2_store.keys():
df = tmp2_store[key].groupby(items, as_index=False).sum()
        # Account for the offset between the feature and segment IDs
df['SID'] = df['SID'] - 1
# Change the segment id column name to feature id
df.rename(columns={'SID': 'FID'}, inplace=True)
combined_store.append('data', df, data_columns=new_headings)
combined_store.flush()
# Save and close the file
combined_store.close()
tmp1_store.close()
tmp2_store.close()
# Clean up temporary files
os.remove(tmp1_fname)
os.remove(tmp2_fname)
if __name__ == '__main__':
desc = ("Combines the all the statistical outputs from each cell "
"and merges the results when required.")
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--cfg', required=True,
help="The config file used to drive the workflow.")
    parser.add_argument('--distribution', action='store_true',
                        help=("If set, then stats will be combined on the "
                              "assumption that the results are for a "
                              "class distribution"))
parsed_args = parser.parse_args()
CONFIG.add_config_path(parsed_args.cfg)
if parsed_args.distribution:
combine_all_cells_distribution()
else:
combine_all_cells()
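# Example invocations (illustrative only; the config path is an assumption):
#   python <this script> --cfg /path/to/workflow.cfg
#   python <this script> --cfg /path/to/workflow.cfg --distribution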
|
|
from __future__ import unicode_literals
from __future__ import print_function
import time
import keras.backend
import keras.callbacks
import keras.constraints
import keras.layers
import keras.layers.recurrent
import keras.models
import keras.optimizers
import keras.regularizers
import numpy
import pandas
import sklearn
import sklearn.utils
import theano
from . import general
_he_activations = {"relu"}
# Normally it's better to set this to True, but it only seems to work if you edit Theano
RNN_UNROLL = True
def set_theano_float_precision(precision):
assert precision in {"float32", "float64"}
theano.config.floatX = precision
class NnRegressor(sklearn.base.BaseEstimator):
"""Wrapper for Keras feed-forward neural network for regression to enable scikit-learn grid search"""
def __init__(self, hidden_layer_sizes=(100,), hidden_units=None, dropout=None, batch_size=-1, loss="mse",
num_epochs=500, activation="relu", input_noise=0., learning_rate=0.001, verbose=0, init=None, l2=None,
batch_norm=False, early_stopping=False, clip_gradient_norm=None, assert_finite=True,
maxnorm=False, val=0., history_file=None, optimizer="adam", input_dropout=None, lr_decay=None, non_negative=False, weight_samples=False):
self.clip_gradient_norm = clip_gradient_norm
self.assert_finite = assert_finite
self.hidden_units = hidden_units
self.hidden_layer_sizes = hidden_layer_sizes
self.dropout = dropout
self.batch_size = batch_size
self.num_epochs = num_epochs
self.activation = activation
self.input_noise = input_noise
self.input_dropout = input_dropout
self.learning_rate = learning_rate
self.verbose = verbose
self.loss = loss
self.l2 = l2
self.batch_norm = batch_norm
self.early_stopping = early_stopping
self.init = self._get_default_init(init, activation)
self.use_maxnorm = maxnorm
self.val = val
self.history_file = history_file
self.optimizer = optimizer
self.lr_decay = lr_decay
self.extra_callback = None
self.non_negative = non_negative
self.weight_samples = weight_samples
self.logger = general.get_class_logger(self)
self.model_ = None
self.history_df_ = None
def _get_optimizer(self):
if self.optimizer == "adam":
return keras.optimizers.Adam(**self._get_optimizer_kwargs())
elif self.optimizer == "rmsprop":
return keras.optimizers.RMSprop(**self._get_optimizer_kwargs())
elif self.optimizer == "sgd":
return keras.optimizers.SGD(**self._get_optimizer_kwargs())
elif self.optimizer == "adamax":
return keras.optimizers.Adamax(**self._get_optimizer_kwargs())
else:
raise ValueError("Unknown optimizer {}".format(self.optimizer))
def _get_activation(self):
if self.activation == "elu":
return keras.layers.advanced_activations.ELU()
elif self.activation:
return keras.layers.core.Activation(self.activation)
else:
raise ValueError("No activation unit specified")
def fit(self, X, y, **kwargs):
self.set_params(**kwargs)
if self.hidden_units:
self.hidden_layer_sizes = (self.hidden_units,)
self.logger.debug("X: {}, Y: {}".format(X.shape, y.shape))
model = keras.models.Sequential()
# input noise not optional so that we have a well-defined first layer to
# set the input shape on (though it may be set to zero noise)
model.add(keras.layers.noise.GaussianNoise(self.input_noise, input_shape=X.shape[1:]))
if self.input_dropout:
model.add(keras.layers.core.Dropout(self.input_dropout))
dense_kwargs = self._get_dense_layer_kwargs()
# hidden layers
for layer_size in self.hidden_layer_sizes:
model.add(keras.layers.core.Dense(output_dim=layer_size, **dense_kwargs))
if self.batch_norm:
model.add(keras.layers.normalization.BatchNormalization())
model.add(self._get_activation())
if self.dropout:
model.add(keras.layers.core.Dropout(self.dropout))
# output layer
model.add(keras.layers.core.Dense(output_dim=y.shape[1], **dense_kwargs))
if self.non_negative:
model.add(keras.layers.core.Activation("relu"))
optimizer = self._get_optimizer()
model.compile(loss=self.loss, optimizer=optimizer)
self.model_ = model
self._run_fit(X, y)
return self
def _run_fit(self, X, y):
t = time.time()
history = self.model_.fit(X, y, **self._get_fit_kwargs(X))
t = time.time() - t
self._save_history(history)
self.logger.info("Trained at {:,} rows/sec in {:,} epochs".format(int(X.shape[0] * len(history.epoch) / t),
len(history.epoch)))
self.logger.debug("Model has {:,} params".format(self.count_params()))
def _get_dense_layer_kwargs(self):
"""Apply settings to dense layer keyword args"""
dense_kwargs = {"init": self.init, "trainable": True}
if self.l2:
dense_kwargs["W_regularizer"] = keras.regularizers.l2(self.l2)
if self.use_maxnorm:
dense_kwargs["W_constraint"] = keras.constraints.MaxNorm(2)
dense_kwargs["b_constraint"] = keras.constraints.MaxNorm(2)
return dense_kwargs
def _get_fit_kwargs(self, X, batch_size_override=None, num_epochs_override=None, disable_validation=False):
"""Apply settings to the fit function keyword args"""
kwargs = {"verbose": self.verbose, "nb_epoch": self.num_epochs, "callbacks": []}
if num_epochs_override:
kwargs["nb_epoch"] = num_epochs_override
if self.early_stopping:
monitor = "val_loss" if self.val > 0 else "loss"
es = keras.callbacks.EarlyStopping(monitor=monitor, patience=self.num_epochs / 20, verbose=self.verbose,
mode="min")
kwargs["callbacks"].append(es)
if self.lr_decay:
if self.lr_decay == "DecreasingLearningRateScheduler":
kwargs["callbacks"].append(DecreasingLearningRateScheduler(self.learning_rate, scale=2, window=self.num_epochs / 40))
elif self.lr_decay != 1:
assert 0 < self.lr_decay < 1, "Learning rate must range 0-1"
kwargs["callbacks"].append(LearningRateDecay(self.lr_decay))
if self.extra_callback:
kwargs["callbacks"].append(self.extra_callback)
if self.val > 0 and not disable_validation:
kwargs["validation_split"] = self.val
if self.weight_samples:
kwargs["sample_weight"] = 0.97 ** numpy.log(X.shape[0] - numpy.asarray(range(X.shape[0])))
kwargs["batch_size"] = self.batch_size
if batch_size_override:
kwargs["batch_size"] = batch_size_override
if kwargs["batch_size"] < 0 or kwargs["batch_size"] > X.shape[0]:
kwargs["batch_size"] = X.shape[0]
self.logger.info("Fit kwargs: %s", kwargs)
return kwargs
def count_params(self):
return self.model_.count_params()
def predict(self, X):
retval = self._check_finite(self.model_.predict(X))
return retval
def _check_finite(self, Y):
if self.assert_finite:
sklearn.utils.assert_all_finite(Y)
else:
Y = numpy.nan_to_num(Y)
return Y
def _get_default_init(self, init, activation):
if init:
return init
if activation in _he_activations:
return "he_uniform"
return "glorot_uniform"
def _get_optimizer_kwargs(self):
kwargs = {"lr": self.learning_rate}
if self.clip_gradient_norm:
kwargs["clipnorm"] = self.clip_gradient_norm
return kwargs
def _save_history(self, history):
if not self.history_file:
return
self.history_df_ = pandas.DataFrame.from_dict(history.history)
self.history_df_.index.rename("epoch", inplace=True)
self.history_df_.to_csv(self.history_file)
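# Illustrative usage sketch (not part of the original module): fitting the
# scikit-learn-style wrapper on synthetic data. Shapes and hyperparameters are
# assumptions for demonstration only.
def _example_nn_regressor():
    X = numpy.random.rand(256, 10)
    Y = numpy.random.rand(256, 2)
    model = NnRegressor(hidden_layer_sizes=(32,), num_epochs=5, val=0.1)
    model.fit(X, Y)
    return model.predict(X)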
def pad_to_batch(data, batch_size):
remainder = data.shape[0] % batch_size
if remainder == 0:
return data, lambda Y: Y
pad_after = [batch_size - remainder] + [0 for _ in data.shape[1:]]
paddings = [(0, p) for p in pad_after]
return numpy.pad(data, paddings, mode="edge"), lambda Y: Y[:-pad_after[0]]
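# Illustrative sketch (not part of the original module): pad_to_batch pads the
# first axis up to a multiple of batch_size (repeating the edge values) and
# returns a function that strips the padding off again.
def _example_pad_to_batch():
    data = numpy.arange(10).reshape(10, 1)
    padded, inverse = pad_to_batch(data, 4)
    assert padded.shape[0] == 12           # 10 rows padded up to the next multiple of 4
    assert inverse(padded).shape[0] == 10  # padding removed again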
class RnnRegressor(NnRegressor):
def __init__(self, num_units=50, time_steps=5, batch_size=64, num_epochs=100, unit="lstm", verbose=0,
early_stopping=False, dropout=None, recurrent_dropout=None, loss="mse", input_noise=0.,
learning_rate=0.001, clip_gradient_norm=None, val=0, assert_finite=True, history_file=None,
pretrain=True, optimizer="adam", input_dropout=None, activation=None, posttrain=False, hidden_layer_sizes=None, stateful=False,
lr_decay=None, non_negative=False, l2=None, reverse=False):
super(RnnRegressor, self).__init__(batch_size=batch_size, num_epochs=num_epochs, verbose=verbose,
early_stopping=early_stopping, dropout=dropout, loss=loss,
input_noise=input_noise, learning_rate=learning_rate,
clip_gradient_norm=clip_gradient_norm, val=val, assert_finite=assert_finite,
history_file=history_file, optimizer=optimizer, input_dropout=input_dropout,
activation=activation, hidden_layer_sizes=hidden_layer_sizes, lr_decay=lr_decay, non_negative=non_negative, l2=l2)
self.posttrain = posttrain
self.num_units = num_units
self.time_steps = time_steps
self.unit = unit
self.recurrent_dropout = recurrent_dropout
self.use_maxnorm = True
self.pretrain = pretrain
self.stateful = stateful
self.reverse = reverse
if stateful:
assert self.time_steps == self.batch_size
self.logger = general.get_class_logger(self)
def _transform_input(self, X):
return general.prepare_time_matrix(X, self.time_steps, fill_value=0)
def _get_recurrent_layer_kwargs(self):
"""Apply settings to dense layer keyword args"""
kwargs = {"output_dim": self.num_units, "trainable": True, "unroll": RNN_UNROLL}
if self.recurrent_dropout:
kwargs["dropout_U"] = self.recurrent_dropout
if self.l2:
kwargs["W_regularizer"] = keras.regularizers.l2(self.l2)
kwargs["U_regularizer"] = keras.regularizers.l2(self.l2)
return kwargs
def _check_reverse(self, *args):
"""Return the args unless the reverse flag is set, then reverse all the matrices"""
if len(args) == 1:
if self.reverse:
return args[0][::-1]
else:
return args[0]
else:
if self.reverse:
return [arg[::-1] for arg in args]
else:
return args
def fit(self, X, Y, **kwargs):
self.set_params(**kwargs)
X, Y = self._check_reverse(X, Y)
model = keras.models.Sequential()
X_time = self._transform_input(X)
if self.stateful:
X_time, _ = pad_to_batch(X_time, self.batch_size)
Y, _ = pad_to_batch(Y, self.batch_size)
self.logger.debug("X takes %d mb", X.nbytes / 10e6)
self.logger.debug("X_time takes %d mb", X_time.nbytes / 10e6)
if self.stateful:
model.add(keras.layers.noise.GaussianNoise(self.input_noise, batch_input_shape=(self.batch_size,) + X_time.shape[1:]))
else:
model.add(keras.layers.noise.GaussianNoise(self.input_noise, input_shape=X_time.shape[1:]))
if self.input_dropout:
model.add(keras.layers.core.Dropout(self.input_dropout))
# recurrent layer
if self.unit == "lstm":
model.add(keras.layers.recurrent.LSTM(**self._get_recurrent_layer_kwargs()))
elif self.unit == "gru":
model.add(keras.layers.recurrent.GRU(**self._get_recurrent_layer_kwargs()))
else:
raise ValueError("Unknown unit type: {}".format(self.unit))
# dropout
if self.dropout:
model.add(keras.layers.core.Dropout(self.dropout))
# regular hidden layer(s)
if self.hidden_layer_sizes:
for layer_size in self.hidden_layer_sizes:
self.logger.warning("Adding FC-%d layer after RNN", layer_size)
model.add(keras.layers.core.Dense(output_dim=layer_size, **self._get_dense_layer_kwargs()))
model.add(self._get_activation())
# if self.dropout:
# model.add(keras.layers.core.Dropout(self.dropout))
# output layer
model.add(keras.layers.core.Dense(output_dim=Y.shape[1], **self._get_dense_layer_kwargs()))
if self.non_negative:
model.add(keras.layers.core.Activation("relu"))
optimizer = self._get_optimizer()
model.compile(loss="mse", optimizer=optimizer)
self.model_ = model
if self.pretrain and not self.stateful:
self.model_.fit(X_time, Y, **self._get_fit_kwargs(X, batch_size_override=1, num_epochs_override=1))
self._run_fit(X_time, Y)
if self.posttrain and not self.stateful:
self.model_.fit(X_time, Y, **self._get_fit_kwargs(X, disable_validation=True, num_epochs_override=5))
return self
def _get_fit_kwargs(self, *args, **kwargs):
kwargs = super(RnnRegressor, self)._get_fit_kwargs(*args, **kwargs)
if self.stateful:
kwargs["shuffle"] = False
kwargs["validation_split"] = 0
self.logger.warning("Disabling validation split for stateful RNN training")
return kwargs
def predict(self, X):
X = self._check_reverse(X)
inverse_trans = None
if self.stateful:
self.model_.reset_states()
X, inverse_trans = pad_to_batch(X, self.batch_size)
Y = self._check_finite(self.model_.predict(self._transform_input(X)))
if inverse_trans:
Y = inverse_trans(Y)
Y = self._check_reverse(Y)
return Y
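# Illustrative usage sketch (not part of the original module): the RNN wrapper
# windows the flat feature matrix into time_steps slices internally, so it is
# fit on the same (X, Y) shapes as NnRegressor. Values below are assumptions
# for demonstration only.
def _example_rnn_regressor():
    X = numpy.random.rand(128, 6)
    Y = numpy.random.rand(128, 1)
    model = RnnRegressor(num_units=16, time_steps=4, num_epochs=2, pretrain=False)
    model.fit(X, Y)
    return model.predict(X)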
def make_learning_rate_schedule(initial_value, exponential_decay=0.99, kick_every=10000):
logger = general.get_function_logger()
def schedule(epoch_num):
lr = initial_value * (10 ** int(epoch_num / kick_every)) * exponential_decay ** epoch_num
logger.debug("Setting learning rate at {} to {}".format(epoch_num, lr))
return lr
return schedule
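# Illustrative sketch (not part of the original module): the returned schedule
# is intended for Keras' LearningRateScheduler callback, e.g.
#   schedule = make_learning_rate_schedule(0.001, exponential_decay=0.99)
#   callbacks = [keras.callbacks.LearningRateScheduler(schedule)]
# and is then passed to model.fit(..., callbacks=callbacks).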
def sigmoid(x):
return 1 / (1 + numpy.exp(-x))
class AdaptiveLearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler that increases or decreases LR based on a recent sample of validation results"""
def __init__(self, initial_learning_rate, monitor="val_loss", scale=2., window=5):
super(AdaptiveLearningRateScheduler, self).__init__()
self.monitor = monitor
self.initial_lr = initial_learning_rate
self.scale = float(scale)
self.window = window
self.metric_ = []
self.logger_ = general.get_class_logger(self)
def on_epoch_begin(self, epoch, logs={}):
assert hasattr(self.model.optimizer, 'lr'), 'Optimizer must have a "lr" attribute.'
lr = self._get_learning_rate()
if lr:
self.logger_.debug("Setting learning rate at %d to %e", epoch, lr)
keras.backend.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs={}):
metric = logs[self.monitor]
self.metric_.append(metric)
def _get_learning_rate(self):
if len(self.metric_) < self.window * 2:
return self.initial_lr
data = numpy.asarray(self.metric_)
baseline = data[:-self.window].min()
diffs = baseline - data[-self.window:]
# assume error, lower is better
percent_epochs_improved = sigmoid((diffs / baseline) / 0.02).mean()
self.logger_.debug("Ratio of good epochs: %.2f", percent_epochs_improved)
if percent_epochs_improved > 0.75:
return self._scale_learning_rate(self.scale)
elif percent_epochs_improved < 0.5:
return self._scale_learning_rate(1. / self.scale)
return None
def _scale_learning_rate(self, scale):
return keras.backend.get_value(self.model.optimizer.lr) * scale
class DecreasingLearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler that decreases LR based on validation plateau"""
def __init__(self, initial_learning_rate, monitor="val_loss", scale=2., window=25):
super(DecreasingLearningRateScheduler, self).__init__()
self.monitor = monitor
self.initial_lr = initial_learning_rate
self.scale = float(scale)
self.window = window
self.metric_ = []
self.epochs_stale_ = 0
self.logger_ = general.get_class_logger(self)
def on_epoch_begin(self, epoch, logs={}):
assert hasattr(self.model.optimizer, "lr"), 'Optimizer must have a "lr" attribute.'
lr = self._get_learning_rate()
if lr:
self.logger_.debug("Setting learning rate at {} to {}".format(epoch, lr))
keras.backend.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs={}):
metric = logs[self.monitor]
self.metric_.append(metric)
def _get_learning_rate(self):
if len(self.metric_) < self.window * 2:
return self.initial_lr
data = numpy.asarray(self.metric_)
best = data[:-1].min()
self.logger_.debug("Current score {}".format(data[-1]))
# we've improved
if data[-1] < best:
self.logger_.debug("Improved from {} to {}".format(best, data[-1]))
self.epochs_stale_ = 0
else:
self.epochs_stale_ += 1
if self.epochs_stale_ >= self.window:
self.epochs_stale_ = 0
return self._scale_learning_rate(1 / self.scale)
return None
def _scale_learning_rate(self, scale):
return keras.backend.get_value(self.model.optimizer.lr) * scale
class LearningRateDecay(keras.callbacks.Callback):
"""Trying to get mode debug info...."""
def __init__(self, decay):
super(LearningRateDecay, self).__init__()
self.decay = decay
self.logger = general.get_class_logger(self)
def on_epoch_end(self, epoch, logs={}):
lr = keras.backend.get_value(self.model.optimizer.lr)
self.logger.debug("Decay LR to {}".format(lr * self.decay))
keras.backend.set_value(self.model.optimizer.lr, lr * self.decay)
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from .visual import CompoundVisual
from .line import LineVisual
from .text import TextVisual
# XXX TODO list (see code, plus):
# 1. Automated tick direction?
# 2. Expand to 3D (only 2D supported currently)
# 3. Input validation
# 4. Property support
# 5. Reactivity to resizing (current tick lengths grow/shrink w/zoom)
# 6. Improve tick label naming (str(x) is not good) and tick selection
class AxisVisual(CompoundVisual):
"""Axis visual
Parameters
----------
pos : array
Co-ordinates of start and end of the axis.
domain : tuple
The data values at the beginning and end of the axis, used for tick
labels. i.e. (5, 10) means the axis starts at 5 and ends at 10. Default
is (0, 1).
tick_direction : array
The tick direction to use (in document coordinates).
scale_type : str
The type of scale. For now only 'linear' is supported.
axis_color : tuple
        RGBA values for the axis colour. Default is white.
tick_color : tuple
RGBA values for the tick colours. The colour for the major and minor
ticks is currently fixed to be the same. Default is a dark grey.
text_color : Color
The color to use for drawing tick values.
font_size : float
The font size to use for rendering tick values.
**kwargs : dict
Keyword arguments to pass to `Visual`.
"""
def __init__(self, pos=None, domain=(0., 1.), tick_direction=(-1., 0.),
scale_type="linear", axis_color=(1, 1, 1),
tick_color=(0.7, 0.7, 0.7), text_color='w', font_size=8):
if scale_type != 'linear':
raise NotImplementedError('only linear scaling is currently '
'supported')
self._pos = None
self._domain = None
# If True, then axis stops at the first / last major tick.
# If False, then axis extends to edge of *pos*
# (private until we come up with a better name for this)
self._stop_at_major = (False, False)
self.ticker = Ticker(self)
        self.tick_direction = np.array(tick_direction, float)
self.scale_type = scale_type
self.axis_color = axis_color
self.tick_color = tick_color
self.minor_tick_length = 5 # px
self.major_tick_length = 10 # px
self.label_margin = 5 # px
self._need_update = True
self._line = LineVisual(method='gl', width=3.0)
self._ticks = LineVisual(method='gl', width=2.0, connect='segments')
self._text = TextVisual(font_size=font_size, color=text_color)
CompoundVisual.__init__(self, [self._line, self._text, self._ticks])
if pos is not None:
self.pos = pos
self.domain = domain
@property
def pos(self):
return self._pos
@pos.setter
def pos(self, pos):
self._pos = np.array(pos, float)
self._need_update = True
self.update()
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, d):
if self._domain is None or d != self._domain:
self._domain = d
self._need_update = True
self.update()
@property
def _vec(self):
"""Vector in the direction of the axis line"""
return self.pos[1] - self.pos[0]
def _update_subvisuals(self):
tick_pos, labels, label_pos, anchors = self.ticker.get_update()
self._line.set_data(pos=self.pos, color=self.axis_color)
self._ticks.set_data(pos=tick_pos, color=self.tick_color)
self._text.text = list(labels)
self._text.pos = label_pos
self._text.anchors = anchors
self._need_update = False
def _prepare_draw(self, view):
if self._pos is None:
return False
if self._need_update:
self._update_subvisuals()
def _compute_bounds(self, axis, view):
if axis == 2:
return (0., 0.)
# now axis in (0, 1)
return self.pos[:, axis].min(), self.pos[:, axis].max()
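# Illustrative usage sketch (not part of the original code): an x axis drawn
# between two points in the visual's coordinate frame, with `domain` supplying
# the tick label values. Positions and domain are assumptions for demonstration.
#   axis = AxisVisual(pos=[[0., 0.], [1., 0.]], domain=(0., 100.),
#                     tick_direction=(0., -1.))
# The visual is then attached to a canvas/scene in the usual vispy way before
# it can be drawn.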
class Ticker(object):
"""Class to determine tick marks
Parameters
----------
axis : instance of AxisVisual
The AxisVisual to generate ticks for.
"""
def __init__(self, axis):
self.axis = axis
def get_update(self):
major_tick_fractions, minor_tick_fractions, tick_labels = \
self._get_tick_frac_labels()
tick_pos, label_pos, anchors = self._get_tick_positions(
major_tick_fractions, minor_tick_fractions)
return tick_pos, tick_labels, label_pos, anchors
def _get_tick_positions(self, major_tick_fractions, minor_tick_fractions):
# tick direction is defined in visual coords, but use document
# coords to determine the tick length
trs = self.axis.transforms
visual_to_document = trs.get_transform('visual', 'document')
direction = np.array(self.axis.tick_direction)
direction /= np.linalg.norm(direction)
# use the document (pixel) coord system to set text anchors
anchors = []
if direction[0] < 0:
anchors.append('right')
elif direction[0] > 0:
anchors.append('left')
else:
anchors.append('center')
if direction[1] < 0:
anchors.append('bottom')
elif direction[1] > 0:
anchors.append('top')
else:
anchors.append('middle')
# now figure out the tick positions in visual (data) coords
doc_unit = visual_to_document.map([[0, 0], direction[:2]])
doc_unit = doc_unit[1] - doc_unit[0]
doc_len = np.linalg.norm(doc_unit)
vectors = np.array([[0., 0.],
direction * self.axis.minor_tick_length / doc_len,
direction * self.axis.major_tick_length / doc_len,
direction * (self.axis.major_tick_length +
self.axis.label_margin) / doc_len],
dtype=float)
minor_vector = vectors[1] - vectors[0]
major_vector = vectors[2] - vectors[0]
label_vector = vectors[3] - vectors[0]
major_origins, major_endpoints = self._tile_ticks(
major_tick_fractions, major_vector)
minor_origins, minor_endpoints = self._tile_ticks(
minor_tick_fractions, minor_vector)
tick_label_pos = major_origins + label_vector
num_major = len(major_tick_fractions)
num_minor = len(minor_tick_fractions)
c = np.empty([(num_major + num_minor) * 2, 2])
c[0:(num_major-1)*2+1:2] = major_origins
c[1:(num_major-1)*2+2:2] = major_endpoints
c[(num_major-1)*2+2::2] = minor_origins
c[(num_major-1)*2+3::2] = minor_endpoints
return c, tick_label_pos, anchors
def _tile_ticks(self, frac, tickvec):
"""Tiles tick marks along the axis."""
origins = np.tile(self.axis._vec, (len(frac), 1))
origins = self.axis.pos[0].T + (origins.T*frac).T
endpoints = tickvec + origins
return origins, endpoints
def _get_tick_frac_labels(self):
"""Get the major ticks, minor ticks, and major labels"""
minor_num = 4 # number of minor ticks per major division
if (self.axis.scale_type == 'linear'):
domain = self.axis.domain
if domain[1] < domain[0]:
flip = True
domain = domain[::-1]
else:
flip = False
offset = domain[0]
scale = domain[1] - domain[0]
transforms = self.axis.transforms
length = self.axis.pos[1] - self.axis.pos[0] # in logical coords
n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi
# major = np.linspace(domain[0], domain[1], num=11)
# major = MaxNLocator(10).tick_values(*domain)
major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2)
labels = ['%g' % x for x in major]
majstep = major[1] - major[0]
minor = []
minstep = majstep / (minor_num + 1)
minstart = 0 if self.axis._stop_at_major[0] else -1
minstop = -1 if self.axis._stop_at_major[1] else 0
for i in range(minstart, len(major) + minstop):
maj = major[0] + i * majstep
minor.extend(np.linspace(maj + minstep,
maj + majstep - minstep,
minor_num))
major_frac = (major - offset) / scale
minor_frac = (np.array(minor) - offset) / scale
major_frac = major_frac[::-1] if flip else major_frac
use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)
major_frac = major_frac[use_mask]
labels = [l for li, l in enumerate(labels) if use_mask[li]]
minor_frac = minor_frac[(minor_frac > -0.0001) &
(minor_frac < 1.0001)]
        elif self.axis.scale_type == 'logarithmic':
            raise NotImplementedError('logarithmic scale not yet supported')
        elif self.axis.scale_type == 'power':
            raise NotImplementedError('power scale not yet supported')
return major_frac, minor_frac, labels
# #############################################################################
# Translated from matplotlib
class MaxNLocator(object):
"""
Select no more than N intervals at nice locations.
"""
def __init__(self, nbins=10, steps=None, trim=True, integer=False,
symmetric=False, prune=None):
"""
Keyword args:
*nbins*
Maximum number of intervals; one less than max number of ticks.
*steps*
Sequence of nice numbers starting with 1 and ending with 10;
e.g., [1, 2, 4, 5, 10]
*integer*
If True, ticks will take only integer values.
*symmetric*
If True, autoscaling will result in a range symmetric
about zero.
*prune*
['lower' | 'upper' | 'both' | None]
Remove edge ticks -- useful for stacked or ganged plots
where the upper tick of one axes overlaps with the lower
tick of the axes above it.
If prune=='lower', the smallest tick will
be removed. If prune=='upper', the largest tick will be
removed. If prune=='both', the largest and smallest ticks
will be removed. If prune==None, no ticks will be removed.
"""
self._nbins = int(nbins)
self._trim = trim
self._integer = integer
self._symmetric = symmetric
if prune is not None and prune not in ['upper', 'lower', 'both']:
raise ValueError(
"prune must be 'upper', 'lower', 'both', or None")
self._prune = prune
if steps is None:
steps = [1, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
self._integer = integer
if self._integer:
self._steps = [n for n in self._steps
if divmod(n, 1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin = vmin - offset
vmax = vmax - offset
raw_step = (vmax - vmin) / nbins
scaled_raw_step = raw_step / scale
best_vmax = vmax
best_vmin = vmin
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step * divmod(vmin, step)[0]
best_vmax = best_vmin + step * nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins + 1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
locs = self.bin_boundaries(vmin, vmax)
prune = self._prune
if prune == 'lower':
locs = locs[1:]
elif prune == 'upper':
locs = locs[:-1]
elif prune == 'both':
locs = locs[1:-1]
return locs
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
return np.take(self.bin_boundaries(dmin, dmax), [0, -1])
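# Illustrative sketch (not part of the original code): tick_values() returns
# "nice" bin boundaries covering the requested interval, e.g.
#   MaxNLocator(nbins=5).tick_values(0.0, 9.3)
# should yield [ 0.,  2.,  4.,  6.,  8., 10.] with the default step table.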
def scale_range(vmin, vmax, n=1, threshold=100):
dv = abs(vmax - vmin)
if dv == 0: # maxabsv == 0 is a special case of this.
return 1.0, 0.0
# Note: this should never occur because
# vmin, vmax should have been checked by nonsingular(),
# and spread apart if necessary.
meanv = 0.5 * (vmax + vmin)
if abs(meanv) / dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(np.log10(meanv), 1)[0]
offset = 10 ** ex
else:
ex = divmod(np.log10(-meanv), 1)[0]
offset = -10 ** ex
ex = divmod(np.log10(dv / n), 1)[0]
scale = 10 ** ex
return scale, offset
# #############################################################################
# Translated from http://www.justintalbot.com/research/axis-labeling/
# See "An Extension of Wilkinson's Algorithm for Positioning Tick Labels
# on Axes" # by Justin Talbot, Sharon Lin, and Pat Hanrahan, InfoVis 2010.
def _coverage(dmin, dmax, lmin, lmax):
return 1 - 0.5 * ((dmax - lmax) ** 2 +
(dmin - lmin) ** 2) / (0.1 * (dmax - dmin)) ** 2
def _coverage_max(dmin, dmax, span):
range_ = dmax - dmin
if span <= range_:
return 1.
else:
half = (span - range_) / 2.0
return 1 - half ** 2 / (0.1 * range_) ** 2
def _density(k, m, dmin, dmax, lmin, lmax):
r = (k-1.0) / (lmax-lmin)
rt = (m-1.0) / (max(lmax, dmax) - min(lmin, dmin))
return 2 - max(r / rt, rt / r)
def _density_max(k, m):
return 2 - (k-1.0) / (m-1.0) if k >= m else 1.
def _simplicity(q, Q, j, lmin, lmax, lstep):
eps = 1e-10
n = len(Q)
i = Q.index(q) + 1
if ((lmin % lstep) < eps or
(lstep - lmin % lstep) < eps) and lmin <= 0 and lmax >= 0:
v = 1
else:
v = 0
return (n - i) / (n - 1.0) + v - j
def _simplicity_max(q, Q, j):
n = len(Q)
i = Q.index(q) + 1
return (n - i)/(n - 1.0) + 1. - j
def _get_ticks_talbot(dmin, dmax, n_inches, density=1.):
# density * size gives target number of intervals,
# density * size + 1 gives target number of tick marks,
# the density function converts this back to a density in data units
# (not inches)
n_inches = max(n_inches, 2.0) # Set minimum otherwise code can crash :(
m = density * n_inches + 1.0
only_inside = False # we cull values outside ourselves
Q = [1, 5, 2, 2.5, 4, 3]
w = [0.25, 0.2, 0.5, 0.05]
best_score = -2.0
j = 1.0
n_max = 1000
while j < n_max:
for q in Q:
sm = _simplicity_max(q, Q, j)
if w[0] * sm + w[1] + w[2] + w[3] < best_score:
j = n_max
break
k = 2.0
while k < n_max:
dm = _density_max(k, n_inches)
if w[0] * sm + w[1] + w[2] * dm + w[3] < best_score:
break
delta = (dmax-dmin)/(k+1.0)/j/q
z = np.ceil(np.log10(delta))
while z < float('infinity'):
step = j * q * 10 ** z
cm = _coverage_max(dmin, dmax, step*(k-1.0))
if (w[0] * sm +
w[1] * cm +
w[2] * dm +
w[3] < best_score):
break
min_start = np.floor(dmax/step)*j - (k-1.0)*j
max_start = np.ceil(dmin/step)*j
if min_start > max_start:
z = z+1
break
for start in range(int(min_start), int(max_start)+1):
lmin = start * (step/j)
lmax = lmin + step*(k-1.0)
lstep = step
s = _simplicity(q, Q, j, lmin, lmax, lstep)
c = _coverage(dmin, dmax, lmin, lmax)
d = _density(k, m, dmin, dmax, lmin, lmax)
l = 1. # _legibility(lmin, lmax, lstep)
score = w[0] * s + w[1] * c + w[2] * d + w[3] * l
if (score > best_score and
(not only_inside or (lmin >= dmin and
lmax <= dmax))):
best_score = score
best = (lmin, lmax, lstep, q, k)
z += 1
k += 1
if k == n_max:
raise RuntimeError('could not converge on ticks')
j += 1
if j == n_max:
raise RuntimeError('could not converge on ticks')
return np.arange(best[4]) * best[2] + best[0]
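# Illustrative sketch (not part of the original code): the search above picks
# "nice", evenly spaced tick locations for a data range given a physical size
# in inches, e.g. _get_ticks_talbot(0., 100., n_inches=3.) returns ticks such
# as multiples of 20 or 25 covering [0, 100]; the exact choice depends on the
# simplicity/coverage/density scoring.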
|
|
# coding: utf-8
#
# Copyright 2010-2014 Ning, Inc.
# Copyright 2014-2020 Groupon, Inc
# Copyright 2020-2021 Equinix, Inc
# Copyright 2014-2021 The Billing Project, LLC
#
# The Billing Project, LLC licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Kill Bill
Kill Bill is an open-source billing and payments platform # noqa: E501
OpenAPI spec version: 0.22.22-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PaymentAttempt(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_id': 'Str',
'payment_method_id': 'Str',
'payment_external_key': 'Str',
'transaction_id': 'Str',
'transaction_external_key': 'Str',
'transaction_type': 'Str',
'effective_date': 'Datetime',
'state_name': 'Str',
'amount': 'Float',
'currency': 'Str',
'plugin_name': 'Str',
'plugin_properties': 'List[PluginProperty]',
'audit_logs': 'List[AuditLog]'
}
attribute_map = {
'account_id': 'accountId',
'payment_method_id': 'paymentMethodId',
'payment_external_key': 'paymentExternalKey',
'transaction_id': 'transactionId',
'transaction_external_key': 'transactionExternalKey',
'transaction_type': 'transactionType',
'effective_date': 'effectiveDate',
'state_name': 'stateName',
'amount': 'amount',
'currency': 'currency',
'plugin_name': 'pluginName',
'plugin_properties': 'pluginProperties',
'audit_logs': 'auditLogs'
}
def __init__(self, account_id=None, payment_method_id=None, payment_external_key=None, transaction_id=None, transaction_external_key=None, transaction_type=None, effective_date=None, state_name=None, amount=None, currency=None, plugin_name=None, plugin_properties=None, audit_logs=None): # noqa: E501
"""PaymentAttempt - a model defined in Swagger""" # noqa: E501
self._account_id = None
self._payment_method_id = None
self._payment_external_key = None
self._transaction_id = None
self._transaction_external_key = None
self._transaction_type = None
self._effective_date = None
self._state_name = None
self._amount = None
self._currency = None
self._plugin_name = None
self._plugin_properties = None
self._audit_logs = None
self.discriminator = None
if account_id is not None:
self.account_id = account_id
if payment_method_id is not None:
self.payment_method_id = payment_method_id
if payment_external_key is not None:
self.payment_external_key = payment_external_key
if transaction_id is not None:
self.transaction_id = transaction_id
if transaction_external_key is not None:
self.transaction_external_key = transaction_external_key
if transaction_type is not None:
self.transaction_type = transaction_type
if effective_date is not None:
self.effective_date = effective_date
if state_name is not None:
self.state_name = state_name
if amount is not None:
self.amount = amount
if currency is not None:
self.currency = currency
if plugin_name is not None:
self.plugin_name = plugin_name
if plugin_properties is not None:
self.plugin_properties = plugin_properties
if audit_logs is not None:
self.audit_logs = audit_logs
@property
def account_id(self):
"""Gets the account_id of this PaymentAttempt. # noqa: E501
:return: The account_id of this PaymentAttempt. # noqa: E501
:rtype: Str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this PaymentAttempt.
:param account_id: The account_id of this PaymentAttempt. # noqa: E501
:type: Str
"""
self._account_id = account_id
@property
def payment_method_id(self):
"""Gets the payment_method_id of this PaymentAttempt. # noqa: E501
:return: The payment_method_id of this PaymentAttempt. # noqa: E501
:rtype: Str
"""
return self._payment_method_id
@payment_method_id.setter
def payment_method_id(self, payment_method_id):
"""Sets the payment_method_id of this PaymentAttempt.
:param payment_method_id: The payment_method_id of this PaymentAttempt. # noqa: E501
:type: Str
"""
self._payment_method_id = payment_method_id
@property
def payment_external_key(self):
"""Gets the payment_external_key of this PaymentAttempt. # noqa: E501
:return: The payment_external_key of this PaymentAttempt. # noqa: E501
:rtype: Str
"""
return self._payment_external_key
@payment_external_key.setter
def payment_external_key(self, payment_external_key):
"""Sets the payment_external_key of this PaymentAttempt.
:param payment_external_key: The payment_external_key of this PaymentAttempt. # noqa: E501
:type: Str
"""
self._payment_external_key = payment_external_key
@property
def transaction_id(self):
"""Gets the transaction_id of this PaymentAttempt. # noqa: E501
:return: The transaction_id of this PaymentAttempt. # noqa: E501
:rtype: Str
"""
return self._transaction_id
@transaction_id.setter
def transaction_id(self, transaction_id):
"""Sets the transaction_id of this PaymentAttempt.
:param transaction_id: The transaction_id of this PaymentAttempt. # noqa: E501
:type: Str
"""
self._transaction_id = transaction_id
@property
def transaction_external_key(self):
"""Gets the transaction_external_key of this PaymentAttempt. # noqa: E501
:return: The transaction_external_key of this PaymentAttempt. # noqa: E501
:rtype: Str
"""
return self._transaction_external_key
@transaction_external_key.setter
def transaction_external_key(self, transaction_external_key):
"""Sets the transaction_external_key of this PaymentAttempt.
:param transaction_external_key: The transaction_external_key of this PaymentAttempt. # noqa: E501
:type: Str
"""
self._transaction_external_key = transaction_external_key
@property
def transaction_type(self):
"""Gets the transaction_type of this PaymentAttempt. # noqa: E501
:return: The transaction_type of this PaymentAttempt. # noqa: E501
:rtype: Str
"""
return self._transaction_type
@transaction_type.setter
def transaction_type(self, transaction_type):
"""Sets the transaction_type of this PaymentAttempt.
:param transaction_type: The transaction_type of this PaymentAttempt. # noqa: E501
:type: Str
"""
self._transaction_type = transaction_type
@property
def effective_date(self):
"""Gets the effective_date of this PaymentAttempt. # noqa: E501
:return: The effective_date of this PaymentAttempt. # noqa: E501
:rtype: Datetime
"""
return self._effective_date
@effective_date.setter
def effective_date(self, effective_date):
"""Sets the effective_date of this PaymentAttempt.
:param effective_date: The effective_date of this PaymentAttempt. # noqa: E501
:type: Datetime
"""
self._effective_date = effective_date
@property
def state_name(self):
"""Gets the state_name of this PaymentAttempt. # noqa: E501
:return: The state_name of this PaymentAttempt. # noqa: E501
:rtype: Str
"""
return self._state_name
@state_name.setter
def state_name(self, state_name):
"""Sets the state_name of this PaymentAttempt.
:param state_name: The state_name of this PaymentAttempt. # noqa: E501
:type: Str
"""
self._state_name = state_name
@property
def amount(self):
"""Gets the amount of this PaymentAttempt. # noqa: E501
Transaction amount, required except for void operations # noqa: E501
:return: The amount of this PaymentAttempt. # noqa: E501
:rtype: Float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this PaymentAttempt.
Transaction amount, required except for void operations # noqa: E501
:param amount: The amount of this PaymentAttempt. # noqa: E501
:type: Float
"""
self._amount = amount
@property
def currency(self):
"""Gets the currency of this PaymentAttempt. # noqa: E501
Amount currency (account currency unless specified) # noqa: E501
:return: The currency of this PaymentAttempt. # noqa: E501
:rtype: Str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this PaymentAttempt.
Amount currency (account currency unless specified) # noqa: E501
:param currency: The currency of this PaymentAttempt. # noqa: E501
:type: Str
"""
self._currency = currency
@property
def plugin_name(self):
"""Gets the plugin_name of this PaymentAttempt. # noqa: E501
:return: The plugin_name of this PaymentAttempt. # noqa: E501
:rtype: Str
"""
return self._plugin_name
@plugin_name.setter
def plugin_name(self, plugin_name):
"""Sets the plugin_name of this PaymentAttempt.
:param plugin_name: The plugin_name of this PaymentAttempt. # noqa: E501
:type: Str
"""
self._plugin_name = plugin_name
@property
def plugin_properties(self):
"""Gets the plugin_properties of this PaymentAttempt. # noqa: E501
:return: The plugin_properties of this PaymentAttempt. # noqa: E501
:rtype: List[PluginProperty]
"""
return self._plugin_properties
@plugin_properties.setter
def plugin_properties(self, plugin_properties):
"""Sets the plugin_properties of this PaymentAttempt.
:param plugin_properties: The plugin_properties of this PaymentAttempt. # noqa: E501
:type: List[PluginProperty]
"""
self._plugin_properties = plugin_properties
@property
def audit_logs(self):
"""Gets the audit_logs of this PaymentAttempt. # noqa: E501
:return: The audit_logs of this PaymentAttempt. # noqa: E501
:rtype: List[AuditLog]
"""
return self._audit_logs
@audit_logs.setter
def audit_logs(self, audit_logs):
"""Sets the audit_logs of this PaymentAttempt.
:param audit_logs: The audit_logs of this PaymentAttempt. # noqa: E501
:type: List[AuditLog]
"""
self._audit_logs = audit_logs
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PaymentAttempt):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
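# Illustrative sketch (not part of the generated module): constructing the model
# and serialising it back to a plain dict. Field values are assumptions for
# demonstration only.
#   attempt = PaymentAttempt(transaction_type='AUTHORIZE', state_name='SUCCESS',
#                            amount=10.0, currency='USD')
#   attempt.to_dict()['amount']  # -> 10.0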
|
|
# coding: utf-8
# imports
import os
import re
# django imports
from django.template import (
Library, Node, Variable, VariableDoesNotExist, TemplateSyntaxError
)
from django.utils.encoding import smart_str
# filebrowser imports
from filebrowser.settings import VERSIONS
from filebrowser.conf import fb_settings
from filebrowser.functions import (
url_to_path, path_to_url, get_version_path, version_generator
)
from filebrowser.base import FileObject
register = Library()
class VersionNode(Node):
def __init__(self, src, version_prefix):
self.src = Variable(src)
if (version_prefix[0] == version_prefix[-1] and
version_prefix[0] in ('"', "'")):
self.version_prefix = version_prefix[1:-1]
else:
self.version_prefix = None
self.version_prefix_var = Variable(version_prefix)
def render(self, context):
try:
source = self.src.resolve(context)
except VariableDoesNotExist:
return None
if self.version_prefix:
version_prefix = self.version_prefix
else:
try:
version_prefix = self.version_prefix_var.resolve(context)
except VariableDoesNotExist:
return None
try:
version_path = get_version_path(
url_to_path(source), version_prefix
)
if not os.path.isfile(smart_str(os.path.join(fb_settings.MEDIA_ROOT, version_path))):
# create version
version_path = version_generator(
url_to_path(source), version_prefix
)
elif os.path.getmtime(smart_str(os.path.join(fb_settings.MEDIA_ROOT, url_to_path(source)))) > os.path.getmtime(smart_str(os.path.join(fb_settings.MEDIA_ROOT, version_path))):
# recreate version if original image was updated
version_path = version_generator(
url_to_path(source), version_prefix, force=True
)
return path_to_url(version_path)
except:
return ""
def version(parser, token):
"""
Displaying a version of an existing Image according to the predefined
VERSIONS settings (see filebrowser settings).
{% version field_name version_prefix %}
Use {% version my_image 'medium' %} in order to display the medium-size
version of an Image stored in a field name my_image.
version_prefix can be a string or a variable.
if version_prefix is a string, use quotes.
"""
try:
tag, src, version_prefix = token.split_contents()
    except ValueError:
raise TemplateSyntaxError(
"{0} tag requires 2 arguments".format(token.contents.split()[0])
)
if (version_prefix[0] == version_prefix[-1] and
version_prefix[0] in ('"', "'")) and \
version_prefix.lower()[1:-1] not in VERSIONS:
raise TemplateSyntaxError(
"{0} tag received bad version_prefix {1}".format(tag,
version_prefix)
)
return VersionNode(src, version_prefix)
class VersionObjectNode(Node):
def __init__(self, src, version_prefix, var_name):
self.var_name = var_name
self.src = Variable(src)
if (version_prefix[0] == version_prefix[-1] and
version_prefix[0] in ('"', "'")):
self.version_prefix = version_prefix[1:-1]
else:
self.version_prefix = None
self.version_prefix_var = Variable(version_prefix)
def render(self, context):
try:
source = self.src.resolve(context)
except VariableDoesNotExist:
return None
if self.version_prefix:
version_prefix = self.version_prefix
else:
try:
version_prefix = self.version_prefix_var.resolve(context)
except VariableDoesNotExist:
return None
try:
version_path = get_version_path(
url_to_path(source), version_prefix
)
            version_full_path = os.path.join(fb_settings.MEDIA_ROOT,
                                             version_path)
            source_full_path = os.path.join(fb_settings.MEDIA_ROOT,
                                            url_to_path(source))
            if not os.path.isfile(smart_str(version_full_path)):
                # create version
                version_path = version_generator(
                    url_to_path(source), version_prefix
                )
            elif (os.path.getmtime(smart_str(source_full_path)) >
                    os.path.getmtime(smart_str(version_full_path))):
                # recreate version if original image was updated
                version_path = version_generator(
                    url_to_path(source), version_prefix, force=True
                )
context[self.var_name] = FileObject(version_path)
        except Exception:
context[self.var_name] = ""
return ''
def version_object(parser, token):
"""
Returns a context variable 'version_object'.
{% version_object field_name version_prefix %}
Use {% version_object my_image 'medium' %} in order to retrieve the medium
version of an Image stored in a field name my_image.
Use {% version_object my_image 'medium' as var %} in order to use 'var' as
your context variable.
version_prefix can be a string or a variable.
if version_prefix is a string, use quotes.
"""
try:
# tag, src, version_prefix = token.split_contents()
tag, arg = token.contents.split(None, 1)
    except ValueError:
raise TemplateSyntaxError(
"{0} tag requires arguments".format(token.contents.split()[0])
)
m = re.search(r'(.*?) (.*?) as (\w+)', arg)
if not m:
raise TemplateSyntaxError("{0} tag had invalid arguments".format(tag))
src, version_prefix, var_name = m.groups()
if (version_prefix[0] == version_prefix[-1] and
version_prefix[0] in ('"', "'")) and \
version_prefix.lower()[1:-1] not in VERSIONS:
raise TemplateSyntaxError(
"{0} tag received bad version_prefix {1}".format(tag,
version_prefix)
)
return VersionObjectNode(src, version_prefix, var_name)
class VersionSettingNode(Node):
def __init__(self, version_prefix):
if (version_prefix[0] == version_prefix[-1] and
version_prefix[0] in ('"', "'")):
self.version_prefix = version_prefix[1:-1]
else:
self.version_prefix = None
self.version_prefix_var = Variable(version_prefix)
def render(self, context):
if self.version_prefix:
version_prefix = self.version_prefix
else:
try:
version_prefix = self.version_prefix_var.resolve(context)
except VariableDoesNotExist:
return None
context['version_setting'] = VERSIONS[version_prefix]
return ''
def version_setting(parser, token):
"""
Get Information about a version setting.
"""
try:
tag, version_prefix = token.split_contents()
    except ValueError:
raise TemplateSyntaxError(
"{0} tag requires 1 argument".format(token.contents.split()[0])
)
if (version_prefix[0] == version_prefix[-1] and
version_prefix[0] in ('"', "'")) and \
version_prefix.lower()[1:-1] not in VERSIONS:
raise TemplateSyntaxError(
"{0} tag received bad version_prefix {1}".format(tag,
version_prefix)
)
return VersionSettingNode(version_prefix)
register.tag(version)
register.tag(version_object)
register.tag(version_setting)
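# A minimal template usage sketch (the {% load %} name and the 'medium' key
# are assumptions -- they depend on how this tag library and the VERSIONS
# setting are wired up in the host project):
#
#   {% load fb_versions %}
#   <img src="{% version obj.image 'medium' %}" />
#   {% version_object obj.image 'medium' as medium_image %}
#   {% version_setting 'medium' %}
#
# {% version %} prints the URL of the resized copy, {% version_object %}
# stores it as a FileObject in 'medium_image', and {% version_setting %}
# exposes VERSIONS['medium'] as 'version_setting'.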
|
|
#!/usr/bin/env python
#
# Copyright 2001 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple web server for browsing dependency graph data.
This script is inlined into the final executable and spawned by
it when needed.
"""
from __future__ import print_function
try:
import http.server as httpserver
except ImportError:
import BaseHTTPServer as httpserver
import argparse
import cgi
import os
import socket
import subprocess
import sys
import webbrowser
try:
from urllib.request import unquote
except ImportError:
from urllib2 import unquote
from collections import namedtuple
Node = namedtuple('Node', ['inputs', 'rule', 'target', 'outputs'])
# Ideally we'd allow you to navigate to a build edge or a build node,
# with appropriate views for each. But there's no way to *name* a build
# edge so we can only display nodes.
#
# For a given node, it has at most one input edge, which has n
# different inputs. This becomes node.inputs. (We leave out the
# outputs of the input edge due to what follows.) The node can have
# multiple dependent output edges. Rather than attempting to display
# those, they are summarized by taking the union of all their outputs.
#
# This means there's no single view that shows you all inputs and outputs
# of an edge. But I think it's less confusing than alternatives.
def match_strip(line, prefix):
if not line.startswith(prefix):
return (False, line)
return (True, line[len(prefix):])
def html_escape(text):
return cgi.escape(text, quote=True)
def parse(text):
lines = iter(text.split('\n'))
target = None
rule = None
inputs = []
outputs = []
try:
target = next(lines)[:-1] # strip trailing colon
line = next(lines)
(match, rule) = match_strip(line, ' input: ')
if match:
(match, line) = match_strip(next(lines), ' ')
while match:
type = None
(match, line) = match_strip(line, '| ')
if match:
type = 'implicit'
(match, line) = match_strip(line, '|| ')
if match:
type = 'order-only'
inputs.append((line, type))
(match, line) = match_strip(next(lines), ' ')
match, _ = match_strip(line, ' outputs:')
if match:
(match, line) = match_strip(next(lines), ' ')
while match:
outputs.append(line)
(match, line) = match_strip(next(lines), ' ')
except StopIteration:
pass
return Node(inputs, rule, target, outputs)
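# A sketch of the `ninja -t query` output that parse() consumes; the exact
# text comes from ninja itself, but the shape below mirrors the prefixes
# handled above ('  input: ', four-space continuation lines, '| ' implicit,
# '|| ' order-only, '  outputs:'):
#
#   out/foo.o:
#     input: cxx
#       src/foo.cc
#       | src/foo.h
#       || out/gen/config.h
#     outputs:
#       out/foo
#
# which parses to Node(inputs=[('src/foo.cc', None), ('src/foo.h', 'implicit'),
# ('out/gen/config.h', 'order-only')], rule='cxx', target='out/foo.o',
# outputs=['out/foo']).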
def create_page(body):
return '''<!DOCTYPE html>
<style>
body {
font-family: sans;
font-size: 0.8em;
margin: 4ex;
}
h1 {
font-weight: normal;
font-size: 140%;
text-align: center;
margin: 0;
}
h2 {
font-weight: normal;
font-size: 120%;
}
tt {
font-family: WebKitHack, monospace;
white-space: nowrap;
}
.filelist {
-webkit-columns: auto 2;
}
</style>
''' + body
def generate_html(node):
document = ['<h1><tt>%s</tt></h1>' % html_escape(node.target)]
if node.inputs:
document.append('<h2>target is built using rule <tt>%s</tt> of</h2>' %
html_escape(node.rule))
if len(node.inputs) > 0:
document.append('<div class=filelist>')
for input, type in sorted(node.inputs):
extra = ''
if type:
extra = ' (%s)' % html_escape(type)
document.append('<tt><a href="?%s">%s</a>%s</tt><br>' %
(html_escape(input), html_escape(input), extra))
document.append('</div>')
if node.outputs:
document.append('<h2>dependent edges build:</h2>')
document.append('<div class=filelist>')
for output in sorted(node.outputs):
document.append('<tt><a href="?%s">%s</a></tt><br>' %
(html_escape(output), html_escape(output)))
document.append('</div>')
return '\n'.join(document)
def ninja_dump(target):
cmd = [args.ninja_command, '-f', args.f, '-t', 'query', target]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
return proc.communicate() + (proc.returncode,)
class RequestHandler(httpserver.BaseHTTPRequestHandler):
def do_GET(self):
assert self.path[0] == '/'
target = unquote(self.path[1:])
if target == '':
self.send_response(302)
self.send_header('Location', '?' + args.initial_target)
self.end_headers()
return
if not target.startswith('?'):
self.send_response(404)
self.end_headers()
return
target = target[1:]
ninja_output, ninja_error, exit_code = ninja_dump(target)
if exit_code == 0:
page_body = generate_html(parse(ninja_output.strip()))
else:
# Relay ninja's error message.
page_body = '<h1><tt>%s</tt></h1>' % html_escape(ninja_error)
self.send_response(200)
self.end_headers()
self.wfile.write(create_page(page_body).encode('utf-8'))
def log_message(self, format, *args):
pass # Swallow console spam.
parser = argparse.ArgumentParser(prog='ninja -t browse')
parser.add_argument('--port', '-p', default=8000, type=int,
help='Port number to use (default %(default)d)')
parser.add_argument('--hostname', '-a', default='localhost', type=str,
help='Hostname to bind to (default %(default)s)')
parser.add_argument('--no-browser', action='store_true',
help='Do not open a webbrowser on startup.')
parser.add_argument('--ninja-command', default='ninja',
help='Path to ninja binary (default %(default)s)')
parser.add_argument('-f', default='build.ninja',
help='Path to build.ninja file (default %(default)s)')
parser.add_argument('initial_target', default='all', nargs='?',
help='Initial target to show (default %(default)s)')
args = parser.parse_args()
port = args.port
hostname = args.hostname
httpd = httpserver.HTTPServer((hostname,port), RequestHandler)
try:
if hostname == "":
hostname = socket.gethostname()
    print('Web server running on %s:%d, Ctrl-C to abort...' % (hostname, port))
print('Web server pid %d' % os.getpid(), file=sys.stderr )
if not args.no_browser:
webbrowser.open_new('http://%s:%s' % (hostname, port) )
httpd.serve_forever()
except KeyboardInterrupt:
print()
pass # Swallow console spam.
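# Typical invocation, via ninja's browse tool (a sketch -- the flags are the
# ones defined by the argparse setup above; availability depends on the ninja
# build in use):
#
#   ninja -t browse --port 8000 --no-browser path/to/target
#
# which then serves the dependency graph at http://localhost:8000/?path/to/target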
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import os
import shutil
import uuid
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
import oslo_i18n
from oslo_messaging import conffixture as messaging_conffixture
import oslotest.base as base_test
from manila.db import migration
from manila.db.sqlalchemy import api as db_api
from manila.db.sqlalchemy import models as db_models
from manila import rpc
from manila import service
from manila.tests import conf_fixture
from manila.tests import fake_notifier
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite database.'),
]
CONF = cfg.CONF
CONF.register_opts(test_opts)
_DB_CACHE = None
class Database(fixtures.Fixture):
def __init__(self, db_session, db_migrate, sql_connection, sqlite_db,
sqlite_clean_db):
self.sql_connection = sql_connection
self.sqlite_db = sqlite_db
self.sqlite_clean_db = sqlite_clean_db
self.engine = db_session.get_engine()
self.engine.dispose()
conn = self.engine.connect()
if sql_connection == "sqlite://":
self.setup_sqlite(db_migrate)
else:
testdb = os.path.join(CONF.state_path, sqlite_db)
db_migrate.upgrade('head')
if os.path.exists(testdb):
return
if sql_connection == "sqlite://":
conn = self.engine.connect()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
else:
cleandb = os.path.join(CONF.state_path, sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
def setUp(self):
super(Database, self).setUp()
if self.sql_connection == "sqlite://":
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose) # pylint: disable=E1101
else:
shutil.copyfile(
os.path.join(CONF.state_path, self.sqlite_clean_db),
os.path.join(CONF.state_path, self.sqlite_db),
)
def setup_sqlite(self, db_migrate):
if db_migrate.version():
return
db_models.BASE.metadata.create_all(self.engine)
db_migrate.stamp('head')
class TestCase(base_test.BaseTestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
oslo_i18n.enable_lazy(enable=False)
conf_fixture.set_defaults(CONF)
CONF([], default_config_files=[])
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(
db_api,
migration,
sql_connection=CONF.database.connection,
sqlite_db=CONF.sqlite_db,
sqlite_clean_db=CONF.sqlite_clean_db,
)
self.useFixture(_DB_CACHE)
self.injected = []
self._services = []
self.flags(fatal_exception_format_errors=True)
# This will be cleaned up by the NestedTempfile fixture
lock_path = self.useFixture(fixtures.TempDir()).path
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(lock_path=lock_path, group='oslo_concurrency')
self.fixture.config(
disable_process_locking=True, group='oslo_concurrency')
rpc.add_extra_exmods('manila.tests')
self.addCleanup(rpc.clear_extra_exmods)
self.addCleanup(rpc.cleanup)
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
self.messaging_conf.response_timeout = 15
self.useFixture(self.messaging_conf)
rpc.init(CONF)
mock.patch('keystoneauth1.loading.load_auth_from_conf_options').start()
fake_notifier.stub_notifier(self)
def tearDown(self):
"""Runs after each test method to tear down test environment."""
super(TestCase, self).tearDown()
# Reset any overridden flags
CONF.reset()
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Kill any services
for x in self._services:
try:
x.kill()
except Exception:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
for k, v in kw.items():
CONF.set_override(k, v, enforce_type=True)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'manila-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
def mock_object(self, obj, attr_name, new_attr=None, **kwargs):
"""Use python mock to mock an object attribute
Mocks the specified objects attribute with the given value.
Automatically performs 'addCleanup' for the mock.
"""
if not new_attr:
new_attr = mock.Mock()
patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs)
patcher.start()
self.addCleanup(patcher.stop)
return new_attr
def mock_class(self, class_name, new_val=None, **kwargs):
"""Use python mock to mock a class
Mocks the specified objects attribute with the given value.
Automatically performs 'addCleanup' for the mock.
"""
if not new_val:
new_val = mock.Mock()
patcher = mock.patch(class_name, new_val, **kwargs)
patcher.start()
self.addCleanup(patcher.stop)
return new_val
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = str(d1)
d2str = str(d2)
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' %
{"msg": msg, "d1str": d1str, "d2str": d2str})
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' %
{"d1only": d1only, "d2only": d2only})
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" %
{
"key": key,
"d1value": d1value,
"d2value": d2value
})
def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
"""Assert a list of dicts are equivalent."""
def raise_assertion(msg):
L1str = str(L1)
L2str = str(L2)
base_msg = ('List of dictionaries do not match: %(msg)s '
'L1: %(L1str)s L2: %(L2str)s' %
{"msg": msg, "L1str": L1str, "L2str": L2str})
raise AssertionError(base_msg)
L1count = len(L1)
L2count = len(L2)
if L1count != L2count:
raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
'len(L2)=%(L2count)d' %
{"L1count": L1count, "L2count": L2count})
for d1, d2 in zip(L1, L2):
self.assertDictMatch(d1, d2, approx_equal=approx_equal,
tolerance=tolerance)
def assertSubDictMatch(self, sub_dict, super_dict):
"""Assert a sub_dict is subset of super_dict."""
self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys())))
for k, sub_value in sub_dict.items():
super_value = super_dict[k]
if isinstance(sub_value, dict):
self.assertSubDictMatch(sub_value, super_value)
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
self.assertEqual(sub_value, super_value)
def assertIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' in 'b'."""
try:
f = super(TestCase, self).assertIn
except AttributeError:
self.assertTrue(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertNotIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' NOT in 'b'."""
try:
f = super(TestCase, self).assertNotIn
except AttributeError:
self.assertFalse(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertIsInstance(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility."""
try:
f = super(TestCase, self).assertIsInstance
except AttributeError:
self.assertIsInstance(a, b)
else:
f(a, b, *args, **kwargs)
def assertIsNone(self, a, *args, **kwargs):
"""Python < v2.7 compatibility."""
try:
f = super(TestCase, self).assertIsNone
except AttributeError:
self.assertTrue(a is None)
else:
f(a, *args, **kwargs)
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
return {k: v for k, v in obj.iteritems()
if k not in ignored_keys}
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
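# A minimal usage sketch (a hypothetical test module; 'share_get' is an
# illustrative attribute name, not something this file defines):
#
#   class ExampleTestCase(TestCase):
#       def test_example(self):
#           self.flags(fatal_exception_format_errors=False)
#           fake_get = self.mock_object(
#               db_api, 'share_get', mock.Mock(return_value={'id': 'fake'}))
#           self.assertSubDictMatch({'id': 'fake'},
#                                   db_api.share_get('ctx', 'fake'))
#           self.assertTrue(fake_get.called)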
|
|
"""general tests and simple benchmarks for the sparse module"""
from __future__ import division, print_function, absolute_import
import time
import warnings
import numpy
import numpy as np
from numpy import ones, array, asarray, empty
from numpy.testing import *
from scipy import sparse
from scipy.lib.six import xrange
from scipy.sparse import csr_matrix, coo_matrix, dia_matrix, lil_matrix, \
dok_matrix, rand, SparseEfficiencyWarning
def random_sparse(m,n,nnz_per_row):
rows = numpy.arange(m).repeat(nnz_per_row)
cols = numpy.random.random_integers(low=0,high=n-1,size=nnz_per_row*m)
vals = numpy.random.random_sample(m*nnz_per_row)
return coo_matrix((vals,(rows,cols)),(m,n)).tocsr()
# TODO move this to a matrix gallery and add unittests
def poisson2d(N,dtype='d',format=None):
"""
Return a sparse matrix for the 2D Poisson problem
with standard 5-point finite difference stencil on a
square N-by-N grid.
"""
if N == 1:
diags = asarray([[4]],dtype=dtype)
return dia_matrix((diags,[0]), shape=(1,1)).asformat(format)
offsets = array([0,-N,N,-1,1])
diags = empty((5,N**2),dtype=dtype)
diags[0] = 4 # main diagonal
diags[1:] = -1 # all offdiagonals
diags[3,N-1::N] = 0 # first lower diagonal
diags[4,N::N] = 0 # first upper diagonal
return dia_matrix((diags,offsets),shape=(N**2,N**2)).asformat(format)
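# For example, poisson2d(2).todense() gives the 4x4 matrix
#
#   [[ 4, -1, -1,  0],
#    [-1,  4,  0, -1],
#    [-1,  0,  4, -1],
#    [ 0, -1, -1,  4]]
#
# i.e. 4 on the main diagonal and -1 couplings to the east/west (offsets +-1,
# with the wrap-around entries zeroed) and north/south (offsets +-N)
# neighbours of each node on the 2x2 grid.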
class BenchmarkSparse(TestCase):
"""Simple benchmarks for sparse matrix module"""
def bench_arithmetic(self):
matrices = []
# matrices.append( ('A','Identity', sparse.eye(500**2,format='csr')) )
matrices.append(('A','Poisson5pt', poisson2d(250,format='csr')))
matrices.append(('B','Poisson5pt^2', poisson2d(250,format='csr')**2))
print()
print(' Sparse Matrix Arithmetic')
print('====================================================================')
print(' var | name | shape | dtype | nnz ')
print('--------------------------------------------------------------------')
fmt = ' %1s | %14s | %20s | %9s | %8d '
for var,name,mat in matrices:
name = name.center(14)
shape = ("%s" % (mat.shape,)).center(20)
dtype = mat.dtype.name.center(9)
print(fmt % (var,name,shape,dtype,mat.nnz))
space = ' ' * 10
print()
print(space+' Timings')
print(space+'==========================================')
print(space+' format | operation | time (msec) ')
print(space+'------------------------------------------')
fmt = space+' %3s | %17s | %7.1f '
for format in ['csr']:
vars = dict([(var, mat.asformat(format)) for (var, _, mat) in matrices])
for X,Y in [('A','A'),('A','B'),('B','A'),('B','B')]:
x,y = vars[X],vars[Y]
for op in ['__add__','__sub__','multiply','__div__','__mul__']:
fn = getattr(x,op)
fn(y) # warmup
start = time.clock()
iter = 0
while iter < 3 or time.clock() < start + 0.5:
fn(y)
iter += 1
end = time.clock()
msec_per_it = 1000*(end - start)/float(iter)
operation = (X + '.' + op + '(' + Y + ')').center(17)
print(fmt % (format,operation,msec_per_it))
def bench_sort(self):
"""sort CSR column indices"""
matrices = []
matrices.append(('Rand10', 1e4, 10))
matrices.append(('Rand25', 1e4, 25))
matrices.append(('Rand50', 1e4, 50))
matrices.append(('Rand100', 1e4, 100))
matrices.append(('Rand200', 1e4, 200))
print()
print(' Sparse Matrix Index Sorting')
print('=====================================================================')
print(' type | name | shape | nnz | time (msec) ')
print('---------------------------------------------------------------------')
fmt = ' %3s | %12s | %20s | %8d | %6.2f '
for name,N,K in matrices:
N = int(N)
A = random_sparse(N,N,K)
start = time.clock()
iter = 0
while iter < 5 and time.clock() - start < 1:
A.has_sorted_indices = False
A.indices[:2] = 2,1
A.sort_indices()
iter += 1
end = time.clock()
name = name.center(12)
shape = ("%s" % (A.shape,)).center(20)
print(fmt % (A.format,name,shape,A.nnz,1e3*(end-start)/float(iter)))
def bench_matvec(self):
matrices = []
matrices.append(('Identity', sparse.eye(10**4,format='dia')))
matrices.append(('Identity', sparse.eye(10**4,format='csr')))
matrices.append(('Poisson5pt', poisson2d(300,format='lil')))
matrices.append(('Poisson5pt', poisson2d(300,format='dok')))
matrices.append(('Poisson5pt', poisson2d(300,format='dia')))
matrices.append(('Poisson5pt', poisson2d(300,format='coo')))
matrices.append(('Poisson5pt', poisson2d(300,format='csr')))
matrices.append(('Poisson5pt', poisson2d(300,format='csc')))
matrices.append(('Poisson5pt', poisson2d(300,format='bsr')))
A = sparse.kron(poisson2d(150),ones((2,2))).tobsr(blocksize=(2,2))
matrices.append(('Block2x2', A.tocsr()))
matrices.append(('Block2x2', A))
A = sparse.kron(poisson2d(100),ones((3,3))).tobsr(blocksize=(3,3))
matrices.append(('Block3x3', A.tocsr()))
matrices.append(('Block3x3', A))
print()
print(' Sparse Matrix Vector Product')
print('==================================================================')
print(' type | name | shape | nnz | MFLOPs ')
print('------------------------------------------------------------------')
fmt = ' %3s | %12s | %20s | %8d | %6.1f '
for name,A in matrices:
x = ones(A.shape[1],dtype=A.dtype)
y = A*x # warmup
start = time.clock()
iter = 0
while iter < 5 or time.clock() < start + 1:
y = A*x
iter += 1
end = time.clock()
del y
name = name.center(12)
shape = ("%s" % (A.shape,)).center(20)
MFLOPs = (2*A.nnz*iter/(end-start))/float(1e6)
print(fmt % (A.format,name,shape,A.nnz,MFLOPs))
def bench_matvecs(self):
matrices = []
matrices.append(('Poisson5pt', poisson2d(300,format='dia')))
matrices.append(('Poisson5pt', poisson2d(300,format='coo')))
matrices.append(('Poisson5pt', poisson2d(300,format='csr')))
matrices.append(('Poisson5pt', poisson2d(300,format='csc')))
matrices.append(('Poisson5pt', poisson2d(300,format='bsr')))
n_vecs = 10
print()
print(' Sparse Matrix (Block) Vector Product')
print(' Blocksize = %d' % (n_vecs,))
print('==================================================================')
print(' type | name | shape | nnz | MFLOPs ')
print('------------------------------------------------------------------')
fmt = ' %3s | %12s | %20s | %8d | %6.1f '
for name,A in matrices:
x = ones((A.shape[1],10),dtype=A.dtype)
y = A*x # warmup
start = time.clock()
iter = 0
while iter < 5 or time.clock() < start + 1:
y = A*x
iter += 1
end = time.clock()
del y
name = name.center(12)
shape = ("%s" % (A.shape,)).center(20)
MFLOPs = (2*n_vecs*A.nnz*iter/(end-start))/float(1e6)
print(fmt % (A.format,name,shape,A.nnz,MFLOPs))
def bench_construction(self):
"""build matrices by inserting single values"""
matrices = []
matrices.append(('Empty',csr_matrix((10000,10000))))
matrices.append(('Identity',sparse.eye(10000)))
matrices.append(('Poisson5pt', poisson2d(100)))
print()
print(' Sparse Matrix Construction')
print('====================================================================')
print(' type | name | shape | nnz | time (sec) ')
print('--------------------------------------------------------------------')
fmt = ' %3s | %12s | %20s | %8d | %6.4f '
for name,A in matrices:
A = A.tocoo()
for format in ['lil','dok']:
start = time.clock()
iter = 0
while time.clock() < start + 0.5:
T = eval(format + '_matrix')(A.shape)
for i,j,v in zip(A.row,A.col,A.data):
T[i,j] = v
iter += 1
end = time.clock()
del T
name = name.center(12)
shape = ("%s" % (A.shape,)).center(20)
print(fmt % (format,name,shape,A.nnz,(end-start)/float(iter)))
def bench_conversion(self):
A = poisson2d(100)
formats = ['csr','csc','coo','dia','lil','dok']
print()
print(' Sparse Matrix Conversion')
print('====================================================================')
print(' format | tocsr() | tocsc() | tocoo() | todia() | tolil() | todok() ')
print('--------------------------------------------------------------------')
for fromfmt in formats:
base = getattr(A,'to' + fromfmt)()
times = []
for tofmt in formats:
try:
fn = getattr(base,'to' + tofmt)
                except AttributeError:
times.append(None)
else:
x = fn() # warmup
start = time.clock()
iter = 0
while time.clock() < start + 0.2:
x = fn()
iter += 1
end = time.clock()
del x
times.append((end - start)/float(iter))
output = " %3s " % fromfmt
for t in times:
if t is None:
output += '| n/a '
else:
output += '| %5.1fms ' % (1000*t)
print(output)
def _getset_bench(self, kernel, formats):
print('==========================================================')
print(' N | s.patt. |' + ''.join(' %7s |' % fmt for fmt in formats))
print('----------------------------------------------------------')
A = rand(1000, 1000, density=1e-5)
for N in [1, 10, 100, 1000, 10000]:
for spat in [False, True]:
# indices to assign to
i, j = [], []
while len(i) < N:
n = N - len(i)
ip = np.random.randint(0, A.shape[0], size=n)
jp = np.random.randint(0, A.shape[1], size=n)
i = np.r_[i, ip]
j = np.r_[j, jp]
v = np.random.rand(n)
if N == 1:
i = int(i)
j = int(j)
v = float(v)
times = []
for fmt in formats:
if fmt == 'dok' and N > 500:
times.append(None)
continue
base = A.asformat(fmt)
m = base.copy()
if spat:
kernel(m, i, j, v)
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
iter = 0
total_time = 0
while total_time < 0.2 and iter < 5000:
if not spat:
m = base.copy()
a = time.clock()
kernel(m, i, j, v)
total_time += time.clock() - a
iter += 1
times.append(total_time/float(iter))
output = " %6d | %7s " % (N, "same" if spat else "change")
for t in times:
if t is None:
output += '| n/a '
else:
output += '| %5.2fms ' % (1e3*t)
print(output)
def bench_setitem(self):
def kernel(A, i, j, v):
A[i, j] = v
print()
print(' Sparse Matrix fancy __setitem__')
self._getset_bench(kernel, ['csr', 'csc', 'lil', 'dok'])
def bench_getitem(self):
def kernel(A, i, j, v=None):
A[i, j]
print()
print(' Sparse Matrix fancy __getitem__')
self._getset_bench(kernel, ['csr', 'csc', 'lil'])
# class TestLarge(TestCase):
# def bench_large(self):
# # Create a 100x100 matrix with 100 non-zero elements
# # and play around with it
# #TODO move this out of Common since it doesn't use spmatrix
# random.seed(0)
# A = dok_matrix((100,100))
# for k in xrange(100):
# i = random.randrange(100)
# j = random.randrange(100)
# A[i,j] = 1.
# csr = A.tocsr()
# csc = A.tocsc()
# csc2 = csr.tocsc()
# coo = A.tocoo()
# csr2 = coo.tocsr()
# assert_array_equal(A.transpose().todense(), csr.transpose().todense())
# assert_array_equal(csc.todense(), csr.todense())
# assert_array_equal(csr.todense(), csr2.todense())
# assert_array_equal(csr2.todense().transpose(), coo.transpose().todense())
# assert_array_equal(csr2.todense(), csc2.todense())
# csr_plus_csc = csr + csc
# csc_plus_csr = csc + csr
# assert_array_equal(csr_plus_csc.todense(), (2*A).todense())
# assert_array_equal(csr_plus_csc.todense(), csc_plus_csr.todense())
if __name__ == "__main__":
run_module_suite()
|
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
from future.utils import lmap
standard_library.install_aliases()
__title__ = 'Interconnects'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2016) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
from hpOneView.resources.resource import ResourceClient
class Interconnects(object):
URI = '/rest/interconnects'
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
def get_all(self, start=0, count=-1, filter='', sort=''):
"""
Gets a paginated collection of interconnects that includes the ports.
In order to avoid a timeout on busy systems, the recommended maximum
value of count is 2.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all the items.
                The actual number of items in the response may differ from the
                requested count if the sum of start and count exceeds the total
                number of items.
filter:
A general filter/query string to narrow the list of items returned. The
default is no filter - all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time, with the oldest entry first.
Returns:
list: A list of interconnects.
"""
return self._client.get_all(start, count, filter=filter, sort=sort)
def get_statistics(self, id_or_uri, port_name=''):
"""
Gets the statistics from an interconnect.
Args:
id_or_uri: Can be either the interconnect id or the interconnect uri.
port_name (str): A specific port name of an interconnect.
Returns:
dict: The statistics for the interconnect that matches id.
"""
uri = self._client.build_uri(id_or_uri) + "/statistics"
if port_name:
uri = uri + "/" + port_name
return self._client.get(uri)
def get_subport_statistics(self, id_or_uri, port_name, subport_number):
"""
Gets the subport statistics on an interconnect.
Args:
id_or_uri: Can be either the interconnect id or the interconnect uri.
port_name (str): A specific port name of an interconnect.
subport_number (int): The subport.
Returns:
dict: The statistics for the interconnect that matches id, port_name and subport_number.
"""
uri = self._client.build_uri(id_or_uri) + "/statistics/{0}/subport/{1}".format(port_name, subport_number)
return self._client.get(uri)
def get_name_servers(self, id_or_uri):
"""
Gets the named servers for an interconnect.
Args:
id_or_uri: Can be either the interconnect id or the interconnect uri.
Returns:
dict: the name servers for an interconnect.
"""
uri = self._client.build_uri(id_or_uri) + "/nameServers"
return self._client.get(uri)
def get(self, id_or_uri):
"""
Gets the Interconnect by ID or by uri.
Args:
id_or_uri: Could be either the interconnect id or the interconnect uri.
Returns:
dict
"""
return self._client.get(id_or_uri)
def get_by(self, field, value):
"""
        Gets all interconnects that match the filter.
        The search is case-insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
list: A list of interconnects.
"""
return self._client.get_by(field, value)
def get_by_name(self, name):
"""
Retrieve an Interconnect by its name.
Args:
name: Interconnect name.
Returns:
dict: Interconnect.
"""
return self._client.get_by_name(name)
def patch(self, id_or_uri, operation, path, value, timeout=-1):
"""
Performs a specific patch operation for the given interconnect.
        There is a limited set of interconnect properties that may be changed:
        'powerState', 'uidState', and 'deviceResetState'.
If the interconnect supports the operation, the operation is performed and
a task is returned through which the results are reported.
Args:
id_or_uri:
Could be either the interconnect id or the interconnect uri
operation:
The type of operation: one of "add", "copy", "move", "remove", "replace", or "test".
path:
The JSON path the operation is to use. The exact meaning depends on the type of operation.
value:
The value to add or replace for "add" and "replace" operations, or the value to compare against
for a "test" operation. Not used by "copy", "move", or "remove".
Returns:
dict
"""
return self._client.patch(id_or_uri, operation, path, value, timeout)
def update_port(self, port_information, id_or_uri, timeout=-1):
"""
Updates an interconnect port.
Args:
id_or_uri: Could be either the interconnect id or the interconnect uri.
port_information (dict): object to update
timeout: Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns:
dict: The interconnect.
"""
uri = self._client.build_uri(id_or_uri) + "/ports"
return self._client.update(port_information, uri, timeout)
def update_ports(self, ports, id_or_uri, timeout=-1):
"""
Updates the interconnect ports.
Args:
id_or_uri: Could be either the interconnect id or the interconnect uri.
ports (list): Ports to update.
timeout: Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns:
dict: The interconnect.
"""
resources = lmap(self.__port_with_default_type, ports)
uri = self._client.build_uri(id_or_uri) + "/update-ports"
return self._client.update(resources, uri, timeout)
def __port_with_default_type(self, port):
data = dict(type="port")
data.update(port)
return data
def reset_port_protection(self, id_or_uri, timeout=-1):
"""
Triggers a reset of port protection.
Cause port protection to be reset on all the interconnects of the logical interconnect that matches ID.
Args:
id_or_uri: Could be either the interconnect id or the interconnect uri.
timeout: Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns:
dict: The interconnect.
"""
uri = self._client.build_uri(id_or_uri) + "/resetportprotection"
return self._client.update_with_zero_body(uri, timeout)
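# A minimal usage sketch (hostname, credentials and the port name are
# placeholders; the connection/login calls follow the usual hpOneView client
# pattern and are assumptions, not part of this module):
#
#   from hpOneView.connection import connection
#
#   con = connection('oneview.example.com')
#   con.login({'userName': 'administrator', 'password': 'secret'})
#   interconnects = Interconnects(con)
#   for ic in interconnects.get_all(count=2):
#       stats = interconnects.get_statistics(ic['uri'], port_name='d1')
#       interconnects.patch(ic['uri'], 'replace', '/uidState', 'On')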
|
|
# generated by pgen/main.py
import token
SkulptOpMap = {
"(": token.LPAR,
")": token.RPAR,
"[": token.LSQB,
"]": token.RSQB,
":": token.COLON,
",": token.COMMA,
";": token.SEMI,
"+": token.PLUS,
"-": token.MINUS,
"*": token.STAR,
"/": token.SLASH,
"|": token.VBAR,
"&": token.AMPER,
"<": token.LESS,
">": token.GREATER,
"=": token.EQUAL,
".": token.DOT,
"%": token.PERCENT,
"`": token.BACKQUOTE,
"{": token.LBRACE,
"}": token.RBRACE,
"@": token.AT,
"==": token.EQEQUAL,
"!=": token.NOTEQUAL,
"<>": token.NOTEQUAL,
"<=": token.LESSEQUAL,
">=": token.GREATEREQUAL,
"~": token.TILDE,
"^": token.CIRCUMFLEX,
"<<": token.LEFTSHIFT,
">>": token.RIGHTSHIFT,
"**": token.DOUBLESTAR,
"+=": token.PLUSEQUAL,
"-=": token.MINEQUAL,
"*=": token.STAREQUAL,
"/=": token.SLASHEQUAL,
"%=": token.PERCENTEQUAL,
"&=": token.AMPEREQUAL,
"|=": token.VBAREQUAL,
"^=": token.CIRCUMFLEXEQUAL,
"<<=": token.LEFTSHIFTEQUAL,
">>=": token.RIGHTSHIFTEQUAL,
"**=": token.DOUBLESTAREQUAL,
"//": token.DOUBLESLASH,
"//=": token.DOUBLESLASHEQUAL,
"->": token.RARROW
}
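# SkulptOpMap maps operator/delimiter literals to token ids from the stdlib
# `token` module; a quick lookup sketch (illustrative only):
#
#   assert SkulptOpMap["**="] == token.DOUBLESTAREQUAL
#   assert SkulptOpMap["<>"] == SkulptOpMap["!="] == token.NOTEQUAL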
class Grammar:
symbol2number = \
{'and_expr': 257,
'and_test': 258,
'arglist': 259,
'argument': 260,
'arith_expr': 261,
'assert_stmt': 262,
'atom': 263,
'augassign': 264,
'break_stmt': 265,
'classdef': 266,
'comp_for': 267,
'comp_if': 268,
'comp_iter': 269,
'comp_op': 270,
'comparison': 271,
'compound_stmt': 272,
'continue_stmt': 273,
'decorated': 274,
'decorator': 275,
'decorators': 276,
'del_stmt': 277,
'dictsetmaker': 278,
'dotted_as_name': 279,
'dotted_as_names': 280,
'dotted_name': 281,
'eval_input': 282,
'except_clause': 283,
'exec_stmt': 284,
'expr': 285,
'expr_stmt': 286,
'exprlist': 287,
'factor': 288,
'file_input': 256,
'flow_stmt': 289,
'for_stmt': 290,
'funcdef': 291,
'gen_for': 292,
'gen_if': 293,
'gen_iter': 294,
'global_stmt': 295,
'if_stmt': 296,
'import_as_name': 297,
'import_as_names': 298,
'import_from': 299,
'import_name': 300,
'import_stmt': 301,
'lambdef': 302,
'listmaker': 303,
'not_test': 304,
'old_lambdef': 305,
'old_test': 306,
'or_test': 307,
'parameters': 308,
'pass_stmt': 309,
'power': 310,
'print_stmt': 311,
'raise_stmt': 312,
'return_stmt': 313,
'shift_expr': 314,
'simple_stmt': 315,
'single_input': 316,
'sliceop': 317,
'small_stmt': 318,
'stmt': 319,
'subscript': 320,
'subscriptlist': 321,
'suite': 322,
'term': 323,
'test': 324,
'testlist': 325,
'testlist1': 326,
'testlist_gexp': 327,
'testlist_safe': 328,
'trailer': 329,
'try_stmt': 330,
'varargslist': 331,
'vfpdef': 332,
'vfplist': 333,
'while_stmt': 334,
'with_item': 335,
'with_stmt': 336,
'with_var': 337,
'xor_expr': 338,
'yield_expr': 339,
'yield_stmt': 340}
number2symbol = \
{256: 'file_input',
257: 'and_expr',
258: 'and_test',
259: 'arglist',
260: 'argument',
261: 'arith_expr',
262: 'assert_stmt',
263: 'atom',
264: 'augassign',
265: 'break_stmt',
266: 'classdef',
267: 'comp_for',
268: 'comp_if',
269: 'comp_iter',
270: 'comp_op',
271: 'comparison',
272: 'compound_stmt',
273: 'continue_stmt',
274: 'decorated',
275: 'decorator',
276: 'decorators',
277: 'del_stmt',
278: 'dictsetmaker',
279: 'dotted_as_name',
280: 'dotted_as_names',
281: 'dotted_name',
282: 'eval_input',
283: 'except_clause',
284: 'exec_stmt',
285: 'expr',
286: 'expr_stmt',
287: 'exprlist',
288: 'factor',
289: 'flow_stmt',
290: 'for_stmt',
291: 'funcdef',
292: 'gen_for',
293: 'gen_if',
294: 'gen_iter',
295: 'global_stmt',
296: 'if_stmt',
297: 'import_as_name',
298: 'import_as_names',
299: 'import_from',
300: 'import_name',
301: 'import_stmt',
302: 'lambdef',
303: 'listmaker',
304: 'not_test',
305: 'old_lambdef',
306: 'old_test',
307: 'or_test',
308: 'parameters',
309: 'pass_stmt',
310: 'power',
311: 'print_stmt',
312: 'raise_stmt',
313: 'return_stmt',
314: 'shift_expr',
315: 'simple_stmt',
316: 'single_input',
317: 'sliceop',
318: 'small_stmt',
319: 'stmt',
320: 'subscript',
321: 'subscriptlist',
322: 'suite',
323: 'term',
324: 'test',
325: 'testlist',
326: 'testlist1',
327: 'testlist_gexp',
328: 'testlist_safe',
329: 'trailer',
330: 'try_stmt',
331: 'varargslist',
332: 'vfpdef',
333: 'vfplist',
334: 'while_stmt',
335: 'with_item',
336: 'with_stmt',
337: 'with_var',
338: 'xor_expr',
339: 'yield_expr',
340: 'yield_stmt'}
dfas = \
{256: ([[(1, 0), (2, 1), (3, 0)], [(0, 1)]],
{1: 1,
2: 1,
4: 1,
5: 1,
6: 1,
7: 1,
8: 1,
9: 1,
10: 1,
11: 1,
12: 1,
13: 1,
14: 1,
15: 1,
16: 1,
17: 1,
18: 1,
19: 1,
20: 1,
21: 1,
22: 1,
23: 1,
24: 1,
25: 1,
26: 1,
27: 1,
28: 1,
29: 1,
30: 1,
31: 1,
32: 1,
33: 1,
34: 1,
35: 1,
36: 1,
37: 1}),
257: ([[(38, 1)], [(39, 0), (0, 1)]],
{5: 1, 6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 30: 1, 31: 1, 37: 1}),
258: ([[(40, 1)], [(41, 0), (0, 1)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
259: ([[(42, 1), (43, 2), (44, 3)],
[(45, 4)],
[(46, 5), (0, 2)],
[(45, 6)],
[(46, 7), (0, 4)],
[(42, 1), (43, 2), (44, 3), (0, 5)],
[(0, 6)],
[(43, 4), (44, 3)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1,
42: 1,
44: 1}),
260: ([[(45, 1)], [(47, 2), (48, 3), (0, 1)], [(0, 2)], [(45, 2)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
261: ([[(49, 1)], [(30, 0), (37, 0), (0, 1)]],
{5: 1, 6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 30: 1, 31: 1, 37: 1}),
262: ([[(11, 1)], [(45, 2)], [(46, 3), (0, 2)], [(45, 4)], [(0, 4)]],
{11: 1}),
263: ([[(18, 1), (6, 2), (31, 5), (13, 4), (20, 3), (8, 6), (27, 2)],
[(18, 1), (0, 1)],
[(0, 2)],
[(50, 7), (51, 2)],
[(52, 2), (53, 8), (54, 8)],
[(55, 9), (56, 2)],
[(57, 10)],
[(51, 2)],
[(52, 2)],
[(56, 2)],
[(8, 2)]],
{6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 31: 1}),
264: ([[(58, 1),
(59, 1),
(60, 1),
(61, 1),
(62, 1),
(63, 1),
(64, 1),
(65, 1),
(66, 1),
(67, 1),
(68, 1),
(69, 1)],
[(0, 1)]],
{58: 1,
59: 1,
60: 1,
61: 1,
62: 1,
63: 1,
64: 1,
65: 1,
66: 1,
67: 1,
68: 1,
69: 1}),
265: ([[(14, 1)], [(0, 1)]], {14: 1}),
266: ([[(21, 1)],
[(27, 2)],
[(70, 3), (13, 4)],
[(71, 5)],
[(52, 6), (72, 7)],
[(0, 5)],
[(70, 3)],
[(52, 6)]],
{21: 1}),
267: ([[(33, 1)],
[(73, 2)],
[(74, 3)],
[(75, 4)],
[(76, 5), (0, 4)],
[(0, 5)]],
{33: 1}),
268: ([[(35, 1)], [(77, 2)], [(76, 3), (0, 2)], [(0, 3)]], {35: 1}),
269: ([[(78, 1), (79, 1)], [(0, 1)]], {33: 1, 35: 1}),
270: ([[(80, 1),
(81, 1),
(19, 2),
(80, 1),
(82, 1),
(74, 1),
(83, 1),
(84, 3),
(85, 1),
(86, 1)],
[(0, 1)],
[(74, 1)],
[(19, 1), (0, 3)]],
{19: 1, 74: 1, 80: 1, 81: 1, 82: 1, 83: 1, 84: 1, 85: 1, 86: 1}),
271: ([[(87, 1)], [(88, 0), (0, 1)]],
{5: 1, 6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 30: 1, 31: 1, 37: 1}),
272: ([[(89, 1),
(90, 1),
(91, 1),
(92, 1),
(93, 1),
(94, 1),
(95, 1),
(96, 1)],
[(0, 1)]],
{7: 1, 9: 1, 16: 1, 21: 1, 26: 1, 33: 1, 35: 1, 36: 1}),
273: ([[(15, 1)], [(0, 1)]], {15: 1}),
274: ([[(97, 1)], [(95, 2), (92, 2)], [(0, 2)]], {16: 1}),
275: ([[(16, 1)],
[(98, 2)],
[(13, 4), (1, 3)],
[(0, 3)],
[(52, 5), (99, 6)],
[(1, 3)],
[(52, 5)]],
{16: 1}),
276: ([[(100, 1)], [(100, 1), (0, 1)]], {16: 1}),
277: ([[(28, 1)], [(73, 2)], [(0, 2)]], {28: 1}),
278: ([[(45, 1)],
[(70, 2), (79, 3), (46, 4), (0, 1)],
[(45, 5)],
[(0, 3)],
[(45, 6), (0, 4)],
[(79, 3), (46, 7), (0, 5)],
[(46, 4), (0, 6)],
[(45, 8), (0, 7)],
[(70, 9)],
[(45, 10)],
[(46, 7), (0, 10)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
279: ([[(98, 1)], [(101, 2), (0, 1)], [(27, 3)], [(0, 3)]], {27: 1}),
280: ([[(102, 1)], [(46, 0), (0, 1)]], {27: 1}),
281: ([[(27, 1)], [(103, 0), (0, 1)]], {27: 1}),
282: ([[(72, 1)], [(1, 1), (2, 2)], [(0, 2)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
283: ([[(104, 1)],
[(45, 2), (0, 1)],
[(101, 3), (46, 3), (0, 2)],
[(45, 4)],
[(0, 4)]],
{104: 1}),
284: ([[(25, 1)],
[(87, 2)],
[(74, 3), (0, 2)],
[(45, 4)],
[(46, 5), (0, 4)],
[(45, 6)],
[(0, 6)]],
{25: 1}),
285: ([[(105, 1)], [(106, 0), (0, 1)]],
{5: 1, 6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 30: 1, 31: 1, 37: 1}),
286: ([[(72, 1)],
[(107, 2), (48, 3), (0, 1)],
[(72, 4), (54, 4)],
[(72, 5), (54, 5)],
[(0, 4)],
[(48, 3), (0, 5)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
287: ([[(87, 1)], [(46, 2), (0, 1)], [(87, 1), (0, 2)]],
{5: 1, 6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 30: 1, 31: 1, 37: 1}),
288: ([[(30, 1), (108, 2), (37, 1), (5, 1)], [(109, 2)], [(0, 2)]],
{5: 1, 6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 30: 1, 31: 1, 37: 1}),
289: ([[(110, 1), (111, 1), (112, 1), (113, 1), (114, 1)], [(0, 1)]],
{10: 1, 12: 1, 14: 1, 15: 1, 17: 1}),
290: ([[(33, 1)],
[(73, 2)],
[(74, 3)],
[(72, 4)],
[(70, 5)],
[(71, 6)],
[(115, 7), (0, 6)],
[(70, 8)],
[(71, 9)],
[(0, 9)]],
{33: 1}),
291: ([[(7, 1)], [(27, 2)], [(116, 3)], [(70, 4)], [(71, 5)], [(0, 5)]],
{7: 1}),
292: ([[(33, 1)],
[(73, 2)],
[(74, 3)],
[(117, 4)],
[(118, 5), (0, 4)],
[(0, 5)]],
{33: 1}),
293: ([[(35, 1)], [(77, 2)], [(118, 3), (0, 2)], [(0, 3)]], {35: 1}),
294: ([[(47, 1), (119, 1)], [(0, 1)]], {33: 1, 35: 1}),
295: ([[(32, 1), (24, 1)], [(27, 2)], [(46, 1), (0, 2)]], {24: 1, 32: 1}),
296: ([[(35, 1)],
[(45, 2)],
[(70, 3)],
[(71, 4)],
[(115, 5), (120, 1), (0, 4)],
[(70, 6)],
[(71, 7)],
[(0, 7)]],
{35: 1}),
297: ([[(27, 1)], [(101, 2), (0, 1)], [(27, 3)], [(0, 3)]], {27: 1}),
298: ([[(121, 1)], [(46, 2), (0, 1)], [(121, 1), (0, 2)]], {27: 1}),
299: ([[(34, 1)],
[(98, 2), (103, 3)],
[(4, 4)],
[(98, 2), (4, 4), (103, 3)],
[(122, 5), (42, 5), (13, 6)],
[(0, 5)],
[(122, 7)],
[(52, 5)]],
{34: 1}),
300: ([[(4, 1)], [(123, 2)], [(0, 2)]], {4: 1}),
301: ([[(124, 1), (125, 1)], [(0, 1)]], {4: 1, 34: 1}),
302: ([[(22, 1)], [(70, 2), (126, 3)], [(45, 4)], [(70, 2)], [(0, 4)]],
{22: 1}),
303: ([[(45, 1)],
[(79, 2), (46, 3), (0, 1)],
[(0, 2)],
[(45, 4), (0, 3)],
[(46, 3), (0, 4)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
304: ([[(19, 1), (127, 2)], [(40, 2)], [(0, 2)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
305: ([[(22, 1)], [(70, 2), (126, 3)], [(77, 4)], [(70, 2)], [(0, 4)]],
{22: 1}),
306: ([[(128, 1), (117, 1)], [(0, 1)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
307: ([[(129, 1)], [(130, 0), (0, 1)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
308: ([[(13, 1)], [(52, 2), (126, 3)], [(0, 2)], [(52, 2)]], {13: 1}),
309: ([[(29, 1)], [(0, 1)]], {29: 1}),
310: ([[(131, 1)], [(132, 1), (44, 2), (0, 1)], [(109, 3)], [(0, 3)]],
{6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 31: 1}),
311: ([[(23, 1)],
[(45, 2), (133, 3), (0, 1)],
[(46, 4), (0, 2)],
[(45, 5)],
[(45, 2), (0, 4)],
[(46, 6), (0, 5)],
[(45, 7)],
[(46, 8), (0, 7)],
[(45, 7), (0, 8)]],
{23: 1}),
312: ([[(17, 1)],
[(45, 2), (0, 1)],
[(34, 3), (46, 4), (0, 2)],
[(45, 5)],
[(45, 6)],
[(0, 5)],
[(46, 3), (0, 6)]],
{17: 1}),
313: ([[(10, 1)], [(72, 2), (0, 1)], [(0, 2)]], {10: 1}),
314: ([[(134, 1)], [(133, 0), (135, 0), (0, 1)]],
{5: 1, 6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 30: 1, 31: 1, 37: 1}),
315: ([[(136, 1)], [(1, 2), (137, 3)], [(0, 2)], [(136, 1), (1, 2)]],
{4: 1,
5: 1,
6: 1,
8: 1,
10: 1,
11: 1,
12: 1,
13: 1,
14: 1,
15: 1,
17: 1,
18: 1,
19: 1,
20: 1,
22: 1,
23: 1,
24: 1,
25: 1,
27: 1,
28: 1,
29: 1,
30: 1,
31: 1,
32: 1,
34: 1,
37: 1}),
316: ([[(138, 1), (1, 1), (139, 2)], [(0, 1)], [(1, 1)]],
{1: 1,
4: 1,
5: 1,
6: 1,
7: 1,
8: 1,
9: 1,
10: 1,
11: 1,
12: 1,
13: 1,
14: 1,
15: 1,
16: 1,
17: 1,
18: 1,
19: 1,
20: 1,
21: 1,
22: 1,
23: 1,
24: 1,
25: 1,
26: 1,
27: 1,
28: 1,
29: 1,
30: 1,
31: 1,
32: 1,
33: 1,
34: 1,
35: 1,
36: 1,
37: 1}),
317: ([[(70, 1)], [(45, 2), (0, 1)], [(0, 2)]], {70: 1}),
318: ([[(140, 1),
(141, 1),
(142, 1),
(143, 1),
(144, 1),
(145, 1),
(146, 1),
(147, 1),
(148, 1)],
[(0, 1)]],
{4: 1,
5: 1,
6: 1,
8: 1,
10: 1,
11: 1,
12: 1,
13: 1,
14: 1,
15: 1,
17: 1,
18: 1,
19: 1,
20: 1,
22: 1,
23: 1,
24: 1,
25: 1,
27: 1,
28: 1,
29: 1,
30: 1,
31: 1,
32: 1,
34: 1,
37: 1}),
319: ([[(138, 1), (139, 1)], [(0, 1)]],
{4: 1,
5: 1,
6: 1,
7: 1,
8: 1,
9: 1,
10: 1,
11: 1,
12: 1,
13: 1,
14: 1,
15: 1,
16: 1,
17: 1,
18: 1,
19: 1,
20: 1,
21: 1,
22: 1,
23: 1,
24: 1,
25: 1,
26: 1,
27: 1,
28: 1,
29: 1,
30: 1,
31: 1,
32: 1,
33: 1,
34: 1,
35: 1,
36: 1,
37: 1}),
320: ([[(45, 1), (70, 2), (103, 3)],
[(70, 2), (0, 1)],
[(45, 4), (149, 5), (0, 2)],
[(103, 6)],
[(149, 5), (0, 4)],
[(0, 5)],
[(103, 5)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1,
70: 1,
103: 1}),
321: ([[(150, 1)], [(46, 2), (0, 1)], [(150, 1), (0, 2)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1,
70: 1,
103: 1}),
322: ([[(138, 1), (1, 2)],
[(0, 1)],
[(151, 3)],
[(3, 4)],
[(152, 1), (3, 4)]],
{1: 1,
4: 1,
5: 1,
6: 1,
8: 1,
10: 1,
11: 1,
12: 1,
13: 1,
14: 1,
15: 1,
17: 1,
18: 1,
19: 1,
20: 1,
22: 1,
23: 1,
24: 1,
25: 1,
27: 1,
28: 1,
29: 1,
30: 1,
31: 1,
32: 1,
34: 1,
37: 1}),
323: ([[(109, 1)], [(153, 0), (42, 0), (154, 0), (155, 0), (0, 1)]],
{5: 1, 6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 30: 1, 31: 1, 37: 1}),
324: ([[(117, 1), (156, 2)],
[(35, 3), (0, 1)],
[(0, 2)],
[(117, 4)],
[(115, 5)],
[(45, 2)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
325: ([[(45, 1)], [(46, 2), (0, 1)], [(45, 1), (0, 2)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
326: ([[(45, 1)], [(46, 0), (0, 1)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
327: ([[(45, 1)],
[(47, 2), (46, 3), (0, 1)],
[(0, 2)],
[(45, 4), (0, 3)],
[(46, 3), (0, 4)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
328: ([[(77, 1)],
[(46, 2), (0, 1)],
[(77, 3)],
[(46, 4), (0, 3)],
[(77, 3), (0, 4)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
329: ([[(13, 1), (103, 2), (31, 3)],
[(52, 4), (99, 5)],
[(27, 4)],
[(157, 6)],
[(0, 4)],
[(52, 4)],
[(56, 4)]],
{13: 1, 31: 1, 103: 1}),
330: ([[(9, 1)],
[(70, 2)],
[(71, 3)],
[(158, 4), (159, 5)],
[(70, 6)],
[(70, 7)],
[(71, 8)],
[(71, 9)],
[(158, 4), (115, 10), (159, 5), (0, 8)],
[(0, 9)],
[(70, 11)],
[(71, 12)],
[(159, 5), (0, 12)]],
{9: 1}),
331: ([[(42, 1), (44, 2), (160, 3)],
[(27, 4), (46, 5), (0, 1)],
[(27, 6)],
[(48, 7), (46, 8), (0, 3)],
[(46, 5), (0, 4)],
[(27, 9), (44, 2)],
[(0, 6)],
[(45, 10)],
[(42, 1), (44, 2), (160, 3), (0, 8)],
[(48, 11), (46, 5), (0, 9)],
[(46, 8), (0, 10)],
[(45, 4)]],
{13: 1, 27: 1, 42: 1, 44: 1}),
332: ([[(13, 1), (27, 2)], [(161, 3)], [(0, 2)], [(52, 2)]], {13: 1, 27: 1}),
333: ([[(160, 1)], [(46, 2), (0, 1)], [(160, 1), (0, 2)]], {13: 1, 27: 1}),
334: ([[(26, 1)],
[(45, 2)],
[(70, 3)],
[(71, 4)],
[(115, 5), (0, 4)],
[(70, 6)],
[(71, 7)],
[(0, 7)]],
{26: 1}),
335: ([[(45, 1)], [(101, 2), (0, 1)], [(87, 3)], [(0, 3)]],
{5: 1,
6: 1,
8: 1,
13: 1,
18: 1,
19: 1,
20: 1,
22: 1,
27: 1,
30: 1,
31: 1,
37: 1}),
336: ([[(36, 1)],
[(45, 2)],
[(70, 3), (162, 4)],
[(71, 5)],
[(70, 3)],
[(0, 5)]],
{36: 1}),
337: ([[(101, 1)], [(87, 2)], [(0, 2)]], {101: 1}),
338: ([[(163, 1)], [(164, 0), (0, 1)]],
{5: 1, 6: 1, 8: 1, 13: 1, 18: 1, 20: 1, 27: 1, 30: 1, 31: 1, 37: 1}),
339: ([[(12, 1)], [(72, 2), (0, 1)], [(0, 2)]], {12: 1}),
340: ([[(54, 1)], [(0, 1)]], {12: 1})}
states = \
[[[(1, 0), (2, 1), (3, 0)], [(0, 1)]],
[[(38, 1)], [(39, 0), (0, 1)]],
[[(40, 1)], [(41, 0), (0, 1)]],
[[(42, 1), (43, 2), (44, 3)],
[(45, 4)],
[(46, 5), (0, 2)],
[(45, 6)],
[(46, 7), (0, 4)],
[(42, 1), (43, 2), (44, 3), (0, 5)],
[(0, 6)],
[(43, 4), (44, 3)]],
[[(45, 1)], [(47, 2), (48, 3), (0, 1)], [(0, 2)], [(45, 2)]],
[[(49, 1)], [(30, 0), (37, 0), (0, 1)]],
[[(11, 1)], [(45, 2)], [(46, 3), (0, 2)], [(45, 4)], [(0, 4)]],
[[(18, 1), (6, 2), (31, 5), (13, 4), (20, 3), (8, 6), (27, 2)],
[(18, 1), (0, 1)],
[(0, 2)],
[(50, 7), (51, 2)],
[(52, 2), (53, 8), (54, 8)],
[(55, 9), (56, 2)],
[(57, 10)],
[(51, 2)],
[(52, 2)],
[(56, 2)],
[(8, 2)]],
[[(58, 1),
(59, 1),
(60, 1),
(61, 1),
(62, 1),
(63, 1),
(64, 1),
(65, 1),
(66, 1),
(67, 1),
(68, 1),
(69, 1)],
[(0, 1)]],
[[(14, 1)], [(0, 1)]],
[[(21, 1)],
[(27, 2)],
[(70, 3), (13, 4)],
[(71, 5)],
[(52, 6), (72, 7)],
[(0, 5)],
[(70, 3)],
[(52, 6)]],
[[(33, 1)], [(73, 2)], [(74, 3)], [(75, 4)], [(76, 5), (0, 4)], [(0, 5)]],
[[(35, 1)], [(77, 2)], [(76, 3), (0, 2)], [(0, 3)]],
[[(78, 1), (79, 1)], [(0, 1)]],
[[(80, 1),
(81, 1),
(19, 2),
(80, 1),
(82, 1),
(74, 1),
(83, 1),
(84, 3),
(85, 1),
(86, 1)],
[(0, 1)],
[(74, 1)],
[(19, 1), (0, 3)]],
[[(87, 1)], [(88, 0), (0, 1)]],
[[(89, 1), (90, 1), (91, 1), (92, 1), (93, 1), (94, 1), (95, 1), (96, 1)],
[(0, 1)]],
[[(15, 1)], [(0, 1)]],
[[(97, 1)], [(95, 2), (92, 2)], [(0, 2)]],
[[(16, 1)],
[(98, 2)],
[(13, 4), (1, 3)],
[(0, 3)],
[(52, 5), (99, 6)],
[(1, 3)],
[(52, 5)]],
[[(100, 1)], [(100, 1), (0, 1)]],
[[(28, 1)], [(73, 2)], [(0, 2)]],
[[(45, 1)],
[(70, 2), (79, 3), (46, 4), (0, 1)],
[(45, 5)],
[(0, 3)],
[(45, 6), (0, 4)],
[(79, 3), (46, 7), (0, 5)],
[(46, 4), (0, 6)],
[(45, 8), (0, 7)],
[(70, 9)],
[(45, 10)],
[(46, 7), (0, 10)]],
[[(98, 1)], [(101, 2), (0, 1)], [(27, 3)], [(0, 3)]],
[[(102, 1)], [(46, 0), (0, 1)]],
[[(27, 1)], [(103, 0), (0, 1)]],
[[(72, 1)], [(1, 1), (2, 2)], [(0, 2)]],
[[(104, 1)],
[(45, 2), (0, 1)],
[(101, 3), (46, 3), (0, 2)],
[(45, 4)],
[(0, 4)]],
[[(25, 1)],
[(87, 2)],
[(74, 3), (0, 2)],
[(45, 4)],
[(46, 5), (0, 4)],
[(45, 6)],
[(0, 6)]],
[[(105, 1)], [(106, 0), (0, 1)]],
[[(72, 1)],
[(107, 2), (48, 3), (0, 1)],
[(72, 4), (54, 4)],
[(72, 5), (54, 5)],
[(0, 4)],
[(48, 3), (0, 5)]],
[[(87, 1)], [(46, 2), (0, 1)], [(87, 1), (0, 2)]],
[[(30, 1), (108, 2), (37, 1), (5, 1)], [(109, 2)], [(0, 2)]],
[[(110, 1), (111, 1), (112, 1), (113, 1), (114, 1)], [(0, 1)]],
[[(33, 1)],
[(73, 2)],
[(74, 3)],
[(72, 4)],
[(70, 5)],
[(71, 6)],
[(115, 7), (0, 6)],
[(70, 8)],
[(71, 9)],
[(0, 9)]],
[[(7, 1)], [(27, 2)], [(116, 3)], [(70, 4)], [(71, 5)], [(0, 5)]],
[[(33, 1)], [(73, 2)], [(74, 3)], [(117, 4)], [(118, 5), (0, 4)], [(0, 5)]],
[[(35, 1)], [(77, 2)], [(118, 3), (0, 2)], [(0, 3)]],
[[(47, 1), (119, 1)], [(0, 1)]],
[[(32, 1), (24, 1)], [(27, 2)], [(46, 1), (0, 2)]],
[[(35, 1)],
[(45, 2)],
[(70, 3)],
[(71, 4)],
[(115, 5), (120, 1), (0, 4)],
[(70, 6)],
[(71, 7)],
[(0, 7)]],
[[(27, 1)], [(101, 2), (0, 1)], [(27, 3)], [(0, 3)]],
[[(121, 1)], [(46, 2), (0, 1)], [(121, 1), (0, 2)]],
[[(34, 1)],
[(98, 2), (103, 3)],
[(4, 4)],
[(98, 2), (4, 4), (103, 3)],
[(122, 5), (42, 5), (13, 6)],
[(0, 5)],
[(122, 7)],
[(52, 5)]],
[[(4, 1)], [(123, 2)], [(0, 2)]],
[[(124, 1), (125, 1)], [(0, 1)]],
[[(22, 1)], [(70, 2), (126, 3)], [(45, 4)], [(70, 2)], [(0, 4)]],
[[(45, 1)],
[(79, 2), (46, 3), (0, 1)],
[(0, 2)],
[(45, 4), (0, 3)],
[(46, 3), (0, 4)]],
[[(19, 1), (127, 2)], [(40, 2)], [(0, 2)]],
[[(22, 1)], [(70, 2), (126, 3)], [(77, 4)], [(70, 2)], [(0, 4)]],
[[(128, 1), (117, 1)], [(0, 1)]],
[[(129, 1)], [(130, 0), (0, 1)]],
[[(13, 1)], [(52, 2), (126, 3)], [(0, 2)], [(52, 2)]],
[[(29, 1)], [(0, 1)]],
[[(131, 1)], [(132, 1), (44, 2), (0, 1)], [(109, 3)], [(0, 3)]],
[[(23, 1)],
[(45, 2), (133, 3), (0, 1)],
[(46, 4), (0, 2)],
[(45, 5)],
[(45, 2), (0, 4)],
[(46, 6), (0, 5)],
[(45, 7)],
[(46, 8), (0, 7)],
[(45, 7), (0, 8)]],
[[(17, 1)],
[(45, 2), (0, 1)],
[(34, 3), (46, 4), (0, 2)],
[(45, 5)],
[(45, 6)],
[(0, 5)],
[(46, 3), (0, 6)]],
[[(10, 1)], [(72, 2), (0, 1)], [(0, 2)]],
[[(134, 1)], [(133, 0), (135, 0), (0, 1)]],
[[(136, 1)], [(1, 2), (137, 3)], [(0, 2)], [(136, 1), (1, 2)]],
[[(138, 1), (1, 1), (139, 2)], [(0, 1)], [(1, 1)]],
[[(70, 1)], [(45, 2), (0, 1)], [(0, 2)]],
[[(140, 1),
(141, 1),
(142, 1),
(143, 1),
(144, 1),
(145, 1),
(146, 1),
(147, 1),
(148, 1)],
[(0, 1)]],
[[(138, 1), (139, 1)], [(0, 1)]],
[[(45, 1), (70, 2), (103, 3)],
[(70, 2), (0, 1)],
[(45, 4), (149, 5), (0, 2)],
[(103, 6)],
[(149, 5), (0, 4)],
[(0, 5)],
[(103, 5)]],
[[(150, 1)], [(46, 2), (0, 1)], [(150, 1), (0, 2)]],
[[(138, 1), (1, 2)], [(0, 1)], [(151, 3)], [(3, 4)], [(152, 1), (3, 4)]],
[[(109, 1)], [(153, 0), (42, 0), (154, 0), (155, 0), (0, 1)]],
[[(117, 1), (156, 2)],
[(35, 3), (0, 1)],
[(0, 2)],
[(117, 4)],
[(115, 5)],
[(45, 2)]],
[[(45, 1)], [(46, 2), (0, 1)], [(45, 1), (0, 2)]],
[[(45, 1)], [(46, 0), (0, 1)]],
[[(45, 1)],
[(47, 2), (46, 3), (0, 1)],
[(0, 2)],
[(45, 4), (0, 3)],
[(46, 3), (0, 4)]],
[[(77, 1)],
[(46, 2), (0, 1)],
[(77, 3)],
[(46, 4), (0, 3)],
[(77, 3), (0, 4)]],
[[(13, 1), (103, 2), (31, 3)],
[(52, 4), (99, 5)],
[(27, 4)],
[(157, 6)],
[(0, 4)],
[(52, 4)],
[(56, 4)]],
[[(9, 1)],
[(70, 2)],
[(71, 3)],
[(158, 4), (159, 5)],
[(70, 6)],
[(70, 7)],
[(71, 8)],
[(71, 9)],
[(158, 4), (115, 10), (159, 5), (0, 8)],
[(0, 9)],
[(70, 11)],
[(71, 12)],
[(159, 5), (0, 12)]],
[[(42, 1), (44, 2), (160, 3)],
[(27, 4), (46, 5), (0, 1)],
[(27, 6)],
[(48, 7), (46, 8), (0, 3)],
[(46, 5), (0, 4)],
[(27, 9), (44, 2)],
[(0, 6)],
[(45, 10)],
[(42, 1), (44, 2), (160, 3), (0, 8)],
[(48, 11), (46, 5), (0, 9)],
[(46, 8), (0, 10)],
[(45, 4)]],
[[(13, 1), (27, 2)], [(161, 3)], [(0, 2)], [(52, 2)]],
[[(160, 1)], [(46, 2), (0, 1)], [(160, 1), (0, 2)]],
[[(26, 1)],
[(45, 2)],
[(70, 3)],
[(71, 4)],
[(115, 5), (0, 4)],
[(70, 6)],
[(71, 7)],
[(0, 7)]],
[[(45, 1)], [(101, 2), (0, 1)], [(87, 3)], [(0, 3)]],
[[(36, 1)], [(45, 2)], [(70, 3), (162, 4)], [(71, 5)], [(70, 3)], [(0, 5)]],
[[(101, 1)], [(87, 2)], [(0, 2)]],
[[(163, 1)], [(164, 0), (0, 1)]],
[[(12, 1)], [(72, 2), (0, 1)], [(0, 2)]],
[[(54, 1)], [(0, 1)]]]
labels = \
[(0, 'EMPTY'),
(4, None),
(0, None),
(319, None),
(1, 'import'),
(32, None),
(2, None),
(1, 'def'),
(25, None),
(1, 'try'),
(1, 'return'),
(1, 'assert'),
(1, 'yield'),
(7, None),
(1, 'break'),
(1, 'continue'),
(50, None),
(1, 'raise'),
(3, None),
(1, 'not'),
(26, None),
(1, 'class'),
(1, 'lambda'),
(1, 'print'),
(1, 'nonlocal'),
(1, 'exec'),
(1, 'while'),
(1, None),
(1, 'del'),
(1, 'pass'),
(15, None),
(9, None),
(1, 'global'),
(1, 'for'),
(1, 'from'),
(1, 'if'),
(1, 'with'),
(14, None),
(314, None),
(19, None),
(304, None),
(1, 'and'),
(16, None),
(260, None),
(36, None),
(324, None),
(12, None),
(292, None),
(22, None),
(323, None),
(278, None),
(27, None),
(8, None),
(327, None),
(339, None),
(303, None),
(10, None),
(326, None),
(46, None),
(39, None),
(41, None),
(47, None),
(42, None),
(43, None),
(37, None),
(44, None),
(49, None),
(40, None),
(38, None),
(45, None),
(11, None),
(322, None),
(325, None),
(287, None),
(1, 'in'),
(328, None),
(269, None),
(306, None),
(268, None),
(267, None),
(29, None),
(21, None),
(28, None),
(30, None),
(1, 'is'),
(31, None),
(20, None),
(285, None),
(270, None),
(330, None),
(296, None),
(290, None),
(266, None),
(336, None),
(334, None),
(291, None),
(274, None),
(276, None),
(281, None),
(259, None),
(275, None),
(1, 'as'),
(279, None),
(23, None),
(1, 'except'),
(338, None),
(18, None),
(264, None),
(310, None),
(288, None),
(265, None),
(273, None),
(312, None),
(313, None),
(340, None),
(1, 'else'),
(308, None),
(307, None),
(294, None),
(293, None),
(1, 'elif'),
(297, None),
(298, None),
(280, None),
(300, None),
(299, None),
(331, None),
(271, None),
(305, None),
(258, None),
(1, 'or'),
(263, None),
(329, None),
(35, None),
(261, None),
(34, None),
(318, None),
(13, None),
(315, None),
(272, None),
(289, None),
(277, None),
(286, None),
(309, None),
(311, None),
(262, None),
(284, None),
(295, None),
(301, None),
(317, None),
(320, None),
(5, None),
(6, None),
(48, None),
(17, None),
(24, None),
(302, None),
(321, None),
(283, None),
(1, 'finally'),
(332, None),
(333, None),
(337, None),
(257, None),
(33, None)]
keywords = \
{'and': 41,
'as': 101,
'assert': 11,
'break': 14,
'class': 21,
'continue': 15,
'def': 7,
'del': 28,
'elif': 120,
'else': 115,
'except': 104,
'exec': 25,
'finally': 159,
'for': 33,
'from': 34,
'global': 32,
'if': 35,
'import': 4,
'in': 74,
'is': 84,
'lambda': 22,
'nonlocal': 24,
'not': 19,
'or': 130,
'pass': 29,
'print': 23,
'raise': 17,
'return': 10,
'try': 9,
'while': 26,
'with': 36,
'yield': 12}
tokens = \
{0: 2,
1: 27,
2: 6,
3: 18,
4: 1,
5: 151,
6: 152,
7: 13,
8: 52,
9: 31,
10: 56,
11: 70,
12: 46,
13: 137,
14: 37,
15: 30,
16: 42,
17: 154,
18: 106,
19: 39,
20: 86,
21: 81,
22: 48,
23: 103,
24: 155,
25: 8,
26: 20,
27: 51,
28: 82,
29: 80,
30: 83,
31: 85,
32: 5,
33: 164,
34: 135,
35: 133,
36: 44,
37: 64,
38: 68,
39: 59,
40: 67,
41: 60,
42: 62,
43: 63,
44: 65,
45: 69,
46: 58,
47: 61,
48: 153,
49: 66,
50: 16}
start = 256
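# The tables above (dfas/states, labels, keywords, tokens, start) appear to
# follow the pgen grammar layout used by lib2to3-style parsers.  A minimal,
# illustrative sketch of how a parser driver maps a lexed token onto a label
# index -- assuming the numeric token types come from the standard `token`
# module; this helper is not part of the generated data:
import token as _token

def _classify(tok_type, tok_value):
    # Keywords are NAME tokens that have a dedicated label of their own.
    if tok_type == _token.NAME and tok_value in keywords:
        return keywords[tok_value]
    # Every other token type maps through the generic `tokens` table.
    ilabel = tokens.get(tok_type)
    if ilabel is None:
        raise SyntaxError('unexpected token: %r' % (tok_value,))
    return ilabel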
|
|
from copy import deepcopy
from django.db.models import Q
from django.http import HttpResponseBadRequest
from django.shortcuts import render
from bestiary.models import Fusion, ESSENCE_MAP, Monster
from herders.decorators import username_case_redirect
from herders.models import Summoner, MonsterInstance, MonsterPiece, MaterialStorage, MonsterShrineStorage
@username_case_redirect
def fusion_progress(request, profile_name):
try:
summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
except Summoner.DoesNotExist:
return render(request, 'herders/profile/not_found.html')
is_owner = (request.user.is_authenticated and summoner.user == request.user)
fusions = Fusion.objects.all()
context = {
'view': 'fusion',
'profile_name': profile_name,
'summoner': summoner,
'is_owner': is_owner,
'fusions': fusions,
}
return render(request, 'herders/profile/fusion/base.html', context)
@username_case_redirect
def fusion_progress_detail(request, profile_name, monster_slug):
try:
summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
except Summoner.DoesNotExist:
return HttpResponseBadRequest()
is_owner = (request.user.is_authenticated and summoner.user == request.user)
context = {
'view': 'fusion',
'profile_name': profile_name,
'summoner': summoner,
'is_owner': is_owner,
}
if is_owner or summoner.public:
try:
fusion = Fusion.objects.select_related(
'product'
).prefetch_related(
'ingredients'
).get(product__bestiary_slug=monster_slug)
except Fusion.DoesNotExist:
return HttpResponseBadRequest()
else:
level = 10 + fusion.product.base_stars * 5
ingredients = []
# Check if fusion has been completed already
fusion_complete = MonsterInstance.objects.filter(
Q(owner=summoner), Q(monster=fusion.product) | Q(monster=fusion.product.awakens_to)
).exists()
# Scan summoner's collection for instances of each ingredient
fusion_ready = True
for ingredient in fusion.ingredients.all().select_related('awakens_from', 'awakens_to'):
owned_ingredients = MonsterInstance.objects.filter(
Q(owner=summoner),
Q(monster=ingredient) | Q(monster=ingredient.awakens_from),
).order_by('-stars', '-level', '-monster__is_awakened')
owned_ingredient_pieces = MonsterPiece.objects.filter(
Q(owner=summoner),
Q(monster=ingredient) | Q(monster=ingredient.awakens_from),
).first()
owned_shrines = MonsterShrineStorage.objects.select_related('item').filter(Q(owner=summoner), Q(item=ingredient) | Q(item=ingredient.awakens_from))
# Determine whether each individual requirement is met, using the highest evolved/leveled monster that is not ignored for fusion
for owned_ingredient in owned_ingredients:
if not owned_ingredient.ignore_for_fusion:
acquired = True
evolved = owned_ingredient.stars >= fusion.product.base_stars
leveled = owned_ingredient.level >= level
awakened = owned_ingredient.monster.is_awakened
complete = acquired & evolved & leveled & awakened
break
else:
if owned_ingredient_pieces:
acquired = owned_ingredient_pieces.can_summon()
else:
acquired = False
evolved = False
leveled = False
awakened = False
complete = False
for owned_shrine in owned_shrines:
# shrine monsters are never evolved or leveled, so they can't complete the requirement on their own
acquired = True
if owned_shrine.item.awaken_level == Monster.AWAKEN_LEVEL_AWAKENED:
# best possible outcome: an awakened monster in the Shrine, no point in checking further
awakened = True
break
if not complete:
fusion_ready = False
# Check if this ingredient is fusable
sub_fusion = None
sub_fusion_awakening_cost = None
try:
sub_fusion = Fusion.objects.get(product=ingredient.awakens_from)
except Fusion.DoesNotExist:
pass
else:
if not acquired:
awakened_sub_fusion_ingredients = MonsterInstance.objects.filter(
monster__pk__in=sub_fusion.ingredients.values_list('pk', flat=True),
ignore_for_fusion=False,
owner=summoner,
)
sub_fusion_awakening_cost = sub_fusion.total_awakening_cost(awakened_sub_fusion_ingredients)
ingredient_progress = {
'instance': ingredient,
'owned': owned_ingredients,
'pieces': owned_ingredient_pieces,
'shrine': sum(o.quantity for o in owned_shrines),
'complete': complete,
'acquired': acquired,
'evolved': evolved,
'leveled': leveled,
'awakened': awakened,
'is_fuseable': True if sub_fusion else False,
'sub_fusion_cost': sub_fusion_awakening_cost,
}
ingredients.append(ingredient_progress)
awakened_owned_ingredients = MonsterInstance.objects.filter(
monster__pk__in=fusion.ingredients.values_list('pk', flat=True),
ignore_for_fusion=False,
owner=summoner,
)
total_cost = fusion.total_awakening_cost(awakened_owned_ingredients)
# Calculate fulfilled/missing essences
summoner_storage = {ms.item.com2us_id: ms for ms in MaterialStorage.objects.select_related('item').filter(owner=summoner)}
essence_storage = {
element: {
size: summoner_storage[ESSENCE_MAP[element][size]].quantity if ESSENCE_MAP[element][size] in summoner_storage else 0
for size, _ in element_sizes.items()
}
for element, element_sizes in ESSENCE_MAP.items()
}
total_missing = {
element: {
size: total_cost[element][size] - essence_storage[element][size] if total_cost[element][size] > essence_storage[element][size] else 0
for size, qty in element_sizes.items()
}
for element, element_sizes in total_cost.items()
}
# Check if there are any missing
essences_satisfied = True
for sizes in total_missing.values():
for qty in sizes.values():
if qty > 0:
essences_satisfied = False
# Determine the total/missing essences including sub-fusions
if fusion.sub_fusion_available():
total_sub_fusion_cost = deepcopy(total_cost)
for ingredient in ingredients:
if ingredient['sub_fusion_cost']:
for element, sizes in total_sub_fusion_cost.items():
for size, qty in sizes.items():
total_sub_fusion_cost[element][size] += ingredient['sub_fusion_cost'][element][size]
# Now determine what's missing based on owner's storage
sub_fusion_total_missing = {
element: {
size: total_sub_fusion_cost[element][size] - essence_storage[element][size] if total_sub_fusion_cost[element][size] > essence_storage[element][size] else 0
for size, qty in element_sizes.items()
}
for element, element_sizes in total_sub_fusion_cost.items()
}
sub_fusion_mats_satisfied = True
for sizes in sub_fusion_total_missing.values():
for qty in sizes.values():
if qty > 0:
sub_fusion_mats_satisfied = False
else:
sub_fusion_total_missing = None
sub_fusion_mats_satisfied = None
progress = {
'instance': fusion.product,
'acquired': fusion_complete,
'stars': fusion.product.base_stars,
'level': level,
'cost': fusion.cost,
'ingredients': ingredients,
'awakening_mats': essence_storage,
'awakening_mats_cost': total_cost,
'awakening_mats_sufficient': essences_satisfied,
'awakening_mats_missing': total_missing,
'sub_fusion_mats_missing': sub_fusion_total_missing,
'sub_fusion_mats_sufficient': sub_fusion_mats_satisfied,
'ready': fusion_ready,
}
context['fusion'] = progress
return render(request, 'herders/profile/fusion/fusion_detail.html', context)
else:
return render(request, 'herders/profile/not_public.html', context)
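# Illustrative sketch, separate from the views above: the essence bookkeeping
# in fusion_progress_detail reduces to an element-wise max(cost - storage, 0)
# over nested {element: {size: quantity}} dicts.  The helper and numbers below
# are for demonstration only and are not used by the views.
def _missing_essences(cost, storage):
    return {
        element: {
            size: max(qty - storage.get(element, {}).get(size, 0), 0)
            for size, qty in sizes.items()
        }
        for element, sizes in cost.items()
    }
# Example: _missing_essences({'water': {'low': 10, 'mid': 5}}, {'water': {'low': 4}})
# returns {'water': {'low': 6, 'mid': 5}}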
|
|
# -*- coding: utf-8 -*-
"""The command line interface (CLI) tools classes."""
import abc
import codecs
import datetime
import locale
import re
import sys
import time
import textwrap
import pytz
try:
import resource
except ImportError:
resource = None
import plaso
from plaso.cli import logger
from plaso.cli import views
from plaso.lib import errors
class CLITool(object):
"""Command line interface tool.
Attributes:
preferred_encoding (str): preferred encoding of single-byte or multi-byte
character strings, sometimes referred to as extended ASCII.
show_troubleshooting (bool): True if troubleshooting information should
be shown.
"""
NAME = ''
# The maximum number of characters of a line written to the output writer.
_LINE_LENGTH = 80
# The fallback preferred encoding.
_PREFERRED_ENCODING = 'utf-8'
_UNICODE_SURROGATES_RE = re.compile('[\ud800-\udfff]')
def __init__(self, input_reader=None, output_writer=None):
"""Initializes a command line interface tool.
Args:
input_reader (Optional[CLIInputReader]): input reader, where None
indicates that the stdin input reader should be used.
output_writer (Optional[CLIOutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
"""
super(CLITool, self).__init__()
preferred_encoding = locale.getpreferredencoding()
if not preferred_encoding:
preferred_encoding = self._PREFERRED_ENCODING
elif isinstance(preferred_encoding, bytes):
preferred_encoding = preferred_encoding.decode('utf-8')
if not input_reader:
input_reader = StdinInputReader(encoding=preferred_encoding)
if not output_writer:
output_writer = StdoutOutputWriter(encoding=preferred_encoding)
self._data_location = None
self._debug_mode = False
self._encode_errors = 'strict'
self._has_user_warning = False
self._input_reader = input_reader
self._log_file = None
self._output_writer = output_writer
self._quiet_mode = False
self._unattended_mode = False
self._views_format_type = views.ViewsFactory.FORMAT_TYPE_CLI
self._vfs_back_end = 'auto'
self.preferred_encoding = preferred_encoding
self.show_troubleshooting = False
@property
def data_location(self):
"""str: path of the data files."""
return self._data_location
def _CanEnforceProcessMemoryLimit(self):
"""Determines if a process memory limit can be enforced.
Returns:
bool: True if a process memory limit can be enforced, False otherwise.
"""
return bool(resource)
def _EncodeString(self, string):
"""Encodes a string in the preferred encoding.
Returns:
bytes: encoded string.
"""
try:
# Note that encode() will first convert string into a Unicode string
# if necessary.
encoded_string = string.encode(
self.preferred_encoding, errors=self._encode_errors)
except UnicodeEncodeError:
if self._encode_errors == 'strict':
logger.error(
'Unable to properly write output due to encoding error. '
'Switching to error tolerant encoding which can result in '
'non Basic Latin (C0) characters being replaced with "?" or '
'"\\ufffd".')
self._encode_errors = 'replace'
encoded_string = string.encode(
self.preferred_encoding, errors=self._encode_errors)
return encoded_string
def _EnforceProcessMemoryLimit(self, memory_limit):
"""Enforces a process memory limit.
Args:
memory_limit (int): maximum number of bytes the process is allowed
to allocate, where 0 represents no limit and None a default of
4 GiB.
"""
# Resource is not supported on Windows.
if resource:
if memory_limit is None:
memory_limit = 4 * 1024 * 1024 * 1024
elif memory_limit == 0:
memory_limit = resource.RLIM_INFINITY
resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, memory_limit))
def _GetPathSpecificationString(self, path_spec):
"""Retrieves a printable string representation of the path specification.
Args:
path_spec (dfvfs.PathSpec): path specification.
Returns:
str: printable string representation of the path specification.
"""
path_spec_string = path_spec.comparable
if self._UNICODE_SURROGATES_RE.search(path_spec_string):
path_spec_string = path_spec_string.encode(
'utf-8', errors='surrogateescape')
path_spec_string = path_spec_string.decode(
'utf-8', errors='backslashreplace')
return path_spec_string
def _ParseInformationalOptions(self, options):
"""Parses the informational options.
Args:
options (argparse.Namespace): command line arguments.
"""
self._debug_mode = getattr(options, 'debug', False)
self._quiet_mode = getattr(options, 'quiet', False)
self._unattended_mode = getattr(options, 'unattended', False)
if self._debug_mode and self._quiet_mode:
logger.warning(
'Cannot use debug and quiet mode at the same time, defaulting to '
'debug output.')
def _ParseLogFileOptions(self, options):
"""Parses the log file options.
Args:
options (argparse.Namespace): command line arguments.
"""
self._log_file = self.ParseStringOption(options, 'log_file')
if not self._log_file:
local_date_time = datetime.datetime.now()
self._log_file = (
'{0:s}-{1:04d}{2:02d}{3:02d}T{4:02d}{5:02d}{6:02d}.log.gz').format(
self.NAME, local_date_time.year, local_date_time.month,
local_date_time.day, local_date_time.hour, local_date_time.minute,
local_date_time.second)
def _PrintUserWarning(self, warning_text):
"""Prints a warning to the user.
Args:
warning_text (str): text used to warn the user.
"""
warning_text = 'WARNING: {0:s}'.format(warning_text)
warning_text = textwrap.wrap(warning_text, 80)
print('\n'.join(warning_text))
print('')
self._has_user_warning = True
def _PromptUserForInput(self, input_text):
"""Prompts user for an input.
Args:
input_text (str): text used for prompting the user for input.
Returns:
str: input read from the user.
"""
self._output_writer.Write('{0:s}: '.format(input_text))
return self._input_reader.Read()
def _WaitUserWarning(self):
"""Waits 15 seconds after printing warnings to the user."""
if self._has_user_warning:
print('Waiting for 15 seconds to give you time to cancel.')
print('')
time.sleep(15)
def AddBasicOptions(self, argument_group):
"""Adds the basic options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
version_string = self.GetVersionInformation()
# We want a custom help message and not the default argparse one.
argument_group.add_argument(
'-h', '--help', action='help',
help='Show this help message and exit.')
argument_group.add_argument(
'--troubles', dest='show_troubleshooting', action='store_true',
default=False, help='Show troubleshooting information.')
argument_group.add_argument(
'-V', '--version', dest='version', action='version',
version=version_string, help='Show the version information.')
def AddInformationalOptions(self, argument_group):
"""Adds the informational options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='Enable debug output.')
argument_group.add_argument(
'-q', '--quiet', dest='quiet', action='store_true', default=False,
help='Disable informational output.')
argument_group.add_argument(
'-u', '--unattended', dest='unattended', action='store_true',
default=False, help=(
'Enable unattended mode and do not ask the user for additional '
'input when needed, but terminate with an error instead.'))
def AddLogFileOptions(self, argument_group):
"""Adds the log file option to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--logfile', '--log_file', '--log-file', action='store',
metavar='FILENAME', dest='log_file', type=str, default='', help=(
'Path of the file in which to store log messages, by default '
'this file will be named: "{0:s}-YYYYMMDDThhmmss.log.gz". Note '
'that the file will be gzip compressed if the extension is '
'".gz".').format(self.NAME))
def CheckOutDated(self):
"""Checks if the version of plaso is outdated and warns the user."""
version_date_time = datetime.datetime(
int(plaso.__version__[0:4], 10),
int(plaso.__version__[4:6], 10),
int(plaso.__version__[6:8], 10))
date_time_delta = datetime.datetime.utcnow() - version_date_time
if date_time_delta.days > 180:
logger.warning('This version of plaso is more than 6 months old.')
self._PrintUserWarning((
'the version of plaso you are using is more than 6 months old. We '
'strongly recommend updating it.'))
def GetCommandLineArguments(self):
"""Retrieves the command line arguments.
Returns:
str: command line arguments.
"""
command_line_arguments = sys.argv
if not command_line_arguments:
return ''
if isinstance(command_line_arguments[0], bytes):
encoding = sys.stdin.encoding
# Note that sys.stdin.encoding can be None.
if not encoding:
encoding = self.preferred_encoding
try:
command_line_arguments = [
argument.decode(encoding) for argument in command_line_arguments]
except UnicodeDecodeError:
logger.error(
'Unable to properly read command line input due to encoding '
'error. Replacing non Basic Latin (C0) characters with "?" or '
'"\\ufffd".')
command_line_arguments = [
argument.decode(encoding, errors='replace')
for argument in command_line_arguments]
return ' '.join(command_line_arguments)
def GetVersionInformation(self):
"""Retrieves the version information.
Returns:
str: version information.
"""
return 'plaso - {0:s} version {1:s}'.format(self.NAME, plaso.__version__)
def ListTimeZones(self):
"""Lists the timezones."""
max_length = 0
for timezone_name in pytz.all_timezones:
if len(timezone_name) > max_length:
max_length = len(timezone_name)
utc_date_time = datetime.datetime.utcnow()
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Timezone', 'UTC Offset'],
title='Zones')
for timezone_name in pytz.all_timezones:
try:
local_timezone = pytz.timezone(timezone_name)
except AssertionError as exception:
logger.error((
'Unable to determine information about timezone: {0:s} with '
'error: {1!s}').format(timezone_name, exception))
continue
local_date_string = '{0!s}'.format(
local_timezone.localize(utc_date_time))
if '+' in local_date_string:
_, _, diff = local_date_string.rpartition('+')
diff_string = '+{0:s}'.format(diff)
else:
_, _, diff = local_date_string.rpartition('-')
diff_string = '-{0:s}'.format(diff)
table_view.AddRow([timezone_name, diff_string])
table_view.Write(self._output_writer)
def ParseNumericOption(self, options, name, base=10, default_value=None):
"""Parses a numeric option.
If the option is not set the default value is returned.
Args:
options (argparse.Namespace): command line arguments.
name (str): name of the numeric option.
base (Optional[int]): base of the numeric value.
default_value (Optional[object]): default value.
Returns:
int: numeric value.
Raises:
BadConfigOption: if the options are invalid.
"""
numeric_value = getattr(options, name, None)
if not numeric_value:
return default_value
try:
return int(numeric_value, base)
except (TypeError, ValueError):
name = name.replace('_', ' ')
raise errors.BadConfigOption(
'Unsupported numeric value {0:s}: {1!s}.'.format(
name, numeric_value))
def ParseStringOption(self, options, argument_name, default_value=None):
"""Parses a string command line argument.
Args:
options (argparse.Namespace): command line arguments.
argument_name (str): name of the command line argument.
default_value (Optional[object]): default value of the command line
argument.
Returns:
object: command line argument value. If the command line argument is
not set the default value will be returned.
Raises:
BadConfigOption: if the command line argument value cannot be converted
to a Unicode string.
"""
argument_value = getattr(options, argument_name, None)
if not argument_value:
return default_value
if isinstance(argument_value, bytes):
encoding = sys.stdin.encoding
# Note that sys.stdin.encoding can be None.
if not encoding:
encoding = self.preferred_encoding
try:
argument_value = codecs.decode(argument_value, encoding)
except UnicodeDecodeError as exception:
raise errors.BadConfigOption((
'Unable to convert option: {0:s} to Unicode with error: '
'{1!s}.').format(argument_name, exception))
elif not isinstance(argument_value, str):
raise errors.BadConfigOption(
'Unsupported option: {0:s} string type required.'.format(
argument_name))
return argument_value
def PrintSeparatorLine(self):
"""Prints a separator line."""
self._output_writer.Write('-' * self._LINE_LENGTH)
self._output_writer.Write('\n')
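# Illustrative sketch only, not an actual plaso tool: a minimal subclass
# showing how the helpers above are typically combined.  The 'workers'
# option name is an assumption made for demonstration purposes.
class _ExampleTool(CLITool):
  """Example command line tool built on top of CLITool."""

  NAME = 'example'

  def ParseOptions(self, options):
    """Parses tool specific options.

    Args:
      options (argparse.Namespace): command line arguments.
    """
    self._ParseInformationalOptions(options)
    self._ParseLogFileOptions(options)
    self._number_of_workers = self.ParseNumericOption(
        options, 'workers', default_value=1)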
class CLIInputReader(object):
"""Command line interface input reader interface."""
def __init__(self, encoding='utf-8'):
"""Initializes an input reader.
Args:
encoding (Optional[str]): input encoding.
"""
super(CLIInputReader, self).__init__()
self._encoding = encoding
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def Read(self):
"""Reads a string from the input.
Returns:
str: input.
"""
class CLIOutputWriter(object):
"""Command line interface output writer interface."""
def __init__(self, encoding='utf-8'):
"""Initializes an output writer.
Args:
encoding (Optional[str]): output encoding.
"""
super(CLIOutputWriter, self).__init__()
self._encoding = encoding
@abc.abstractmethod
def Write(self, string):
"""Writes a string to the output.
Args:
string (str): output.
"""
class FileObjectInputReader(CLIInputReader):
"""File object command line interface input reader.
This input reader relies on the file-like object having a readline method.
"""
def __init__(self, file_object, encoding='utf-8'):
"""Initializes a file object command line interface input reader.
Args:
file_object (file): file-like object to read from.
encoding (Optional[str]): input encoding.
"""
super(FileObjectInputReader, self).__init__(encoding=encoding)
self._errors = 'strict'
self._file_object = file_object
def Read(self):
"""Reads a string from the input.
Returns:
str: input.
"""
encoded_string = self._file_object.readline()
if isinstance(encoded_string, str):
return encoded_string
try:
string = codecs.decode(encoded_string, self._encoding, self._errors)
except UnicodeDecodeError:
if self._errors == 'strict':
logger.error(
'Unable to properly read input due to encoding error. '
'Switching to error tolerant encoding which can result in '
'non Basic Latin (C0) characters being replaced with "?" or '
'"\\ufffd".')
self._errors = 'replace'
string = codecs.decode(encoded_string, self._encoding, self._errors)
return string
class StdinInputReader(FileObjectInputReader):
"""Stdin command line interface input reader."""
def __init__(self, encoding='utf-8'):
"""Initializes an stdin input reader.
Args:
encoding (Optional[str]): input encoding.
"""
super(StdinInputReader, self).__init__(sys.stdin, encoding=encoding)
def Read(self):
"""Reads a string from the input.
Returns:
str: input.
"""
# Flush stdout to guarantee that all output has been provided before waiting
# for input.
sys.stdout.flush()
return super(StdinInputReader, self).Read()
class FileObjectOutputWriter(CLIOutputWriter):
"""File object command line interface output writer.
This output writer relies on the file-like object having a write method.
"""
def __init__(self, file_object, encoding='utf-8'):
"""Initializes a file object command line interface output writer.
Args:
file_object (file): file-like object to write to.
encoding (Optional[str]): output encoding.
"""
super(FileObjectOutputWriter, self).__init__(encoding=encoding)
self._errors = 'strict'
self._file_object = file_object
def Write(self, string):
"""Writes a string to the output.
Args:
string (str): output.
"""
try:
# Note that encode() will first convert string into a Unicode string
# if necessary.
encoded_string = codecs.encode(string, self._encoding, self._errors)
except UnicodeEncodeError:
if self._errors == 'strict':
logger.error(
'Unable to properly write output due to encoding error. '
'Switching to error tolerant encoding which can result in '
'non Basic Latin (C0) characters being replaced with "?" or '
'"\\ufffd".')
self._errors = 'replace'
encoded_string = codecs.encode(string, self._encoding, self._errors)
self._file_object.write(encoded_string)
class StdoutOutputWriter(FileObjectOutputWriter):
"""Stdout command line interface output writer."""
def __init__(self, encoding='utf-8'):
"""Initializes a stdout output writer.
Args:
encoding (Optional[str]): output encoding.
"""
super(StdoutOutputWriter, self).__init__(sys.stdout, encoding=encoding)
def Write(self, string):
"""Writes a string to the output.
Args:
string (str): output.
"""
sys.stdout.write(string)
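# Illustrative usage sketch, not part of plaso: exercising the encoding
# fallback of FileObjectOutputWriter against an in-memory buffer.
def _demo_output_writer():
  """Demonstrates the 'replace' fallback on encoding errors."""
  import io

  buffer_object = io.BytesIO()
  writer = FileObjectOutputWriter(buffer_object, encoding='ascii')
  writer.Write('plain text\n')
  # The next call hits UnicodeEncodeError internally, logs an error and
  # retries with errors='replace', so the buffer receives '?' substitutes.
  writer.Write('caf\u00e9\n')
  return buffer_object.getvalue()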
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Nebula Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Compute v2 Security Group action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from keystoneclient import exceptions as ksc_exc
try:
from novaclient.v2 import security_group_rules
except ImportError:
from novaclient.v1_1 import security_group_rules
from openstackclient.common import parseractions
from openstackclient.common import utils
def _xform_security_group_rule(sgroup):
info = {}
info.update(sgroup)
from_port = info.pop('from_port')
to_port = info.pop('to_port')
if isinstance(from_port, int) and isinstance(to_port, int):
port_range = {'port_range': "%u:%u" % (from_port, to_port)}
elif from_port is None and to_port is None:
port_range = {'port_range': ""}
else:
port_range = {'port_range': "%s:%s" % (from_port, to_port)}
info.update(port_range)
if 'cidr' in info['ip_range']:
info['ip_range'] = info['ip_range']['cidr']
else:
info['ip_range'] = ''
if info['ip_protocol'] is None:
info['ip_protocol'] = ''
elif info['ip_protocol'].lower() == 'icmp':
info['port_range'] = ''
return info
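# Illustrative sketch, not part of the CLI: the transform above flattens the
# nested rule dict returned by novaclient.  Sample values below are made up.
def _example_xform_security_group_rule():
    rule = {
        'id': 1,
        'ip_protocol': 'tcp',
        'from_port': 137,
        'to_port': 139,
        'ip_range': {'cidr': '10.0.0.0/8'},
    }
    # Returns: {'id': 1, 'ip_protocol': 'tcp', 'port_range': '137:139',
    #           'ip_range': '10.0.0.0/8'}
    return _xform_security_group_rule(rule)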
class CreateSecurityGroup(show.ShowOne):
"""Create a new security group"""
log = logging.getLogger(__name__ + ".CreateSecurityGroup")
def get_parser(self, prog_name):
parser = super(CreateSecurityGroup, self).get_parser(prog_name)
parser.add_argument(
"name",
metavar="<name>",
help="New security group name",
)
parser.add_argument(
"--description",
metavar="<description>",
help="Security group description",
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
compute_client = self.app.client_manager.compute
description = parsed_args.description or parsed_args.name
data = compute_client.security_groups.create(
parsed_args.name,
description,
)
info = {}
info.update(data._info)
return zip(*sorted(six.iteritems(info)))
class CreateSecurityGroupRule(show.ShowOne):
"""Create a new security group rule"""
log = logging.getLogger(__name__ + ".CreateSecurityGroupRule")
def get_parser(self, prog_name):
parser = super(CreateSecurityGroupRule, self).get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help='Create rule in this security group (name or ID)',
)
parser.add_argument(
"--proto",
metavar="<proto>",
default="tcp",
help="IP protocol (icmp, tcp, udp; default: tcp)",
)
parser.add_argument(
"--src-ip",
metavar="<ip-address>",
default="0.0.0.0/0",
help="Source IP (may use CIDR notation; default: 0.0.0.0/0)",
)
parser.add_argument(
"--dst-port",
metavar="<port-range>",
default=(0, 0),
action=parseractions.RangeAction,
help="Destination port, may be a range: 137:139 (default: 0; "
"only required for proto tcp and udp)",
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
compute_client = self.app.client_manager.compute
group = utils.find_resource(
compute_client.security_groups,
parsed_args.group,
)
if parsed_args.proto.lower() == 'icmp':
from_port, to_port = -1, -1
else:
from_port, to_port = parsed_args.dst_port
data = compute_client.security_group_rules.create(
group.id,
parsed_args.proto,
from_port,
to_port,
parsed_args.src_ip,
)
info = _xform_security_group_rule(data._info)
return zip(*sorted(six.iteritems(info)))
class DeleteSecurityGroup(command.Command):
"""Delete a security group"""
log = logging.getLogger(__name__ + '.DeleteSecurityGroup')
def get_parser(self, prog_name):
parser = super(DeleteSecurityGroup, self).get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help='Security group to delete (name or ID)',
)
return parser
@utils.log_method(log)
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
data = utils.find_resource(
compute_client.security_groups,
parsed_args.group,
)
compute_client.security_groups.delete(data.id)
return
class DeleteSecurityGroupRule(command.Command):
"""Delete a security group rule"""
log = logging.getLogger(__name__ + '.DeleteSecurityGroupRule')
def get_parser(self, prog_name):
parser = super(DeleteSecurityGroupRule, self).get_parser(prog_name)
parser.add_argument(
'rule',
metavar='<rule>',
help='Security group rule to delete (ID only)',
)
return parser
@utils.log_method(log)
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
compute_client.security_group_rules.delete(parsed_args.rule)
return
class ListSecurityGroup(lister.Lister):
"""List security groups"""
log = logging.getLogger(__name__ + ".ListSecurityGroup")
def get_parser(self, prog_name):
parser = super(ListSecurityGroup, self).get_parser(prog_name)
parser.add_argument(
'--all-projects',
action='store_true',
default=False,
help='Display information from all projects (admin only)',
)
return parser
def take_action(self, parsed_args):
def _get_project(project_id):
try:
return getattr(project_hash[project_id], 'name', project_id)
except KeyError:
return project_id
self.log.debug("take_action(%s)", parsed_args)
compute_client = self.app.client_manager.compute
columns = (
"ID",
"Name",
"Description",
)
column_headers = columns
if parsed_args.all_projects:
# TODO(dtroyer): Translate Project_ID to Project (name)
columns = columns + ('Tenant ID',)
column_headers = column_headers + ('Project',)
search = {'all_tenants': parsed_args.all_projects}
data = compute_client.security_groups.list(search_opts=search)
project_hash = {}
try:
projects = self.app.client_manager.identity.projects.list()
except ksc_exc.ClientException:
# This fails when the user is not an admin, just move along
pass
else:
for project in projects:
project_hash[project.id] = project
return (column_headers,
(utils.get_item_properties(
s, columns,
formatters={'Tenant ID': _get_project},
) for s in data))
class ListSecurityGroupRule(lister.Lister):
"""List security group rules"""
log = logging.getLogger(__name__ + ".ListSecurityGroupRule")
def get_parser(self, prog_name):
parser = super(ListSecurityGroupRule, self).get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help='List all rules in this security group (name or ID)',
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
compute_client = self.app.client_manager.compute
group = utils.find_resource(
compute_client.security_groups,
parsed_args.group,
)
# Argh, the rules are not Resources...
rules = []
for rule in group.rules:
rules.append(security_group_rules.SecurityGroupRule(
compute_client.security_group_rules,
_xform_security_group_rule(rule),
))
columns = column_headers = (
"ID",
"IP Protocol",
"IP Range",
"Port Range",
)
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in rules))
class SetSecurityGroup(show.ShowOne):
"""Set security group properties"""
log = logging.getLogger(__name__ + '.SetSecurityGroup')
def get_parser(self, prog_name):
parser = super(SetSecurityGroup, self).get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help='Security group to modify (name or ID)',
)
parser.add_argument(
'--name',
metavar='<new-name>',
help='New security group name',
)
parser.add_argument(
"--description",
metavar="<description>",
help="New security group description",
)
return parser
@utils.log_method(log)
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
data = utils.find_resource(
compute_client.security_groups,
parsed_args.group,
)
if parsed_args.name:
data.name = parsed_args.name
if parsed_args.description:
data.description = parsed_args.description
info = {}
info.update(compute_client.security_groups.update(
data,
data.name,
data.description,
)._info)
if info:
return zip(*sorted(six.iteritems(info)))
else:
return ({}, {})
class ShowSecurityGroup(show.ShowOne):
"""Display security group details"""
log = logging.getLogger(__name__ + '.ShowSecurityGroup')
def get_parser(self, prog_name):
parser = super(ShowSecurityGroup, self).get_parser(prog_name)
parser.add_argument(
'group',
metavar='<group>',
help='Security group to display (name or ID)',
)
return parser
@utils.log_method(log)
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
info = {}
info.update(utils.find_resource(
compute_client.security_groups,
parsed_args.group,
)._info)
rules = []
for r in info['rules']:
rules.append(utils.format_dict(_xform_security_group_rule(r)))
# Format rules into a list of strings
info.update(
{'rules': rules}
)
# Map 'tenant_id' column to 'project_id'
info.update(
{'project_id': info.pop('tenant_id')}
)
return zip(*sorted(six.iteritems(info)))
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import gevent
import gevent.queue
import gevent.pywsgi
import gevent.monkey
gevent.monkey.patch_all()
import os
import sys
import logging
import json
from pprint import pprint
import functools
import socket
import time
import errno
import re
import copy
from cStringIO import StringIO
import uuid
import six
import contextlib
try:
from collections import OrderedDict, defaultdict
except ImportError:
from ordereddict import OrderedDict, defaultdict
import pycassa
import Queue
from collections import deque
from collections import namedtuple
import kombu
import kazoo
from kazoo.client import KazooState
from copy import deepcopy
from datetime import datetime
from pycassa.util import *
from vnc_api import vnc_api
from novaclient import exceptions as nc_exc
from cfgm_common.exceptions import ResourceExistsError, OverQuota
def stub(*args, **kwargs):
pass
class FakeApiConfigLog(object):
_all_logs = []
send = stub
def __init__(self, *args, **kwargs):
FakeApiConfigLog._all_logs.append(kwargs['api_log'])
@classmethod
def _print(cls):
for log in cls._all_logs:
x = copy.deepcopy(log.__dict__)
#body = x.pop('body')
#pprint(json.loads(body))
pprint(x)
print "\n"
# class FakeApiConfigLog
class FakeWSGIHandler(gevent.pywsgi.WSGIHandler):
logger = logging.getLogger('FakeWSGIHandler')
logger.addHandler(logging.FileHandler('api_server.log'))
def __init__(self, socket, address, server):
super(FakeWSGIHandler, self).__init__(socket, address, server)
#server.log = open('api_server.log', 'a')
class LoggerWriter(object):
def write(self, message):
FakeWSGIHandler.logger.log(logging.INFO, message)
server.log = LoggerWriter()
class FakeSystemManager(object):
_keyspaces = {}
def __init__(*args, **kwargs):
pass
def create_keyspace(self, name, *args, **kwargs):
if name not in self._keyspaces:
self._keyspaces[name] = {}
def list_keyspaces(self):
return self._keyspaces.keys()
def get_keyspace_properties(self, ks_name):
return {'strategy_options': {'replication_factor': '1'}}
def get_keyspace_column_families(self, keyspace):
return self._keyspaces[keyspace]
def create_column_family(self, keyspace, name, *args, **kwargs):
self._keyspaces[keyspace][name] = {}
def drop_keyspace(self, ks_name):
try:
del self._keyspaces[ks_name]
except KeyError:
pass
@classmethod
@contextlib.contextmanager
def patch_keyspace(cls, ks_name, ks_val=None):
try:
orig_ks_val = cls._keyspaces[ks_name]
orig_existed = True
except KeyError:
orig_existed = False
try:
cls._keyspaces[ks_name] = ks_val
yield
finally:
if orig_existed:
cls._keyspaces[ks_name] = orig_ks_val
else:
del cls._keyspaces[ks_name]
#end patch_keyspace
# end class FakeSystemManager
class CassandraCFs(object):
_all_cfs = {}
@classmethod
def add_cf(cls, keyspace, cf_name, cf):
CassandraCFs._all_cfs[keyspace + '_' + cf_name] = cf
# end add_cf
@classmethod
def get_cf(cls, keyspace, cf_name):
return CassandraCFs._all_cfs[keyspace + '_' + cf_name]
# end get_cf
@classmethod
def reset(cls, cf_list=None):
if cf_list:
for name in cf_list:
if name in cls._all_cfs:
del cls._all_cfs[name]
return
cls._all_cfs = {}
# end CassandraCFs
class FakeConnectionPool(object):
def __init__(*args, **kwargs):
self = args[0]
if "keyspace" in kwargs:
self.keyspace = kwargs['keyspace']
else:
self.keyspace = args[2]
# end __init__
# end FakeConnectionPool
class PatchContext(object):
def __init__(self, cf):
self.cf = cf
self.patches = [] # stack of patches
# end __init__
def patch(self, patch_list):
cf = self.cf
for patch_type, patch_info in patch_list:
patched = {}
if patch_type == 'row':
patched['type'] = 'row'
row_key, new_columns = patch_info
patched['row_key'] = row_key
if row_key in cf._rows:
patched['row_existed'] = True
patched['orig_cols'] = copy.deepcopy(cf._rows[row_key])
if new_columns is None:
# simulates absence of key in cf
del cf._rows[row_key]
else:
cf._rows[row_key] = new_columns
else: # row didn't exist, create one
patched['row_existed'] = False
cf.insert(row_key, new_columns)
elif patch_type == 'column':
patched['type'] = 'column'
row_key, col_name, col_val = patch_info
patched['row_key'] = row_key
patched['col_name'] = col_name
if col_name in cf._rows[row_key]:
patched['col_existed'] = True
patched['orig_col_val'] = copy.deepcopy(
cf._rows[row_key][col_name])
if col_val is None:
# simulates absence of col
del cf._rows[row_key][col_name]
else:
cf.insert(row_key, {col_name: col_val})
else: # column didn't exist, create one
patched['col_existed'] = False
cf.insert(row_key, {col_name: col_val})
else:
raise Exception(
"Unknown patch type %s in patching" %(patch_type))
self.patches.append(patched)
# end patch
def unpatch(self):
cf = self.cf
for patched in reversed(self.patches):
patch_type = patched['type']
row_key = patched['row_key']
if patch_type == 'row':
if patched['row_existed']:
cf._rows[row_key] = patched['orig_cols']
else:
del cf._rows[row_key]
elif patch_type == 'column':
col_name = patched['col_name']
if patched['col_existed']:
cf._rows[row_key][col_name] = patched['orig_col_val']
else:
del cf._rows[row_key][col_name]
else:
raise Exception(
"Unknown patch type %s in un-patching" %(patch_type))
# end unpatch
# end PatchContext
class FakeCF(object):
# 2 initializations for same CF get same contents
_all_cf_rows = {}
def __init__(*args, **kwargs):
self = args[0]
self._pool = args[2]
self._name = args[3]
self._ks_cf_name = '%s_%s' %(self._pool.keyspace, self._name)
try:
#old_cf = CassandraCFs.get_cf(self._pool.keyspace, self._name)
#self._rows = old_cf._rows
self._rows = self._all_cf_rows[self._ks_cf_name]
except KeyError:
self._all_cf_rows[self._ks_cf_name] = OrderedDict({})
self._rows = self._all_cf_rows[self._ks_cf_name]
self.column_validators = {}
CassandraCFs.add_cf(self._pool.keyspace, self._name, self)
# end __init__
def get_range(self, *args, **kwargs):
columns = kwargs.get('columns', None)
column_start = kwargs.get('column_start', None)
column_finish = kwargs.get('column_finish', None)
column_count = kwargs.get('column_count', 0)
include_timestamp = kwargs.get('include_timestamp', False)
for key in self._rows:
try:
col_dict = self.get(key, columns, column_start, column_finish,
column_count, include_timestamp)
yield (key, col_dict)
except pycassa.NotFoundException:
pass
# end get_range
def _column_within_range(self, column_name, column_start, column_finish):
if type(column_start) is tuple:
for i in range(len(column_start), len(column_name)):
column_start = column_start + (column_name[i],)
if type(column_finish) is tuple:
for i in range(len(column_finish), len(column_name)):
column_finish = column_finish + (column_name[i],)
if column_start and column_name < column_start:
return False
if column_finish and column_name > column_finish:
return False
return True
# end _column_within_range
def get(self, key, columns=None, column_start=None, column_finish=None,
column_count=0, include_timestamp=False, include_ttl=False):
if not isinstance(key, six.string_types):
raise TypeError("A str or unicode value was expected, but %s "
"was received instead (%s)"
% (key.__class__.__name__, str(key)))
if not key in self._rows:
raise pycassa.NotFoundException
if columns:
col_dict = {}
for col_name in columns:
try:
col_value = self._rows[key][col_name][0]
except KeyError:
if len(columns) > 1:
continue
else:
raise pycassa.NotFoundException
if include_timestamp or include_ttl:
ret = (col_value,)
if include_timestamp:
col_tstamp = self._rows[key][col_name][1]
ret += (col_tstamp,)
if include_ttl:
col_ttl = self._rows[key][col_name][2]
ret += (col_ttl,)
col_dict[col_name] = ret
else:
col_dict[col_name] = col_value
else:
col_dict = {}
for col_name in self._rows[key].keys():
if not self._column_within_range(col_name,
column_start, column_finish):
continue
col_value = self._rows[key][col_name][0]
if include_timestamp or include_ttl:
ret = (col_value,)
if include_timestamp:
col_tstamp = self._rows[key][col_name][1]
ret += (col_tstamp,)
if include_ttl:
col_ttl = self._rows[key][col_name][2]
ret += (col_ttl,)
col_dict[col_name] = ret
else:
col_dict[col_name] = col_value
if len(col_dict) == 0:
raise pycassa.NotFoundException
sorted_col_dict = OrderedDict(
(k, col_dict[k]) for k in sorted(col_dict))
return sorted_col_dict
# end get
def multiget(
self, keys, columns=None, column_start=None, column_finish=None,
column_count=0, include_timestamp=False):
result = {}
for key in keys:
try:
col_dict = self.get(key, columns, column_start, column_finish,
column_count, include_timestamp)
result[key] = col_dict
except pycassa.NotFoundException:
pass
return result
# end multiget
def insert(self, key, col_dict, ttl=None):
if key not in self._rows:
self._rows[key] = {}
#tstamp = datetime.now()
tstamp = (datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()
for col_name in col_dict.keys():
self._rows[key][col_name] = (col_dict[col_name], tstamp, ttl)
# end insert
def remove(self, key, columns=None):
try:
if columns:
# delete each of the requested columns from this row
for col_name in columns:
del self._rows[key][col_name]
elif columns is None:
del self._rows[key]
except KeyError:
# pycassa remove ignores non-existing keys
pass
# end remove
def xget(self, key, column_start=None, column_finish=None,
column_count=0, include_timestamp=False, include_ttl=False):
try:
col_dict = self.get(key,
column_start=column_start,
column_finish=column_finish,
column_count=column_count,
include_timestamp=include_timestamp,
include_ttl=include_ttl)
except pycassa.NotFoundException:
col_dict = {}
for k, v in col_dict.items():
yield (k, v)
# end xget
def get_count(self, key, column_start=None, column_finish=None):
col_names = []
if key in self._rows:
col_names = self._rows[key].keys()
counter = 0
for col_name in col_names:
if self._column_within_range(col_name,
column_start, column_finish):
counter += 1
return counter
# end get_count
def batch(self):
return self
# end batch
def send(self):
pass
# end send
@contextlib.contextmanager
def patch_cf(self, new_contents=None):
orig_contents = self._all_cf_rows[self._ks_cf_name]
try:
self._all_cf_rows[self._ks_cf_name] = new_contents
yield
finally:
self._all_cf_rows[self._ks_cf_name] = orig_contents
# end patch_cf
@contextlib.contextmanager
def patch_row(self, key, new_columns=None):
ctx = PatchContext(self)
try:
ctx.patch([('row', (key, new_columns))])
yield
finally:
ctx.unpatch()
#end patch_row
@contextlib.contextmanager
def patch_column(self, key, col_name, col_val=None):
ctx = PatchContext(self)
try:
ctx.patch([('column', (key, col_name, col_val))])
yield
finally:
ctx.unpatch()
# end patch_column
@contextlib.contextmanager
def patches(self, patch_list):
ctx = PatchContext(self)
try:
ctx.patch(patch_list)
yield
finally:
ctx.unpatch()
# end patches
# end class FakeCF
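# Illustrative sketch, not part of the fakes: typical use of FakeCF in a test.
# The keyspace and column family names below are assumptions for demonstration.
def _example_fake_cf_usage():
    pool = FakeConnectionPool(keyspace='config_db_uuid')
    cf = FakeCF(None, pool, 'obj_uuid_table')
    cf.insert('row-1', {'col-a': 'x', 'col-b': 'y'})
    assert cf.get('row-1', columns=['col-a']) == {'col-a': 'x'}
    # Temporarily hide a column, then verify it is restored on exit.
    with cf.patch_column('row-1', 'col-a', col_val=None):
        assert 'col-a' not in cf.get('row-1')
    assert cf.get('row-1')['col-a'] == 'x'
# end _example_fake_cf_usage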
class FakeNovaClient(object):
vnc_lib = None
@staticmethod
def initialize(*args, **kwargs):
return FakeNovaClient
class flavors:
@staticmethod
def find(*args, **kwargs):
return 1
# end class flavors
class images:
@staticmethod
def find(name):
return 1
# end class images
class servers:
@staticmethod
def create(name, image, flavor, nics, *args, **kwargs):
vm = vnc_api.VirtualMachine(name)
FakeNovaClient.vnc_lib.virtual_machine_create(vm)
for network in nics:
if 'nic-id' in network:
vn = FakeNovaClient.vnc_lib.virtual_network_read(
id=network['net-id'])
vmi = vnc_api.VirtualMachineInterface(vn.name, parent_obj=vm)
vmi.set_virtual_network(vn)
FakeNovaClient.vnc_lib.virtual_machine_interface_create(vmi)
ip_address = FakeNovaClient.vnc_lib.virtual_network_ip_alloc(
vn, count=1)[0]
ip_obj = vnc_api.InstanceIp(ip_address, ip_address)
ip_obj.add_virtual_network(vn)
ip_obj.add_virtual_machine_interface(vmi)
FakeNovaClient.vnc_lib.instance_ip_create(ip_obj)
elif 'port-id' in network:
vmi = FakeNovaClient.vnc_lib.virtual_machine_interface_read(id=network['port-id'])
vmi.add_virtual_machine(vm)
FakeNovaClient.vnc_lib.virtual_machine_interface_update(vmi)
# end for network
vm.id = vm.uuid
vm.delete = FakeNovaClient.delete_vm.__get__(
vm, vnc_api.VirtualMachine)
vm.get = stub
return vm
# end create
@staticmethod
def find(id):
try:
vm = FakeNovaClient.vnc_lib.virtual_machine_read(id=id)
except vnc_api.NoIdError:
raise nc_exc.NotFound(404, "")
vm.delete = FakeNovaClient.delete_vm.__get__(
vm, vnc_api.VirtualMachine)
vm.status = 'OK'
return vm
# end find
get = find
# end class servers
@staticmethod
def delete_vm(vm):
for if_ref in (vm.get_virtual_machine_interfaces() or
vm.get_virtual_machine_interface_back_refs() or []):
intf = FakeNovaClient.vnc_lib.virtual_machine_interface_read(
id=if_ref['uuid'])
for ip_ref in intf.get_instance_ip_back_refs() or []:
FakeNovaClient.vnc_lib.instance_ip_delete(id=ip_ref['uuid'])
FakeNovaClient.vnc_lib.virtual_machine_interface_delete(
id=if_ref['uuid'])
FakeNovaClient.vnc_lib.virtual_machine_delete(id=vm.uuid)
# end delete_vm
@classmethod
def reset(cls):
cls.vnc_lib = None
# end reset
# end class FakeNovaClient
class FakeKombu(object):
_exchange = defaultdict(dict)
@classmethod
def is_empty(cls, vhost, qname):
_vhost = ''.join(vhost)
for name, q in FakeKombu._exchange[_vhost].items():
if name.startswith(qname) and q.qsize() > 0:
return False
return True
# end is_empty
@classmethod
def new_queues(self, vhost, q_name, q_gevent_obj):
FakeKombu._exchange[vhost][q_name] = q_gevent_obj
# end new_queues
class Exchange(object):
def __init__(self, *args, **kwargs):
self.exchange = args[1]
# end __init__
# end Exchange
class Queue(object):
class Message(object):
def __init__(self, msg_dict):
self.payload = msg_dict
# end __init__
def ack(self, *args, **kwargs):
pass
# end ack
# end class Message
def __init__(self, entity, q_name, q_exchange, **kwargs):
self._sync_q = gevent.queue.Queue()
self._name = q_name
self._exchange = q_exchange
# end __init__
def __call__(self, *args):
class BoundQueue(object):
def __init__(self, parent):
self.parent_q = parent
def delete(self):
self.parent_q.clear()
pass
# end delete
return BoundQueue(self)
# end __call__
def put(self, msg_dict, serializer):
msg_obj = self.Message(msg_dict)
self._sync_q.put(copy.deepcopy(msg_obj))
# end put
def get(self):
rv = self._sync_q.get()
# In real systems, rabbitmq is a little slow, so add a small wait to mimic that
gevent.sleep(0.001)
return rv
# end get
def clear(self):
try:
while True:
self._sync_q.get_nowait()
except Queue.Empty:
pass
self._sync_q = gevent.queue.Queue()
# end class Queue
class FakeChannel(object):
def __init__(self, vhost):
self.vhost = vhost
# end __init__
# end class FakeChannel
class Connection(object):
class ConnectionException(Exception): pass
class ChannelException(Exception): pass
def __init__(self, *args, **kwargs):
self._default_channel = None
self.args = args
self.vhost = args[1]
# end __init__
def channel(self):
chan = FakeKombu.FakeChannel(self.vhost)
return chan
# end channel
@property
def default_channel(self):
if self._default_channel is None:
self._default_channel = self.channel()
return self._default_channel
def clone(self, **kwargs):
return self.__class__(*self.args, **kwargs)
# end clone
def close(self):
pass
# end close
def ensure_connection(self, *args):
pass
# end ensure_connection
def connect(self):
pass
# end connection
def _info(self):
pass
# end _info
def drain_events(self, **kwargs):
gevent.sleep(0.001)
# end drain_events
def as_uri(self, *args):
return repr(self.args)
# end as_uri
def __enter__(self):
return self
# end __enter__
def __exit__(self, *args):
self.close()
# end __exit__
@property
def connection_errors(self):
return (self.ConnectionException, )
@property
def channel_errors(self):
return (self.ChannelException, )
@property
def connected(self):
return True
def heartbeat_check(self):
gevent.sleep(0.001)
# end class Connection
class Consumer(object):
def __init__(self, *args, **kwargs):
queues = kwargs['queues']
self.queue = queues[0] if isinstance(queues, list) else queues
self.callbacks = kwargs['callbacks']
self.vhost = ''.join(args[1].vhost)
FakeKombu._exchange[self.vhost][self.queue._name] \
= self.queue._sync_q
# end __init__
def consume(self):
if not self.queue:
return
while True:
msg = self.queue.get()
try:
for c in self.callbacks:
c(msg.payload, msg)
except Exception:
pass
# end consume
def close(self):
if self.queue:
self.queue.clear()
self.queue = None
# end close
def __enter__(self):
self.consume()
return self
# end __enter__
def __exit__(self, exc_type, exc_val, exc_tb):
pass
# end __exit__
# end class Consumer
class Producer(object):
def __init__(self, *args, **kwargs):
self.exchange = kwargs['exchange']
self.vhost = ''.join(args[1].vhost)
# end __init__
def publish(self, payload):
for q in FakeKombu._exchange[self.vhost].values():
msg_obj = FakeKombu.Queue.Message(payload)
q.put(msg_obj, None)
#end publish
def close(self):
for q in FakeKombu._exchange[self.vhost].values():
while True:
try:
q.get_nowait()
except Queue.Empty:
break
# end close
# end class Producer
@classmethod
def reset(cls, vhost):
_vhost = ''.join(vhost)
for name, gevent_q in cls._exchange[_vhost].items():
del FakeKombu._exchange[_vhost][name]
cls._exchange[_vhost].clear()
cls._exchange = defaultdict(dict)
# end class FakeKombu
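# Illustrative sketch, not part of the fakes: publishing a message through the
# in-memory exchange.  The vhost, exchange and queue names are assumptions
# made for demonstration only.
def _example_fake_kombu_usage():
    conn = FakeKombu.Connection('amqp://', 'vhost1')
    exchange = FakeKombu.Exchange(None, 'vnc_config.object-update')
    queue = FakeKombu.Queue(None, 'api-server-queue', exchange)
    FakeKombu.new_queues('vhost1', 'api-server-queue', queue._sync_q)
    producer = FakeKombu.Producer(None, conn, exchange=exchange)
    producer.publish({'oper': 'CREATE', 'uuid': 'x'})
    msg = queue._sync_q.get()
    assert msg.payload == {'oper': 'CREATE', 'uuid': 'x'}
    FakeKombu.reset('vhost1')
# end _example_fake_kombu_usage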
class FakeRedis(object):
class Pubsub(object):
def __init__(self, *args, **kwargs):
self._event = gevent.event.Event()
# end __init__
def subscribe(self, *args, **kwargs):
pass
# end subscribe
def listen(self, *args, **kwargs):
self._event.wait()
# end listen
# end Pubsub
def __init__(self, *args, **kwargs):
self._kv_store = {}
# end __init__
def pubsub(self, *args, **kwargs):
return FakeRedis.Pubsub()
# end pubsub
def publish(self, *args, **kwargs):
pass
# end publish
def set(self, key, value):
self._kv_store[key] = deepcopy(value)
# end set
def get(self, key):
return deepcopy(self._kv_store[key])
# end get
def delete(self, keys):
for key in keys:
try:
del self._kv_store[key]
except KeyError:
pass
# end delete
def setnx(self, key, value):
self.set(key, deepcopy(value))
return True
# end setnx
def hexists(self, key, hkey):
if key in self._kv_store:
if hkey in self._kv_store[key]:
return True
return False
# end hexists
def hset(self, key, hkey, value):
if key not in self._kv_store:
self._kv_store[key] = {}
self._kv_store[key][hkey] = deepcopy(value)
# end hset
def hget(self, key, hkey):
if key not in self._kv_store:
return json.dumps(None)
if hkey not in self._kv_store[key]:
return json.dumps(None)
return deepcopy(self._kv_store[key][hkey])
# end hget
def hgetall(self, key):
return deepcopy(self._kv_store[key])
# end hgetall
def hdel(self, key, hkey):
del self._kv_store[key][hkey]
# end hdel
# end FakeRedis
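# Illustrative sketch, not part of the fakes: FakeRedis mimics a small subset
# of the redis-py hash API backed by an in-memory dict.  Key names below are
# assumptions for demonstration only.
def _example_fake_redis_usage():
    r = FakeRedis()
    r.hset('useragent-kv', 'k1', 'v1')
    assert r.hexists('useragent-kv', 'k1')
    assert r.hget('useragent-kv', 'k1') == 'v1'
    r.hdel('useragent-kv', 'k1')
    assert not r.hexists('useragent-kv', 'k1')
# end _example_fake_redis_usage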
class FakeExtensionManager(object):
_entry_pt_to_classes = {}
_ext_objs = []
class FakeExtObj(object):
def __init__(self, entry_pt, cls, *args, **kwargs):
self.entry_pt = entry_pt
self.obj = cls(*args, **kwargs)
self.name = repr(cls)
def __init__(self, child, ep_name, **kwargs):
if ep_name not in self._entry_pt_to_classes:
return
classes = self._entry_pt_to_classes[ep_name]
self._ep_name = ep_name
for cls in classes or []:
ext_obj = FakeExtensionManager.FakeExtObj(
ep_name, cls, **kwargs)
self._ext_objs.append(ext_obj)
# end __init__
def names(self):
return [e.name for e in self._ext_objs]
def map(self, cb):
for ext_obj in self._ext_objs:
cb(ext_obj)
def map_method(self, method_name, *args, **kwargs):
for ext_obj in self._ext_objs:
method = getattr(ext_obj.obj, method_name, None)
if not method:
continue
method(*args, **kwargs)
@classmethod
def get_extension_objects(cls, entry_pt):
return [e.obj for e in cls._ext_objs if e.entry_pt == entry_pt]
@classmethod
def reset(cls):
cls._ext_objs = []
# end class FakeExtensionManager
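# Tiny WSGI response helper used by FakeAuthProtocol to emit plain-text error
# bodies (empty for HEAD requests, per RFC 2616 section 9.4).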
class MiniResp(object):
def __init__(self, error_message, env, headers=[]):
# The HEAD method is unique: it must never return a body, even if
# it reports an error (RFC-2616 clause 9.4). We relieve callers
# from varying the error responses depending on the method.
if env['REQUEST_METHOD'] == 'HEAD':
self.body = ['']
else:
self.body = [error_message]
self.headers = list(headers)
self.headers.append(('Content-type', 'text/plain'))
"""
Fake Keystone Middleware.
Used in API server request pipeline to validate user token
Typically first app in the pipeline
"""
class FakeAuthProtocol(object):
def __init__(self, app, conf, *args, **kwargs):
self.app = app
self.conf = conf
self.request_uri = conf.get('auth_url')
if not self.request_uri:
auth_protocol = conf['auth_protocol']
auth_host = conf['auth_host']
auth_port = conf['auth_port']
self.request_uri = '%s://%s:%s' % (auth_protocol, auth_host, auth_port)
self.delay_auth_decision = conf.get('delay_auth_decision', False)
self.auth_uri = self.request_uri
# print 'FakeAuthProtocol init: auth-uri %s, conf %s' % (self.auth_uri, self.conf)
def get_admin_token(self):
        # token format: admin-name, tenant-name, role
token_dict = {
'X-User': self.conf['admin_user'],
'X-User-Name': self.conf['admin_user'],
'X-Project-Name': self.conf['admin_tenant_name'],
'X-Domain-Name' : 'default-domain',
'X-Role': 'cloud-admin',
}
rval = json.dumps(token_dict)
# print '%%%% generated admin token %s %%%%' % rval
return rval
def _header_to_env_var(self, key):
return 'HTTP_%s' % key.replace('-', '_').upper()
def _get_header(self, env, key, default=None):
"""Get http header from environment."""
env_key = self._header_to_env_var(key)
return env.get(env_key, default)
def _add_headers(self, env, headers):
"""Add http headers to environment."""
for (k, v) in six.iteritems(headers):
env_key = self._header_to_env_var(k)
env[env_key] = v
def _validate_user_token(self, user_token, env, retry=True):
return user_token
def _build_user_headers(self, token_info):
"""Convert token object into headers."""
"""
rval = {
'X-Identity-Status': 'Confirmed',
'X-Domain-Id': domain_id,
'X-Domain-Name': domain_name,
'X-Project-Id': project_id,
'X-Project-Name': project_name,
'X-Project-Domain-Id': project_domain_id,
'X-Project-Domain-Name': project_domain_name,
'X-User-Id': user_id,
'X-User-Name': user_name,
'X-User-Domain-Id': user_domain_id,
'X-User-Domain-Name': user_domain_name,
'X-Role': roles,
# Deprecated
'X-User': user_name,
'X-Tenant-Id': project_id,
'X-Tenant-Name': project_name,
'X-Tenant': project_name,
'X-Role': roles,
}
"""
rval = json.loads(token_info)
return rval
# simulate keystone token
def _fake_keystone_token(self, token_info):
rval = json.loads(token_info)
rval['token'] = {};
rval['access'] = {}; rval['access']['user'] = {};
rval['access']['user']['roles'] = [{'name': rval['X-Role']}]
rval['token']['roles'] = [{'name': rval['X-Role']}]
return rval
def _reject_request(self, env, start_response):
"""Redirect client to auth server.
:param env: wsgi request environment
:param start_response: wsgi response callback
:returns HTTPUnauthorized http response
"""
headers = [('WWW-Authenticate', 'Keystone uri=\'%s\'' % self.auth_uri)]
resp = MiniResp('Authentication required', env, headers)
start_response('401 Unauthorized', resp.headers)
return resp.body
def __call__(self, env, start_response):
"""Handle incoming request.
Authenticate send downstream on success. Reject request if
we can't authenticate.
"""
# print 'FakeAuthProtocol: Authenticating user token'
user_token = self._get_header(env, 'X-Auth-Token')
if user_token:
# print '%%%%%% user token %s %%%%% ' % user_token
pass
elif self.delay_auth_decision:
self._add_headers(env, {'X-Identity-Status': 'Invalid'})
return self.app(env, start_response)
else:
# print 'Missing token or Unable to authenticate token'
return self._reject_request(env, start_response)
token_info = self._validate_user_token(user_token, env)
env['keystone.token_info'] = self._fake_keystone_token(token_info)
user_headers = self._build_user_headers(token_info)
self._add_headers(env, user_headers)
return self.app(env, start_response)
fake_keystone_auth_protocol = None
def get_keystone_auth_protocol(*args, **kwargs):
global fake_keystone_auth_protocol
if not fake_keystone_auth_protocol:
fake_keystone_auth_protocol = FakeAuthProtocol(*args[1:], **kwargs)
return fake_keystone_auth_protocol
#end get_keystone_auth_protocol
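# In-memory keystone-v2-style client. Domains/Tenants/Users/Roles live in
# class-level dicts, so their state is shared across FakeKeystoneClient
# instances within a test run.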
class FakeKeystoneClient(object):
@property
def version(self):
return 'v2'
class Domains(object):
_domains = {}
def add_domain(self, id, name):
self.id = id
self.name = name
self._domains[id] = self
def create(self, name, id=None):
self.name = name
self.id = str(id or uuid.uuid4())
            self._domains[self.id] = self
def list(self):
return self._domains.values()
def get(self, id):
return self._domains[str(uuid.UUID(id))]
class Tenants(object):
_tenants = {}
def add_tenant(self, id, name):
self.id = id
self.name = name
self._tenants[id] = self
def delete_tenant(self, id):
del self._tenants[id]
def create(self, name, id=None):
self.name = name
self.id = str(id or uuid.uuid4())
            self._tenants[self.id] = self
def list(self):
return self._tenants.values()
def get(self, id):
return self._tenants[str(uuid.UUID(id))]
class Users(object):
_users = {}
def create(self, name, password, foo, tenant_id):
self.name = name
self.password = password
self.tenant_id = tenant_id
self._users[name] = self
def list(self):
return self._users.values()
def get(self, name):
for x in self._users.values():
if x.name == name:
return x
return None
class Roles(object):
_roles = {}
_roles_map = {}
def create(self, name):
self.name = name
self._roles[name] = self
def list(self):
return self._roles.values()
def get_user_role(self, username, tenant_id):
return self._roles_map[username][tenant_id]
def add_user_role(self, uobj, robj, tobj):
if uobj.name not in self._roles_map:
self._roles_map[uobj.name] = {}
self._roles_map[uobj.name][tobj.name] = robj.name
def __init__(self, *args, **kwargs):
self.tenants = FakeKeystoneClient.Tenants()
self.domains = FakeKeystoneClient.Domains()
self.users = FakeKeystoneClient.Users()
self.roles = FakeKeystoneClient.Roles()
pass
def user_role(self, username, tenant_id):
return self.roles.get_user_role(username, tenant_id)
# end class FakeKeystoneClient
fake_keystone_client = FakeKeystoneClient()
def get_keystone_client(*args, **kwargs):
return fake_keystone_client
#
# Find two consecutive free ports such that the even port is greater than the
# odd port. The socket bound to the odd port is appended to allocated_sockets
# (to keep it locked) and the even port number is returned.
#
def get_free_port(allocated_sockets):
single_port_list = []
tmp_sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while (1):
tmp_sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
tmp_sock1.bind(('', 0))
        except socket.error:
            # Bail out: all ports are exhausted. Close the sockets collected
            # so far and propagate the error.
            for tmp_sock in single_port_list:
                tmp_sock.close()
            raise
free_port1 = tmp_sock1.getsockname()[1]
if free_port1 % 2:
#We have odd port, check whether next even port free
try:
tmp_sock2.bind(('', free_port1 + 1))
except:
single_port_list.append(tmp_sock1)
continue
else:
#We have even port, check whether next odd port free
try:
tmp_sock2.bind(('', free_port1 - 1))
except:
single_port_list.append(tmp_sock1)
continue
free_port2 = tmp_sock2.getsockname()[1]
break
#we have found our twin ports, release the singles now
for tmp_sock in single_port_list:
tmp_sock.close()
#keep the odd port locked and return free port
if free_port1 % 2:
odd_port, odd_sock = free_port1, tmp_sock1
even_port, even_sock = free_port2, tmp_sock2
else:
odd_port, odd_sock = free_port2, tmp_sock2
even_port, even_sock = free_port1, tmp_sock1
even_sock.close()
allocated_sockets.append(odd_sock)
return even_port
# end get_free_port
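# Typical usage (sketch): the caller keeps 'allocated_sockets' alive so the
# odd port stays locked for as long as the even port is in use, e.g.
#   allocated_sockets = []
#   api_port = get_free_port(allocated_sockets)
#   ... use api_port, then close the sockets in allocated_sockets on teardown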
def block_till_port_listened(server_ip, server_port, retries=30):
tries = 0
while tries < retries:
try:
s = socket.create_connection((server_ip, server_port))
s.close()
return
except Exception as err:
if err.errno == errno.ECONNREFUSED:
tries += 1
print("port %s not up, retrying in 2 secs, %d tries remaining"
% (server_port, retries-tries))
gevent.sleep(2)
raise Exception("port %s not up after %d retries" % (server_port, retries))
# end block_till_port_listened
def Fake_uuid_to_time(time_uuid_in_db):
ts = time.mktime(time_uuid_in_db.timetuple())
return ts
# end of Fake_uuid_to_time
class ZnodeStat(namedtuple('ZnodeStat', 'ctime')):
pass
def zk_scrub_path(path):
# remove trailing slashes if not root
if len(path) == 1:
return path
return path.rstrip('/')
# end zk_scrub_path
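# In-memory kazoo client replacement. Nodes live in the class-level _values
# dict as (value, ZnodeStat) tuples keyed by scrubbed path; Election and the
# Lock variants are backed by gevent semaphores shared per path.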
class FakeKazooClient(object):
_values = {}
class Election(object):
_locks = {}
def __init__(self, path, identifier):
self.path = zk_scrub_path(path)
if self.path not in self._locks:
self._locks[self.path] = gevent.lock.Semaphore()
def run(self, cb, *args, **kwargs):
self._locks[self.path].acquire()
try:
cb(*args, **kwargs)
finally:
self._locks[self.path].release()
class Lock(object):
_locks = {}
def __init__(self, path, identifier):
self._path = zk_scrub_path(path)
if self._path not in self._locks:
self._locks[self._path] = (
gevent.lock.Semaphore(), # write lock
gevent.lock.Semaphore(), # read lock
identifier,
)
def acquire(self, blocking=True, timeout=None):
w_lock, _, _ = self._locks[self._path]
return w_lock.acquire(blocking, timeout)
def release(self):
w_lock, _, _ = self._locks[self._path]
w_lock.release()
def contenders(self):
w_lock, _, contender = self._locks[self._path]
return [contender] if w_lock.locked() else []
def destroy(self):
self._locks.pop(self._path, None)
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_value, traceback):
self.release()
self.destroy()
class ReadLock(Lock):
def acquire(self, blocking=True, timeout=None):
w_lock, r_lock, _ = self._locks[self._path]
if w_lock.acquire(blocking, timeout):
w_lock.release()
r_lock.acquire(False)
return True
return False
def release(self):
_, r_lock, _ = self._locks[self._path]
r_lock.release()
class WriteLock(Lock):
def acquire(self, blocking=True, timeout=None):
w_lock, r_lock, _ = self._locks[self._path]
if r_lock.acquire(blocking, timeout):
r_lock.release()
                # We should subtract the time already spent acquiring the
                # read lock from the timeout before trying to acquire the
                # write lock.
return w_lock.acquire(blocking, timeout)
return False
def release(self):
w_lock, _, _ = self._locks[self._path]
w_lock.release()
def __init__(self, *args, **kwargs):
self.add_listener = stub
self.start = stub
self.state = KazooState.CONNECTED
# end __init__
@classmethod
def reset(cls):
cls._values = {}
# end reset
def command(self, cmd):
if cmd == 'stat':
return 'Mode:standalone\nNode count:%s\n' %(len(self._values.keys()))
# end command
def stop(*args, **kwargs):
pass
# end stop
def create(self, path, value='', *args, **kwargs):
scrubbed_path = zk_scrub_path(path)
if scrubbed_path in self._values:
raise ResourceExistsError(
path, str(self._values[scrubbed_path][0]), 'zookeeper')
self._values[scrubbed_path] = (value, ZnodeStat(time.time()*1000))
# end create
def create_node(self, path, value='', *args, **kwargs):
scrubbed_path = zk_scrub_path(path)
if scrubbed_path in self._values:
raise ResourceExistsError(
path, str(self._values[scrubbed_path][0]), 'zookeeper')
self._values[scrubbed_path] = (value, ZnodeStat(time.time()*1000))
    # end create_node
def read_node(self, path):
try:
return self._values[zk_scrub_path(path)]
except KeyError:
raise kazoo.exceptions.NoNodeError()
    # end read_node
def get(self, path):
try:
return self._values[zk_scrub_path(path)]
except KeyError:
raise kazoo.exceptions.NoNodeError()
# end get
def set(self, path, value):
scrubbed_path = zk_scrub_path(path)
if scrubbed_path not in self._values:
raise kazoo.exceptions.NoNodeError()
self._values[scrubbed_path] = (value, ZnodeStat(time.time()*1000))
def get_children(self, path):
if not path:
return []
children = set()
scrubbed_path = zk_scrub_path(path)
for node in self._values:
if node.startswith(scrubbed_path):
# return non-leading '/' in name
child_node = node[len(scrubbed_path):]
if not child_node:
continue
if child_node[0] == '/':
child_node = child_node[1:]
children.add(child_node.split('/')[0])
return list(children)
# end get_children
def exists(self, path):
scrubbed_path = zk_scrub_path(path)
if scrubbed_path in self._values:
return self._values[scrubbed_path]
return None
# end exists
def delete_node(self, path, recursive=False):
scrubbed_path = zk_scrub_path(path)
if not recursive:
try:
del self._values[scrubbed_path]
except KeyError:
raise kazoo.exceptions.NoNodeError()
else:
for path_key in self._values.keys():
if scrubbed_path in path_key:
del self._values[path_key]
    # end delete_node
def delete(self, path, recursive=False):
scrubbed_path = zk_scrub_path(path)
if not recursive:
try:
del self._values[scrubbed_path]
except KeyError:
raise kazoo.exceptions.NoNodeError()
else:
for path_key in self._values.keys():
if scrubbed_path in path_key:
del self._values[path_key]
# end delete
@contextlib.contextmanager
def patch_path(self, path, new_values=None, recursive=True):
# if recursive is False, new_values is value at path
# if recursive is True, new_values is dict((path,path-val))
scrubbed_path = zk_scrub_path(path)
orig_nodes = {}
paths_to_patch = []
# collect path(s) to patch...
for node in self._values.keys():
if recursive: # simulate wipe of node with path and descendants
if node.startswith(scrubbed_path):
paths_to_patch.append(node)
else: # only one path
if node == scrubbed_path:
paths_to_patch = [node]
break
# ...and patch it
for path in paths_to_patch:
orig_nodes[path] = self._values[path]
if recursive:
if new_values and path in new_values:
self._values[path] = new_values[path]
else:
del self._values[path]
else: # only one path
if new_values is None:
del self._values[path]
else:
self._values[path] = new_values
break
try:
yield
finally:
for node in orig_nodes:
self._values[node] = orig_nodes[node]
#end patch_path
# end class FakeKazooClient
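# The fake_zk_counter_* helpers below appear to be intended for monkey-patching
# a zookeeper counter recipe in tests. Note that 'max_count', checked in
# fake_zk_counter_change, is never set here and is expected to be assigned on
# the counter by the test itself.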
def fake_zk_counter_init(self, client, path, default=0, *args, **kwargs):
self.client = client
self.path = path
self.default = default
self.default_type = type(default)
self._ensured_path = False
self._value = default
@property
def fake_zk_counter_value(self):
return self._value
def fake_zk_counter_change(self, value):
data = int(self._value + value)
if data > self.max_count:
raise OverQuota()
else:
self._value = data
return self
def fake_zk_counter_ensure_node(self):
self._ensured_path = True
class ZookeeperClientMock(object):
def __init__(self, *args, **kwargs):
self._count = 0
self._values = {}
# end __init__
def is_connected(self):
return True
def alloc_from(self, path, max_id):
self._count = self._count + 1
return self._count
# end alloc_from
def alloc_from_str(self, path, value=''):
self._count = self._count + 1
zk_val = "%(#)010d" % {'#': self._count}
self._values[path + zk_val] = (value, ZnodeStat(time.time()*1000))
return zk_val
# end alloc_from_str
def delete(self, path):
try:
del self._values[path]
except KeyError:
raise kazoo.exceptions.NoNodeError()
# end delete
def read(self, path, include_timestamp=False):
try:
if include_timestamp:
return self._values[path]
return self._values[path][0]
except Exception as err:
raise pycassa.NotFoundException
# end read
def get_children(self, path):
return []
# end get_children
def read_node(self, path, include_timestamp=False):
try:
return self.read(path, include_timestamp)
except pycassa.NotFoundException:
return None
# end read_node
def create_node(self, path, value=''):
#if path in self._values:
#raise ResourceExistsError(
# path, str(self._values[path][0], 'zookeeper'))
self._values[path] = (value, ZnodeStat(time.time()*1000))
# end create_node
def delete_node(self, path, recursive=False):
if not recursive:
try:
del self._values[path]
except KeyError:
raise kazoo.exceptions.NoNodeError()
else:
for path_key in self._values.keys():
if path in path_key:
del self._values[path_key]
# end delete_node
def master_election(self, path, pid, func, *args, **kwargs):
func(*args, **kwargs)
# end master_election
# end Class ZookeeperClientMock
class FakeVncApiStatsLog(object):
_all_logs = []
send = stub
def __init__(self, *args, **kwargs):
FakeVncApiStatsLog._all_logs.append(kwargs['api_stats'])
@classmethod
def _print(cls):
for log in cls._all_logs:
x = copy.deepcopy(log.__dict__)
#body = x.pop('body')
#pprint(json.loads(body))
pprint(x)
print "\n"
# class FakeVncApiStatsLog
|
|
import os, sys, re, copy
import matplotlib.pyplot as pyplot
import immunoseq.lib.immunoseqLib as iseqlib
from sonLib.bioio import system
def getSubSamples(seqs):
samples = []
for header, name2seq in seqs.iteritems():
for name in name2seq.keys():
if name not in samples:
samples.append(name)
return samples
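# Returns {number_of_subsamples_sharing_the_clone: {header: {sample_name: count}}}
# (read counts are used in place of frequencies; see the HACK note below).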
def sharedSeqFreq(sample):
numsam2header2freqs = {}
for header, name2seq in sample.seqs.iteritems():
numsam = len( name2seq.keys() )
#freqs = sorted( [ seq.freq for seq in name2seq.values() ] )
n2freq = {}
for n, s in name2seq.iteritems():
#n2freq[n] = s.freq
n2freq[n] = s.count #HACK
if numsam not in numsam2header2freqs:
numsam2header2freqs[numsam] = {header:n2freq}
else:
numsam2header2freqs[numsam][header] = n2freq
return numsam2header2freqs
def printSharedSeqFreq(sample, minsam, outfile):
f = open(outfile, 'w')
numsam2header2freqs = sharedSeqFreq(sample)
for numsam, header2freqs in numsam2header2freqs.iteritems():
if numsam >= minsam:
for header, n2f in header2freqs.iteritems():
#freqs = n2f.values()
#f.write("%s\t%s\n" %(header, ','.join( [str(freq) for freq in freqs] )) )
names = []
freqs = []
for n, freq in n2f.iteritems():
names.append(n)
freqs.append( str(freq) )
f.write("%s\t%s\t%s\n" % (header, ','.join(names) , ','.join(freqs) ))
f.close()
return
def printSharedSeqFreqAll( samples, minsam, outdir ):
for s in samples:
file = os.path.join( outdir, "%s-freqs.txt" %s.name )
printSharedSeqFreq(s, minsam, file)
def filterByNumSample(sample, minsam, outfile, minfreq):
#sample = calcFreq(sample) #HACK
f = open(outfile, 'w')
for header, name2seq in sample.seqs.iteritems():
if len(name2seq.keys()) >= minsam:
for name, seq in name2seq.iteritems():
if seq.freq < minfreq:
continue
h = seq.getFastaHeader()
f.write("%s\n" %h)
f.write("%s\n" %seq.seq)
f.close()
return
def filterByNumSampleAll(samples, minsam, outdir, minfreq):
for s in samples:
outfile = os.path.join(outdir, s.name)
filterByNumSample(s, minsam, outfile, minfreq)
#========== DRAWING ===
def combineSamples(samples, name):
#combine iseqlib samples into One sample where seqs = {header: {subSampleName: Seq}}
sample = iseqlib.Sample(name)
seqs = {}
for s in samples:
for header, seq in s.seqs.iteritems():
if header not in seqs:
seqs[header] = {s.name: seq}
else:
seqs[header][s.name] = seq
sample.seqs = seqs
return sample
def getSharedSeqDist(samples, uniq):
sample2dist = {}
for sample in samples:
#sample = calcFreq(sample) #HACK
numsam2count = {}
for header, name2seq in sample.seqs.iteritems():
numsam = len( name2seq.keys() ) #number of samples having this sequence
if uniq:
if numsam not in numsam2count:
numsam2count[numsam] = 1
else:
numsam2count[numsam] += 1
else:
reads = sum([seq.count for seq in name2seq.values()])
if numsam not in numsam2count:
numsam2count[numsam] = reads
else:
numsam2count[numsam] += reads
sample2dist[sample.name] = numsam2count
return sample2dist
def printNumsamVsClonesDist(outfile, sample2dist, relative):
#Get the union list of numsam:
nums = []
for s, num2count in sample2dist.iteritems():
for num in num2count:
if num not in nums:
nums.append(num)
nums.sort()
#Convert to percentage if option 'relative' is True
#if relative:
# for s, n2c in sample2dist.iteritems():
# total = sum(n2c.values())
# if total >0:
# for n, c in n2c.iteritems():
# n2c[n] = c*100.0/total
f = open(outfile, 'w')
f.write("Group\t%s\n" %( '\t'.join([str(n) for n in nums]) ))
for s, num2count in sample2dist.iteritems():
f.write("%s" %s)
total = sum(num2count.values())
for n in nums:
if n not in num2count or total == 0:
f.write("\t0")
else:
if not relative:
f.write("\t%f" %num2count[n])
else:
f.write("\t%f" %( num2count[n]*100.0/total ))
f.write("\n")
f.close()
def drawDistData(axes, sample2dist):
#sample2dist = getSharedSeqDist(samples, uniq)
lines = []
labels = []
colors = iseqlib.getColors6()
#lightColors = getColors6light()
markersize = 10.0
c = -1
xmax = 0
for s in sorted( sample2dist.keys() ):
numsam2count = sample2dist[s]
c += 1
xdata = sorted( numsam2count.keys() )
xmax = max([xmax, max(xdata)])
ydata = [ numsam2count[x] for x in xdata ]
totaly = sum(ydata)
pcydata = [(100.0*y)/totaly for y in ydata]
#line = axes.plot(xdata, ydata, color=colors[c], marker='o', markeredgecolor=colors[c], markersize = markersize, linestyle='-', linewidth=2)
line = axes.plot(xdata, pcydata, color=colors[c], marker='o', markeredgecolor=colors[c], markersize = markersize, linestyle='-', linewidth=2)
#axes.plot(xdata, ydata, color=lightColors[c], linestyle='-', linewidth=0.5)
lines.append(line)
labels.append(s)
print s
print xdata
print ydata
print pcydata
axes.set_yscale('log')
axes.set_xlim(0.8, xmax + 0.2)
xticks = xrange(1, xmax + 1)
xticklabels = [ str(x) for x in xticks ]
axes.xaxis.set_ticks(xticks)
axes.xaxis.set_ticklabels( xticklabels )
axes.set_title('Shared sequences', size="xx-large")
iseqlib.editSpine( axes )
axes.set_xlabel("Number of samples", size='x-large')
axes.set_ylabel("Number of clones", size='x-large')
legend = axes.legend( lines, labels, numpoints=1, loc='best', ncol=1)
    legend.draw_frame(False)
axes.yaxis.grid(b=True, color="#BDBDBD", linestyle='-', linewidth=0.005)
axes.xaxis.grid(b=True, color="#BDBDBD", linestyle='-', linewidth=0.005)
def drawDist(samples, outfile, uniq):
dpi = 300
outformat = 'pdf'
fig, pdf = iseqlib.initImage2(10.0, 10.0, outformat, outfile, dpi)
axes = iseqlib.setAxes(fig)
sample2dist = getSharedSeqDist(samples, uniq)
drawDistData(axes, sample2dist)
iseqlib.writeImage2(fig, pdf, outformat, outfile, dpi)
def drawNumsamVsClonesDist2(sample2dist, outfile):
dpi = 300
outformat = 'pdf'
fig, pdf = iseqlib.initImage2(10.0, 10.0, outformat, outfile, dpi)
axes = iseqlib.setAxes(fig)
drawDistData(axes, sample2dist)
iseqlib.writeImage2(fig, pdf, outformat, outfile, dpi)
######### CLONE MATRIX ###############
def calcFreq(sample):
newsample = copy.copy(sample)
name2total = {}
for header, name2seq in sample.seqs.iteritems():
for name, seq in name2seq.iteritems():
if name in name2total:
name2total[name] += seq.count
else:
name2total[name] = seq.count
for header, name2seq in sample.seqs.iteritems():
for name, seq in name2seq.iteritems():
seq.setFreq( name2total[name] )
return newsample
def printCloneMatrix(outfile, sample, minsam, freqTransform):
sample = calcFreq(sample)
f = open(outfile, 'w')
f.write( "Clone\t%s\n" % '\t'.join(sample.subsamples) )
for header, name2seq in sample.seqs.iteritems():
rowvals = []
if len(name2seq) < minsam:
continue
for name in sample.subsamples:
count = 0
if name in name2seq:
count = name2seq[name].count
if freqTransform:
count = int ( name2seq[name].freq*freqTransform + 0.5 )
rowvals.append( str(count) )
f.write("%s\t%s\n" %(header, "\t".join(rowvals)))
f.close()
def printCloneMatrixAll(outdir, samples, minsam, freqTransform):
for sample in samples:
outfile = os.path.join(outdir, "cloneM-%s.txt" %sample.name)
printCloneMatrix(outfile, sample, minsam, freqTransform)
#def main():
# samples = readfiles(options.indir, options.count)
# if options.drawdist:
# drawDist(samples, options)
# #printSharedSeqFreqAll( samples, minsam, "counts-atleast3sams" )
# if options.fasta:
# faoutdir = os.path.join(options.outdir, "fasta-atleast%dsams" %options.minsam)
# system("mkdir -p %s" %faoutdir)
# filterByNumSampleAll(samples, options.minsam, faoutdir, options.freq)
# if options.clonematrix:
# printCloneMatrixAll(options.outdir, samples, options.minsam, options.freqTransform)
|
|
import copy
from datetime import datetime
import functools
import hashlib
import json
import os
import random
import re
import string
import socket
import subprocess
import sys
import time
import uuid
import warnings
import yaml
with warnings.catch_warnings():
# Fabric maintenance is lagging a bit so let's suppress these warnings.
warnings.filterwarnings("ignore", category=DeprecationWarning)
from fabric import Connection
from ipaddress import ip_address, ip_network
from paramiko.ssh_exception import SSHException
import requests
import retrying
import textwrap
import winrm
from cloudify_rest_client.exceptions import CloudifyClientError
from cosmo_tester.framework import util
from cosmo_tester.framework.constants import CLOUDIFY_TENANT_HEADER
HEALTHY_STATE = 'OK'
RSYNC_LOCATIONS = ['/etc',
'/opt',
'/var',
'/usr']
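# Decorator for VM methods that only make sense on a manager image; raises
# RuntimeError when called on a plain test VM.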
def only_manager(func):
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
if not self.is_manager:
raise RuntimeError('This is not a manager')
return func(self, *args, **kwargs)
return wrapped
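# Decorator that (re)establishes the fabric SSH connection before running the
# wrapped VM method; Windows hosts are skipped because they use winrm instead.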
def ensure_conn(func):
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
if self.windows:
# We don't maintain a conn for windows currently
return func(self, *args, **kwargs)
if self._conn is None or self._conn.transport is None:
_make_connection(self)
# SFTP session gets cached and breaks after reboots or conn drops.
# Someone has a PR to fabric in-flight since Nov 2021.
self._conn._sftp = None
try:
self._conn.transport.open_session().close()
except Exception as err:
self._logger.warning('SSH connection failure: %s', err)
_make_connection(self)
self._conn.transport.open_session().close()
return func(self, *args, **kwargs)
return wrapped
@retrying.retry(stop_max_attempt_number=5, wait_fixed=3000)
def _make_connection(vm):
vm._conn = Connection(
host=vm.ip_address,
user=vm.username,
connect_kwargs={
'key_filename': [vm.private_key_path],
},
port=22,
connect_timeout=3,
)
vm._conn.open()
vm._conn.transport.set_keepalive(15)
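# A single test host (Cloudify manager or plain Linux/Windows VM). The object
# is constructed from an image type; assign() later wires in the addresses,
# SSH key and tmpdir once the infrastructure has been deployed.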
class VM(object):
def __init__(self, image_type, test_config, bootstrappable=False):
self.image_name = None
self.userdata = ""
self.username = None
self.password = None
self.api_ca_path = None
self.enable_ssh_wait = True
self.should_finalize = True
self.restservice_expected = False
self.client = None
self._test_config = test_config
self.windows = 'windows' in image_type
self._tmpdir_base = None
self.bootstrappable = bootstrappable
self.image_type = image_type
self.is_manager = self._is_manager_image_type()
self.reboot_required = False
self._set_image_details()
if self.windows:
self.prepare_for_windows()
if self.is_manager:
self.basic_install_config = {
'manager': {
'security': {
'admin_username': self._test_config[
'test_manager']['username'],
'admin_password': util.generate_password(),
},
},
'sanity': {'skip_sanity': True},
}
def assign(
self,
public_ip_address,
private_ip_address,
networks,
ssh_key,
logger,
tmpdir,
node_instance_id,
deployment_id,
server_id,
server_index,
):
self.ip_address = public_ip_address
self.private_ip_address = private_ip_address
self._ssh_key = ssh_key
self._logger = logger
self._tmpdir_base = tmpdir
self._tmpdir = os.path.join(tmpdir, public_ip_address)
os.makedirs(self._tmpdir)
self.node_instance_id = node_instance_id
self.deployment_id = deployment_id
self.server_id = server_id
# This is overridden in some cluster tests
self.friendly_name = '{} ({})'.format(server_id, private_ip_address)
self.server_index = server_index
if self.is_manager:
self.basic_install_config['manager']['public_ip'] = \
str(public_ip_address)
self.basic_install_config['manager']['private_ip'] = \
str(private_ip_address)
self.basic_install_config['manager']['hostname'] = str(server_id)
self.networks = networks
self.install_config = copy.deepcopy(self.basic_install_config)
self._create_conn_script()
self._conn = None
def _create_conn_script(self):
script_path = self._tmpdir_base / '{prefix}_{index}'.format(
prefix='rdp' if self.windows else 'ssh',
index=self.server_index)
if self.windows:
script_content = (
"xfreerdp /u:{user} /p:'{password}' "
'/w:1366 /h:768 /v:{addr}'
).format(
user=self.username,
password=self.password,
addr=self.ip_address,
)
else:
# Don't check this call- it might fail due to missing known_hosts
# file or similar, and we shouldn't fail the test because of that.
subprocess.call(['ssh-keygen', '-R', self.ip_address])
script_content = (
'ssh -i {key} -o StrictHostKeyChecking=no {connstr} ${{*}}\n'
).format(
key=self._ssh_key.private_key_path,
connstr='{}@{}'.format(self.username, self.ip_address),
)
with open(script_path, 'w') as fh:
fh.write(script_content)
subprocess.check_call(['chmod', '+x', script_path])
def log_action(self, action):
"""Log that we're doing something with this node."""
self._logger.info('%s on %s', action, self.friendly_name)
def prepare_for_windows(self):
"""Prepare this VM to be created as a windows VM."""
add_firewall_cmd = "&netsh advfirewall firewall add rule"
password = ''.join(random.choice(string.ascii_letters + string.digits)
for _ in range(16))
# To meet complexity requirements- the above should be hard enough to
# crack for a short lived test VM
password += '!'
self.enable_ssh_wait = False
self.restservice_expected = False
self.should_finalize = False
self.userdata = """#ps1_sysnative
$PSDefaultParameterValues['*:Encoding'] = 'utf8'
Write-Host "## Configuring WinRM and firewall rules.."
winrm quickconfig -q
winrm set winrm/config '@{{MaxTimeoutms="1800000"}}'
winrm set winrm/config/winrs '@{{MaxMemoryPerShellMB="300"}}'
winrm set winrm/config/service '@{{AllowUnencrypted="true"}}'
winrm set winrm/config/service/auth '@{{Basic="true"}}'
{fw_cmd} name="WinRM 5985" protocol=TCP dir=in localport=5985 action=allow
{fw_cmd} name="WinRM 5986" protocol=TCP dir=in localport=5986 action=allow
Write-Host "## Setting password for Admin user.."
$user = [ADSI]"WinNT://localhost/{user}"
$user.SetPassword("{password}")
$user.SetInfo()""".format(fw_cmd=add_firewall_cmd,
user=self.username,
password=password)
self.password = password
@retrying.retry(stop_max_attempt_number=120, wait_fixed=3000)
def wait_for_winrm(self):
self._logger.info('Checking Windows VM %s is up...', self.ip_address)
try:
self.run_command('Write-Output "Testing winrm."',
powershell=True)
except Exception as err:
self._logger.warning('...failed: {err}'.format(err=err))
raise
self._logger.info('...Windows VM is up.')
def get_windows_remote_file_content(self, path):
return self.run_command(
'Get-Content -Path {}'.format(path),
powershell=True).std_out
# We're allowing about 5 minutes in case of /really/ slow VM start/restart
@retrying.retry(stop_max_attempt_number=100, wait_fixed=3000)
def wait_for_ssh(self):
if self.enable_ssh_wait:
self.run_command('true')
self.log_action('SSH check complete')
@property
def private_key_path(self):
return self._ssh_key.private_key_path
def __str__(self):
if self.is_manager:
return 'Cloudify manager [{}]'.format(self.ip_address)
return 'Cloudify Test VM ({image}) [{ip}]'.format(
image=self.image_name,
ip=self.ip_address,
)
def stop(self):
"""Stops this instance."""
self._logger.info('Stopping server.. [id=%s]', self.server_id)
# Previously, we were calling stop_server on openstack, which allowed
# clean shutdown
self.run_command('shutdown -h now', warn_only=True, use_sudo=True)
while True:
try:
self.log_action('Checking connection')
time.sleep(3)
except (SSHException, socket.timeout, EOFError, TimeoutError):
# Errors like 'Connection reset by peer' can occur during the
# shutdown, but we should wait a little longer to give other
# services time to stop
time.sleep(3)
if not self._conn.is_connected:
# By this point everything should be down.
self.log_action('Server stopped')
break
def finalize_preparation(self):
"""Complete preparations for using a new instance."""
self._logger.info('Finalizing server preparations.')
self.wait_for_ssh()
if self.restservice_expected:
# When creating the rest client here we can't check for SSL yet,
# because the manager probably isn't up yet. Therefore, we'll just
# make the client.
self.client = self.get_rest_client(proto='http')
self._logger.info('Checking rest service.')
self.wait_for_manager()
self._logger.info('Applying license.')
self.apply_license()
def _get_python_path(self):
return self.run_command(
'which python || which python3').stdout.strip()
def get_distro(self):
        # Get the distro string we expect agents to identify as
if self.windows:
return 'windows'
self.put_remote_file_content(
'/tmp/get_distro',
'''#! {python}
import platform
distro, _, codename = platform.dist()
print('{{}} {{}}'.format(distro, codename).lower())
'''.format(python=self._get_python_path()))
self.run_command('chmod +x /tmp/get_distro')
return self.run_command('/tmp/get_distro').stdout.strip()
@property
def ssh_key(self):
return self._ssh_key
@ensure_conn
def get_remote_file(self, remote_path, local_path):
""" Dump the contents of the remote file into the local path """
# Similar to the way fabric1 did it
remote_tmp = '/tmp/' + hashlib.sha1(
remote_path.encode('utf-8')).hexdigest()
self.run_command(
'cp {} {}'.format(remote_path, remote_tmp),
use_sudo=True,
)
self.run_command(
'chmod 444 {}'.format(remote_tmp),
use_sudo=True,
)
local_dir = os.path.dirname(local_path)
if not os.path.exists(local_dir):
os.makedirs(local_dir)
self._conn.get(
remote_tmp,
local_path,
)
@ensure_conn
def put_remote_file(self, remote_path, local_path):
""" Dump the contents of the local file into the remote path """
if self.windows:
with open(local_path) as fh:
content = fh.read()
self.put_remote_file_content(remote_path, content)
else:
remote_tmp = '/tmp/' + hashlib.sha1(
remote_path.encode('utf-8')).hexdigest()
self.run_command(
'rm -rf {}'.format(remote_tmp),
use_sudo=True,
)
# Similar to the way fabric1 did it
self._conn.put(
local_path,
remote_tmp,
)
self.run_command(
'mkdir -p {}'.format(
os.path.dirname(remote_path),
),
use_sudo=True,
)
self.run_command(
'mv {} {}'.format(remote_tmp, remote_path),
use_sudo=True,
)
def get_remote_file_content(self, remote_path):
tmp_local_path = os.path.join(self._tmpdir, str(uuid.uuid4()))
try:
self.get_remote_file(remote_path, tmp_local_path)
with open(tmp_local_path, 'r') as f:
content = f.read()
finally:
if os.path.exists(tmp_local_path):
os.unlink(tmp_local_path)
return content
def put_remote_file_content(self, remote_path, content):
if self.windows:
self.run_command(
"Add-Content -Path {} -Value '{}'".format(
remote_path,
# Single quoted string will not be interpreted
# But single quotes must be represented in such a string
# with double single quotes
content.replace("'", "''"),
),
powershell=True,
)
else:
tmp_local_path = os.path.join(self._tmpdir, str(uuid.uuid4()))
try:
with open(tmp_local_path, 'w') as f:
f.write(content)
self.put_remote_file(remote_path, tmp_local_path)
finally:
if os.path.exists(tmp_local_path):
os.unlink(tmp_local_path)
@ensure_conn
def run_command(self, command, use_sudo=False, warn_only=False,
hide_stdout=False, powershell=False):
if self.windows:
url = 'http://{host}:{port}/wsman'.format(host=self.ip_address,
port=5985)
session = winrm.Session(url, auth=(self.username, self.password))
self._logger.info('Running command: %s', command)
runner = session.run_ps if powershell else session.run_cmd
result = runner(command)
self._logger.info('- stdout: %s', result.std_out)
self._logger.info('- stderr: %s', result.std_err)
self._logger.info('- status_code: %d', result.status_code)
if not warn_only:
assert result.status_code == 0
# To allow the same calling conventions as linux commands
result.stdout = result.std_out
return result
else:
hide = 'stdout' if hide_stdout else None
if use_sudo:
return self._conn.sudo(command, warn=warn_only, hide=hide)
else:
return self._conn.run(command, warn=warn_only, hide=hide)
@property
@only_manager
def mgr_password(self):
return self.install_config['manager']['security']['admin_password']
@only_manager
def upload_init_script_plugin(self, tenant_name='default_tenant'):
self._logger.info('Uploading init script plugin to %s', tenant_name)
self._upload_plugin(
'plugin/init_script_plugin-1.0.0-py27-none-any.zip',
tenant_name)
@only_manager
def upload_test_plugin(self, tenant_name='default_tenant'):
self._logger.info('Uploading test plugin to %s', tenant_name)
self._upload_plugin(
'plugin/test_plugin-1.0.0-py27-none-any.zip',
tenant_name)
def _upload_plugin(self, plugin_path, tenant_name):
with util.set_client_tenant(self.client, tenant_name):
try:
self.client.plugins.upload(
util.get_resource_path(plugin_path),
)
self.wait_for_all_executions(include_system_workflows=True)
except CloudifyClientError as err:
if self._test_config['premium']:
raise
# On community this can happen if multiple tests use the
# same manager (because the first will upload the plugin and
# the later test(s) will then conflict due to it existing).
# Premium avoids this with multiple tenants.
if 'already exists' in str(err):
pass
else:
raise
@only_manager
@retrying.retry(stop_max_attempt_number=6 * 10, wait_fixed=10000)
def verify_services_are_running(self):
if not self.is_manager:
return True
# the manager-ip-setter script creates the `touched` file when it
# is done.
try:
# will fail on bootstrap based managers
self.run_command('supervisorctl -a | grep manager-ip-setter')
except Exception:
pass
else:
self._logger.info('Verify manager-ip-setter is done..')
self.run_command('cat /opt/cloudify/manager-ip-setter/touched')
self._logger.info(
'Verifying all services are running on manager %s...',
self.ip_address,
)
manager_status = self.client.manager.get_status()
if manager_status['status'] == HEALTHY_STATE:
return
for display_name, service in manager_status['services'].items():
assert service['status'] == 'Active', \
'service {0} is in {1} state'.format(
display_name, service['status'])
@only_manager
def get_installed_configs(self):
conf_files = [
conf_file.strip() for conf_file in
self.run_command(
'ls /etc/cloudify/*_config.yaml || true').stdout.split()
]
return conf_files or ['/etc/cloudify/config.yaml']
@only_manager
def is_configured(self):
services = self.run_command(
'if [[ -d {confed_dir} ]]; then ls {confed_dir}; fi'.format(
confed_dir='/etc/cloudify/.configured',
)
).stdout.strip()
return any(service in services for service in
['database', 'manager', 'queue'])
@only_manager
def start_manager_services(self):
if not self.is_configured():
self._logger.info('No services configured')
return
for config_name in self.get_installed_configs():
config_path = self._get_config_path(config_name)
self._logger.info('Starting services using {}'.format(
config_path))
self.run_command('cfy_manager start -c {}'.format(config_path))
@only_manager
def stop_manager_services(self):
if not self.is_configured():
self._logger.info('No services configured')
return
for config_name in self.get_installed_configs():
config_path = self._get_config_path(config_name)
self._logger.info('Stopping services using {}'.format(
config_path))
self.run_command('cfy_manager stop -c {}'.format(config_path))
@only_manager
def teardown(self, kill_certs=True):
self._logger.info('Tearing down using any installed configs')
if self.is_configured():
for config_name in self.get_installed_configs():
config_path = self._get_config_path(config_name)
self._logger.info('Tearing down using {}'.format(config_path))
self.run_command(
'cfy_manager remove -c {}'.format(config_path))
else:
self._logger.info('No services configured')
if kill_certs:
self._logger.info('Removing certs directory')
self.run_command('sudo rm -rf /etc/cloudify/ssl')
if self.api_ca_path and os.path.exists(self.api_ca_path):
os.unlink(self.api_ca_path)
@only_manager
def _create_config_file(self, upload_license=True):
config_file = self._tmpdir / 'config_{0}.yaml'.format(self.ip_address)
cloudify_license_path = \
'/tmp/test_valid_paying_license.yaml' if upload_license else ''
self.install_config['manager'][
'cloudify_license_path'] = cloudify_license_path
install_config_str = yaml.safe_dump(self.install_config)
self._logger.info(
'Install config:\n%s', str(install_config_str))
config_file.write_text(install_config_str)
return config_file
@only_manager
def apply_license(self):
license = util.get_resource_path('test_valid_paying_license.yaml')
self.client.license.upload(license)
@only_manager
def apply_override(self, override_name):
override_path = self._test_config['override'][override_name]
if not override_path:
self._logger.info('No override set for %s', override_name)
return
override_subdirs = ['cfy_manager']
remote_paths = [
'/opt/cloudify/cfy_manager/lib/python3.6/site-packages/'
]
local_tar_path = self._tmpdir_base / 'override_{}.tar.gz'.format(
override_name,
)
remote_tar_path = '/tmp/override_{}.tar.gz'.format(override_name)
subprocess.check_call(
[
'tar', '-czf', local_tar_path, *override_subdirs
],
cwd=override_path,
)
self.put_remote_file(remote_tar_path, local_tar_path)
for remote_path in remote_paths:
self._logger.info('Removing existing files for %s', override_name)
for subdir in override_subdirs:
subdir_path = remote_path + subdir
self.run_command('rm -r {}'.format(subdir_path),
use_sudo=True)
self._logger.info('Extracting new files from %s to %s for %s',
remote_tar_path, remote_path, override_name)
self.run_command(
'bash -c "cd {remote_path} '
'&& tar -xzf {archive_path}"'.format(
remote_path=remote_path,
archive_path=remote_tar_path,
),
use_sudo=True,
)
@only_manager
def _get_config_path(self, config_name=None):
if config_name:
if config_name.startswith('/'):
return config_name
return '/etc/cloudify/{0}_config.yaml'.format(config_name)
return '/etc/cloudify/config.yaml'
@only_manager
def bootstrap(self, upload_license=False,
blocking=True, restservice_expected=True, config_name=None,
include_sanity=False):
if self.image_type == '5.0.5':
# We don't have a bootstrappable 5.0.5, so skip this
return
if include_sanity:
self.install_config['sanity']['skip_sanity'] = False
self.wait_for_ssh()
if self.image_type == 'master':
# Only apply the overrides to the version being tested.
# The others were already released, don't pretend changing them is
# reasonable.
self.apply_override('cloudify_manager_install')
self.restservice_expected = restservice_expected
install_config = self._create_config_file(
upload_license and self._test_config['premium'])
        # If we leave this lying around on a compact cluster, every component
        # after the first looks bootstrapped as soon as we check it, because
        # the first component already created the marker file.
self.run_command('rm -f /tmp/bootstrap_complete')
self.run_command('mkdir -p /tmp/bs_logs')
self.put_remote_file(
'/tmp/cloudify.conf',
install_config,
)
if upload_license:
self.put_remote_file(
'/tmp/test_valid_paying_license.yaml',
util.get_resource_path('test_valid_paying_license.yaml'),
)
if config_name:
dest_config_path = self._get_config_path(config_name)
commands = [
'sudo mv /tmp/cloudify.conf {0} > '
'/tmp/bs_logs/0_mv 2>&1'.format(dest_config_path),
'cfy_manager install -c {0} > '
'/tmp/bs_logs/3_install 2>&1'.format(dest_config_path)
]
else:
commands = [
'sudo mv /tmp/cloudify.conf /etc/cloudify/config.yaml > '
'/tmp/bs_logs/0_mv 2>&1',
'cfy_manager install > /tmp/bs_logs/3_install 2>&1'
]
commands.append('touch /tmp/bootstrap_complete')
install_command = ' && '.join(commands)
install_command = (
'( ' + install_command + ') '
'|| touch /tmp/bootstrap_failed &'
)
install_file = self._tmpdir / 'install_{0}.yaml'.format(
self.ip_address,
)
install_file.write_text(install_command)
self.put_remote_file('/tmp/bootstrap_script', install_file)
self.run_command('nohup bash /tmp/bootstrap_script &>/dev/null &')
if blocking:
while True:
if self.bootstrap_is_complete():
break
else:
time.sleep(5)
@only_manager
def bootstrap_is_complete(self):
if self.image_type == '5.0.5':
# We don't have a bootstrappable 5.0.5, so we use pre-bootstrapped
return True
        # Using a bash construct because fabric frequently changes its mind
        # about how non-zero exit codes should be handled
result = self.run_command(
'if [[ -f /tmp/bootstrap_complete ]]; then'
' echo done; '
'elif [[ -f /tmp/bootstrap_failed ]]; then '
' echo failed; '
'else '
' echo not done; '
'fi'
).stdout.strip()
if result == 'done':
self._logger.info('Bootstrap complete.')
self.finalize_preparation()
return True
else:
# To aid in troubleshooting (e.g. where a VM runs commands too
# slowly)
self.run_command('date > /tmp/cfy_mgr_last_check_time')
if result == 'failed':
self._logger.error('BOOTSTRAP FAILED!')
# Get all the logs on failure
self.run_command(
'cat /tmp/bs_logs/*'
)
raise RuntimeError('Bootstrap failed.')
else:
self.run_command(
'tail -n5 /tmp/bs_logs/* || echo Waiting for logs'
)
self._logger.info('Bootstrap in progress...')
return False
@only_manager
@retrying.retry(stop_max_attempt_number=200, wait_fixed=1000)
def wait_for_all_executions(self, include_system_workflows=True):
executions = self.client.executions.list(
include_system_workflows=include_system_workflows,
_all_tenants=True,
_get_all_results=True
)
for execution in executions:
if execution['status'] != 'terminated':
raise Exception(
'Timed out: Execution {} did not terminate'.format(
execution['id'],
)
)
@only_manager
@retrying.retry(stop_max_attempt_number=60, wait_fixed=5000)
def wait_for_manager(self):
self._logger.info('Checking for starter service')
# If we don't wait for this then tests get a bit racier
self.run_command(
"systemctl status cloudify-starter 2>&1"
"| grep -E '(status=0/SUCCESS)|(could not be found)'")
# ...and apparently we're misnaming it at the moment
self.run_command(
"systemctl status cfy-starter 2>&1"
"| grep -E '(status=0/SUCCESS)|(could not be found)'")
self._logger.info('Checking manager status')
try:
manager_status = self.client.manager.get_status()
except Exception as err:
self._logger.info(str(err))
if 'SSL must be used' in str(err):
self._logger.info(
'Detected that SSL was required, '
'updating certs and client.')
self.client = self.get_rest_client()
raise
if manager_status['status'] != HEALTHY_STATE:
raise Exception(
'Timed out: Manager services did not start successfully. '
'Inactive services: {}'.format(
', '.join(
str(item)
for item in manager_status['services'].values()
if item['status'] != 'Active'
)
)
)
self._logger.info('Manager on %s is up', self.ip_address)
@only_manager
def get_rest_client(self, username=None, password=None, tenant=None,
proto='auto', download_ca=True):
test_mgr_conf = self._test_config['test_manager']
username = username or test_mgr_conf['username']
password = (
password
or self.install_config['manager']['security']['admin_password']
)
tenant = tenant or test_mgr_conf['tenant']
if proto == 'auto':
proto = 'http'
ssl_check = requests.get(
'http://{}/api/v3.1/status'.format(self.ip_address))
self._logger.info('Rest client generation SSL check response: %s',
ssl_check.text)
if 'SSL_REQUIRED' in ssl_check.text:
proto = 'https'
if proto == 'https' and download_ca:
self.download_rest_ca()
return util.create_rest_client(
self.ip_address,
username=username,
password=password,
tenant=tenant,
cert=self.api_ca_path,
protocol=proto,
)
@only_manager
def download_rest_ca(self, force=False):
self.api_ca_path = self._tmpdir / self.server_id + '_api.crt'
if os.path.exists(self.api_ca_path):
if force:
os.unlink(self.api_ca_path)
else:
self._logger.info('Skipping rest CA download, already in %s',
self.api_ca_path)
return
self._logger.info('Downloading rest CA to %s', self.api_ca_path)
self.get_remote_file(
'/etc/cloudify/ssl/cloudify_internal_ca_cert.pem',
self.api_ca_path,
)
@only_manager
def clean_local_rest_ca(self):
if self.api_ca_path and os.path.exists(self.api_ca_path):
self._logger.info('Removing local copy of manager CA.')
os.unlink(self.api_ca_path)
@only_manager
def enable_nics(self):
"""
Extra network interfaces need to be manually enabled on the manager
`manager.networks` is a dict that looks like this:
{
"network_0": "10.0.0.6",
"network_1": "11.0.0.6",
"network_2": "12.0.0.6"
}
"""
        # The MTU is set to 1450 because we're using a static BOOTPROTO here
        # (as opposed to DHCP, which would set the lower MTU by default)
template = textwrap.dedent("""
DEVICE="eth{0}"
BOOTPROTO="static"
ONBOOT="yes"
TYPE="Ethernet"
USERCTL="yes"
PEERDNS="yes"
IPV6INIT="no"
PERSISTENT_DHCLIENT="1"
IPADDR="{1}"
NETMASK="255.255.255.0"
DEFROUTE="no"
MTU=1450
""")
        self._logger.info('Adding extra NICs...')
        # network_0 is skipped here (the extra NICs start at network_1)
        for i in range(0, len(self.networks) - 1):
network_file_path = self._tmpdir / 'network_cfg_{}'.format(i)
ip_addr = self.networks['network_{}'.format(i + 1)]
config_content = template.format(i, ip_addr)
with open(network_file_path, 'w') as conf_handle:
conf_handle.write(config_content)
self.put_remote_file(
'/etc/sysconfig/network-scripts/ifcfg-eth{0}'.format(i),
network_file_path,
)
self.run_command('ifup eth{0}'.format(i), use_sudo=True)
def _is_manager_image_type(self):
if self.image_type == 'master':
return True
try:
# If the name starts with a number, it's a manager version
int(self.image_type[0])
return True
except Exception:
return False
def _set_image_details(self):
if self.is_manager:
distro = self._test_config['test_manager']['distro']
username_key = 'centos_7' if distro == 'centos' else 'rhel_7'
if self.image_type == '5.0.5':
# We didn't make a bootstrappable image for 5.0.5, so we have
# this ugly hack until 5.0.5 stops being supported
image_template = 'cloudify-manager-premium-{testing_version}'
if distro == 'rhel':
image_template += '-rhel'
else:
image_template = self._test_config[
'manager_image_names'][distro]
if self.image_type in ('master', 'installer'):
manager_version = self._test_config['testing_version']
else:
manager_version = self.image_type
if self.bootstrappable:
self.should_finalize = False
else:
self.restservice_expected = True
self.image_name = util.substitute_testing_version(
image_template,
manager_version,
).replace('-ga', '')
else:
username_key = self.image_type
image_names = {
entry: img
for entry, img in self._test_config.platform.items()
if entry.endswith('_image')
}
image_name = self.image_type + '_image'
if image_name not in image_names:
raise ValueError(
'{img} is not a supported image. '
'Supported: {supported}'.format(
img=image_name,
supported=','.join(image_names),
)
)
self.image_name = image_names[image_name]
if username_key.startswith('rhel') and not self.is_manager:
self.username = (
self._test_config.platform['rhel_username_override']
or self._test_config['test_os_usernames'][username_key]
)
else:
self.username = (
self._test_config['test_os_usernames'][username_key]
)
def rsync_backup(self):
self.wait_for_ssh()
self._logger.info(
'Creating Rsync backup for host {}. Might take up to 5 '
'minutes...'.format(self.deployment_id))
self.run_command("mkdir /cfy_backup", use_sudo=True)
rsync_backup_file = self._tmpdir / 'rsync_backup_{0}'.format(
self.ip_address)
locations = ' '.join(RSYNC_LOCATIONS)
backup_commands = (
f'sudo rsync -aAHX {locations} /cfy_backup '
'> /tmp/rsync_backup.log 2>&1 '
'; res=$? '
# An exit code of 24 means files vanished during copy. This is
# something that will happen occasionally and we should not
# treat it as a failure.
'; [[ $res -eq 24 ]] && res=0 '
'; [[ $res -eq 0 ]] && touch /tmp/rsync_backup_complete'
)
rsync_backup_file.write_text(
"(" + backup_commands + ") "
"|| touch /tmp/rsync_backup_failed &")
self.put_remote_file('/tmp/rsync_backup_script', rsync_backup_file)
self.run_command('nohup bash /tmp/rsync_backup_script &>/dev/null &')
def rsync_restore(self):
# Revert install config to avoid leaking state between tests
if self.is_manager:
self.install_config = copy.deepcopy(self.basic_install_config)
if self.is_manager:
self.stop_manager_services()
self._logger.info('Cleaning profile/CA dir from home dir')
self.run_command('rm -rf ~/.cloudify*')
self._logger.info('Cleaning root cloudify profile')
self.run_command('sudo rm -rf /root/.cloudify')
self.clean_local_rest_ca()
self._logger.info(
'Restoring from an Rsync backup for host {}. Might take '
'up to 1 minute...'.format(self.deployment_id))
rsync_restore_file = self._tmpdir / 'rsync_restore_{0}'.format(
self.ip_address)
rsync_restore_file.write_text(
"(sudo rsync -aAHX /cfy_backup/* / --delete "
"> /tmp/rsync_restore.log 2>&1 "
"&& touch /tmp/rsync_restore_complete) "
"|| touch /tmp/rsync_restore_failed &")
self.put_remote_file('/tmp/rsync_restore_script',
rsync_restore_file)
self.run_command('nohup bash /tmp/rsync_restore_script '
'&>/dev/null &')
def async_command_is_complete(self, process_name):
unfriendly_name = process_name.replace(' ', '_').lower()
result = self.run_command(
'if [[ -f /tmp/{0}_complete ]]; then echo done; '
'elif [[ -f /tmp/{0}_failed ]]; then echo failed; '
'else echo not done; '
'fi'.format(unfriendly_name)
).stdout.strip()
if result == 'done':
self._logger.info('{0} complete for host {1}!'
.format(process_name, self.deployment_id))
return True
elif result == 'failed':
self._logger.error('{0} FAILED for host {1}!'
.format(process_name, self.deployment_id))
self.run_command(f'cat /tmp/{unfriendly_name}.log',
warn_only=True)
raise RuntimeError('{} failed.'.format(process_name))
else:
self._logger.info('Still performing {0} on host {1}...'
.format(process_name, self.deployment_id))
return False
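# Owns the full set of test VMs for a test run: drives the infrastructure
# manager to create and destroy the environment, and coordinates rsync
# backup/restore across all instances.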
class Hosts(object):
def __init__(self,
ssh_key,
tmpdir,
test_config,
logger,
request,
number_of_instances=1,
instances=None,
flavor=None,
multi_net=False,
bootstrappable=False,
vm_net_mappings=None,
ipv6_net=False):
"""
instances: supply a list of VM instances.
This allows pre-configuration to happen before starting the hosts, or
for a list of instances of different versions to be created at once.
if instances is provided, number_of_instances will be ignored
"""
if sys.stdout.encoding.lower() != 'utf-8':
raise RuntimeError(
'Trying to run without IO encoding being set to utf-8 '
'will occasionally result in errors. Current encoding is '
'{current}. Please re-run, e.g. '
'PYTHONIOENCODING=utf-8 {command}'.format(
current=sys.stdout.encoding,
command=' '.join(sys.argv),
)
)
self._logger = logger
self._test_config = test_config
self._tmpdir = tmpdir
self._ssh_key = ssh_key
self.preconfigure_callback = None
if instances is None:
self.instances = [VM('master', test_config, bootstrappable)
for _ in range(number_of_instances)]
else:
self.instances = instances
self._request = request
self.tenant = None
self.deployments = []
self.blueprints = []
self.test_identifier = None
self._test_vm_installs = {}
self._test_vm_uninstalls = {}
self._platform_resource_ids = {}
self.multi_net = multi_net
self.vm_net_mappings = vm_net_mappings or {}
self.ipv6_net = ipv6_net
if self.ipv6_net:
if self._test_config['target_platform'].lower() != 'aws':
raise RuntimeError('Tests in the IPv6-enabled environments '
'require AWS target platform.')
if self.multi_net:
raise RuntimeError('Cannot initialize both multi-net and '
'IPv6-enabled infrastructure.')
infra_mgr_config = self._test_config['infrastructure_manager']
self._infra_client = util.create_rest_client(
infra_mgr_config['address'],
username='admin',
password=infra_mgr_config['admin_password'],
cert=infra_mgr_config['ca_cert'],
protocol='https' if infra_mgr_config['ca_cert'] else 'http',
)
if flavor:
self.server_flavor = flavor
else:
self.server_flavor = self._test_config.platform['linux_size']
def create(self):
"""Creates the infrastructure for a Cloudify manager."""
self._logger.info('Creating image based cloudify instances: '
'[number_of_instances=%d]', len(self.instances))
test_identifier = '{test}_{time}'.format(
# Strip out any characters from the test name that might cause
# systems with restricted naming to become upset
test=re.sub(
'[^a-zA-Z0-9]',
'',
# This is set by pytest and looks like:
# cosmo_tester/test_suites/some_tests/\
# some_test.py::test_specific_thing
os.environ['PYTEST_CURRENT_TEST'].split('/')[-1],
),
time=datetime.strftime(datetime.now(), '%Y%m%d%H%M%S'),
)
self.test_identifier = test_identifier
try:
self._logger.info('Creating test tenant')
self._infra_client.tenants.create(test_identifier)
self._infra_client._client.headers[
CLOUDIFY_TENANT_HEADER] = test_identifier
self.tenant = test_identifier
self._upload_secrets_to_infrastructure_manager()
self._upload_plugins_to_infrastructure_manager()
self._upload_blueprints_to_infrastructure_manager()
self._deploy_test_infrastructure(test_identifier)
# Deploy hosts in parallel
for index, instance in enumerate(self.instances):
self._start_deploy_test_vm(instance.image_name, index,
test_identifier,
instance.is_manager)
self._finish_deploy_test_vms()
for instance in self.instances:
if instance.is_manager and not instance.bootstrappable:
# A pre-bootstrapped manager is desired for this test,
# let's make it happen.
instance.bootstrap(
upload_license=self._test_config['premium'],
blocking=False)
for instance in self.instances:
if instance.is_manager and not instance.bootstrappable:
self._logger.info('Waiting for instance %s to bootstrap',
instance.image_name)
while not instance.bootstrap_is_complete():
time.sleep(3)
if instance.should_finalize:
instance.finalize_preparation()
except Exception as err:
self._logger.error(
"Encountered exception trying to create test resources: %s.\n"
"Attempting to tear down test resources.", str(err)
)
self.destroy()
raise
def destroy(self, passed=None):
"""Destroys the infrastructure. """
if passed is None:
try:
passed = self._request.session.testspassed
except AttributeError:
passed = 0
if passed:
if self._test_config['teardown']['on_success']:
self._logger.info('Preparing to destroy with passed tests...')
else:
self._logger.info(
                    'Tests passed, skipping teardown due to configuration. '
'To tear down, clean deployments on your test manager '
'under tenant {}'.format(self.test_identifier)
)
return
else:
if self._test_config['teardown']['on_failure']:
self._logger.info('Preparing to destroy with failed tests...')
else:
self._logger.info(
'Tests failed, skipping teardown due to configuration. '
'To tear down, clean deployments on your test manager '
'under tenant {}'.format(self.test_identifier)
)
return
        self._logger.info('Destroying test hosts...')
if self.tenant:
self._logger.info('Ensuring executions are stopped.')
cancelled = []
execs = self._infra_client.executions.list()
for execution in execs:
if execution['workflow_id'] != (
'create_deployment_environment'
):
self._logger.info(
'Ensuring %s (%s) is not running.',
execution['id'],
execution['workflow_id'],
)
self._infra_client.executions.cancel(
execution['id'], force=True, kill=True
)
cancelled.append(execution['id'])
else:
self._logger.info(
'Skipping %s (%s).',
execution['id'],
execution['workflow_id'],
)
cancel_complete = []
for execution_id in cancelled:
self._logger.info('Checking {} is cancelled.'.format(
execution_id,
))
for _ in range(30):
execution = self._infra_client.executions.get(
execution_id)
self._logger.info('{} is in state {}.'.format(
execution_id,
execution['status'],
))
if execution['status'] == 'cancelled':
cancel_complete.append(execution_id)
break
else:
time.sleep(3)
cancel_failures = set(cancelled).difference(cancel_complete)
if cancel_failures:
self._logger.error(
'Teardown failed due to the following executions not '
'entering the correct state after kill-cancel: {}'.format(
', '.join(cancel_failures),
)
)
raise RuntimeError('Could not complete teardown.')
self._start_undeploy_test_vms()
self._finish_undeploy_test_vms()
self._logger.info('Uninstalling infrastructure')
util.run_blocking_execution(
self._infra_client, 'infrastructure', 'uninstall',
self._logger)
util.delete_deployment(self._infra_client, 'infrastructure',
self._logger)
self._logger.info('Deleting blueprints.')
for blueprint in self.blueprints:
self._logger.info('Deleting %s', blueprint)
self._infra_client.blueprints.delete(blueprint)
self._logger.info('Deleting plugins.')
plugins = self._infra_client.plugins.list()
for plugin in plugins:
if plugin["tenant_name"] != self.tenant:
self._logger.info(
'Skipping shared %s (%s)',
plugin['package_name'],
plugin['id'],
)
else:
self._logger.info(
'Deleting %s (%s)',
plugin['package_name'],
plugin['id'],
)
self._infra_client.plugins.delete(plugin['id'])
self._logger.info('Deleting tenant %s', self.tenant)
self._infra_client._client.headers[
CLOUDIFY_TENANT_HEADER] = 'default_tenant'
self._infra_client.tenants.delete(self.tenant)
self.tenant = None
def rsync_backup(self):
for instance in self.instances:
instance.rsync_backup()
self._logger.info('Waiting for instance %s to Rsync backup',
instance.image_name)
for instance in self.instances:
while not instance.async_command_is_complete('Rsync backup'):
time.sleep(3)
def _upload_secrets_to_infrastructure_manager(self):
self._logger.info(
'Uploading secrets to infrastructure manager.'
)
mappings = self._test_config.platform.get(
'secrets_mapping', {})
for secret_name, mapping in mappings.items():
self._infra_client.secrets.create(
secret_name, self._test_config.platform[mapping],
)
with open(self._ssh_key.public_key_path) as ssh_pubkey_handle:
ssh_pubkey = ssh_pubkey_handle.read()
self._infra_client.secrets.create(
"ssh_public_key", ssh_pubkey,
)
def _upload_plugins_to_infrastructure_manager(self):
plugin_details = self._test_config.platform
current_plugins = self._infra_client.plugins.list(_all_tenants=True)
if any(
plugin["package_name"] == plugin_details['plugin_package_name']
and re.match(r'{}'.format(plugin_details['plugin_version']),
plugin["package_version"])
for plugin in current_plugins
):
self._logger.info('Plugin already present.')
else:
raise RuntimeError(
'The manager must have a plugin called {}. '
                'This should be uploaded with --visibility=global, '
'and match version regex: {}'.format(
plugin_details['plugin_package_name'],
plugin_details['plugin_version'],
)
)
def _upload_blueprints_to_infrastructure_manager(self):
self._logger.info(
'Uploading test blueprints to infrastructure manager.'
)
suffix = ''
if self.ipv6_net:
suffix = '{}-ipv6'.format(suffix)
if self.multi_net:
suffix = '{}-multi-net'.format(suffix)
self._infra_client.blueprints.upload(
util.get_resource_path(
'infrastructure_blueprints/{}/infrastructure{}.yaml'.format(
self._test_config['target_platform'],
suffix,
)
),
"infrastructure",
async_upload=True
)
util.wait_for_blueprint_upload(self._infra_client, "infrastructure")
self.blueprints.append('infrastructure')
test_vm_suffixes = ['']
if self.ipv6_net:
test_vm_suffixes.append('-ipv6')
elif self.multi_net:
test_vm_suffixes.append('-multi-net')
for suffix in test_vm_suffixes:
blueprint_id = "test_vm{}".format(suffix)
self._infra_client.blueprints.upload(
util.get_resource_path(
'infrastructure_blueprints/{}/vm{}.yaml'.format(
self._test_config['target_platform'],
suffix,
)
),
blueprint_id,
async_upload=True
)
util.wait_for_blueprint_upload(self._infra_client, blueprint_id)
self.blueprints.append('test_vm{}'.format(suffix))
def _deploy_test_infrastructure(self, test_identifier):
self._logger.info('Creating test infrastructure inputs.')
infrastructure_inputs = {'test_infrastructure_name': test_identifier}
mappings = self._test_config.platform.get(
'infrastructure_inputs_mapping', {})
for blueprint_input, mapping in mappings.items():
infrastructure_inputs[blueprint_input] = (
self._test_config.platform[mapping]
)
# Written to disk to aid in troubleshooting
infrastructure_inputs_path = self._tmpdir / 'infra_inputs.yaml'
with open(infrastructure_inputs_path, 'w') as inp_handle:
inp_handle.write(json.dumps(infrastructure_inputs))
self._logger.info(
'Creating test infrastructure using infrastructure manager.'
)
util.create_deployment(
self._infra_client, 'infrastructure', 'infrastructure',
self._logger, inputs=infrastructure_inputs,
)
self.deployments.append('infrastructure')
util.run_blocking_execution(
self._infra_client, "infrastructure", "install", self._logger)
if self.multi_net:
network_mappings = {}
if self._test_config['target_platform'] == 'aws':
cidr = 'CidrBlock'
else:
cidr = 'cidr'
for sn in range(1, 4):
subnet_details = self._infra_client.nodes.get(
deployment_id='infrastructure',
node_id='test_subnet_{}'.format(sn)
)['properties']['resource_config']
network_mappings['network_{}'.format(sn)] = ip_network(
# Has to be unicode for ipaddress library.
# Converting like this for py3 compat
u'{}'.format(subnet_details[cidr]),
)
self.network_mappings = network_mappings
if self._test_config['target_platform'] == 'aws':
self._populate_aws_platform_properties()
def _start_deploy_test_vm(self, image_id, index, test_identifier,
is_manager):
self._logger.info(
'Preparing to deploy instance %d of image %s',
index,
image_id,
)
vm_id = 'vm_{}_{}'.format(
image_id
.replace(' ', '_')
.replace('(', '_')
.replace(')', '_')
            # OpenStack drops the part of the name containing '.' when
            # generating resource names, so replace '.' with '-' here.
.replace('.', '-'),
index,
)
self._logger.info('Creating test VM inputs for %s_%d',
image_id, index)
vm_inputs = {
'test_infrastructure_name': test_identifier,
'userdata': self.instances[index].userdata,
'flavor': self.server_flavor,
}
if self._test_config['target_platform'] == 'openstack':
vm_inputs['floating_network_id'] = (
self._test_config['openstack']['floating_network_id']
)
vm_inputs['image'] = image_id
elif self._test_config['target_platform'] == 'aws':
vm_inputs.update(self._platform_resource_ids)
if self.multi_net:
use_net = self.vm_net_mappings.get(index, 1)
if use_net > 1:
key = 'subnet_{}_id'.format(use_net)
else:
key = 'subnet_id'
vm_inputs['subnet_id'] = self._platform_resource_ids[key]
if is_manager:
vm_inputs['name_filter'] = {
"Name": "tag:Name",
"Values": [image_id]
}
vm_inputs['image_owner'] = self._test_config['aws'][
'named_image_owners']
else:
vm_inputs['image_id'] = image_id
blueprint_id = 'test_vm'
if self.multi_net:
if index in self.vm_net_mappings:
vm_inputs['use_net'] = self.vm_net_mappings.get(index, 1)
if self._test_config['target_platform'] == 'aws':
vm_inputs.pop('subnet_2_id')
vm_inputs.pop('subnet_3_id')
else:
blueprint_id = blueprint_id + '-multi-net'
# Dumped to file to aid in troubleshooting
vm_inputs_path = self._tmpdir / '{}_{}.yaml'.format(vm_id, index)
with open(vm_inputs_path, 'w') as inp_handle:
inp_handle.write(json.dumps(vm_inputs))
self._logger.info('Deploying instance %d of %s', index, image_id)
util.create_deployment(
self._infra_client, blueprint_id, vm_id, self._logger,
inputs=vm_inputs,
)
self.deployments.append(vm_id)
self._test_vm_installs[vm_id] = (
self._infra_client.executions.start(
vm_id, 'install',
),
index,
)
def _populate_aws_platform_properties(self):
self._logger.info('Retrieving AWS resource IDs')
resource_ids = {}
subnet = util.get_node_instances(
'test_subnet_1', 'infrastructure', self._infra_client)[0]
resource_ids['subnet_id'] = subnet['runtime_properties'][
'aws_resource_id']
if self.multi_net:
subnet_2 = util.get_node_instances(
'test_subnet_2', 'infrastructure', self._infra_client)[0]
resource_ids['subnet_2_id'] = subnet_2['runtime_properties'][
'aws_resource_id']
subnet_3 = util.get_node_instances(
'test_subnet_3', 'infrastructure', self._infra_client)[0]
resource_ids['subnet_3_id'] = subnet_3['runtime_properties'][
'aws_resource_id']
vpc = util.get_node_instances(
'vpc', 'infrastructure', self._infra_client)[0]
resource_ids['vpc_id'] = vpc['runtime_properties']['aws_resource_id']
security_group = util.get_node_instances(
'security_group', 'infrastructure', self._infra_client)[0]
resource_ids['security_group_id'] = security_group[
'runtime_properties']['aws_resource_id']
self._platform_resource_ids = resource_ids
def _finish_deploy_test_vms(self):
node_instances = {}
for vm_id, details in self._test_vm_installs.items():
execution, index = details
util.wait_for_execution(self._infra_client, execution,
self._logger)
self._logger.info('Retrieving deployed instance details.')
node_instance = util.get_node_instances('test_host', vm_id,
self._infra_client)[0]
self._logger.info('Storing instance details.')
self._update_instance(
index,
node_instance,
)
node_instances[index] = node_instance
if self.ipv6_net:
self._disable_ipv4(node_instances)
def _start_undeploy_test_vms(self):
# Operate on all deployments except the infrastructure one
for vm_id in self.deployments[1:]:
self._logger.info('Uninstalling %s', vm_id)
self._test_vm_uninstalls[vm_id] = (
self._infra_client.executions.start(
vm_id, 'uninstall',
)
)
def _finish_undeploy_test_vms(self):
for vm_id, execution in self._test_vm_uninstalls.items():
util.wait_for_execution(self._infra_client, execution,
self._logger)
# Do this separately to cope with large deployment counts and small
# mgmtworker worker counts
for vm_id, execution in self._test_vm_uninstalls.items():
util.delete_deployment(self._infra_client, vm_id,
self._logger)
def _update_instance(self, server_index, node_instance):
instance = self.instances[server_index]
runtime_props = node_instance['runtime_properties']
public_ip_address = runtime_props['public_ip_address']
private_ip_address = runtime_props['ipv6_address'] if self.ipv6_net \
else runtime_props['ip']
node_instance_id = node_instance['id']
deployment_id = node_instance['deployment_id']
id_key = 'id'
if self._test_config['target_platform'] == 'aws':
id_key = 'aws_resource_id'
server_id = runtime_props[id_key]
networks = {}
if self.multi_net:
# Filter out public IPs from ipv4 addresses
ipv4_addresses = sorted([
# Has to be unicode for ipaddress library.
# Converting like this for py3 compat
ip_address(u'{}'.format(addr))
for addr in runtime_props['ipv4_addresses']
])
for ip in ipv4_addresses:
for net_name, network in self.network_mappings.items():
if ip in network:
networks[net_name] = str(ip)
break
instance.assign(
public_ip_address,
private_ip_address,
networks,
self._ssh_key,
self._logger,
self._tmpdir,
node_instance_id,
deployment_id,
server_id,
server_index,
)
def _disable_ipv4(self, node_instances):
self._logger.info('Disabling IPv4 on private interfaces.')
# This code needs to be run when all of the cluster VMs are already set
# up and running. This is because we must know IP addresses of all of
# the nodes in order to disable IPv4 communication across the cluster.
for server_index, node_instance in node_instances.items():
instance = self.instances[server_index]
instance.wait_for_ssh()
for ip in [ni['runtime_properties']['ip']
for i, ni in node_instances.items()
if i != server_index]:
self._logger.info(
'Poisoning ARP to disable IPv4 communication {0}->{1}.'
.format(node_instance['runtime_properties']['ip'], ip))
instance.run_command('sudo arp -s {0} de:ad:be:ef:ca:fe'
.format(ip))
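# --- Hedged usage sketch (illustration only; names and argument order below are assumptions) ---
# The class above is typically driven from a pytest fixture: build it, call
# create() to deploy the test VMs through the infrastructure manager, and make
# sure destroy() runs afterwards so the teardown on_success/on_failure
# configuration is honoured.  ``Hosts`` stands in for the class defined above;
# ``ssh_key``, ``module_tmpdir``, ``test_config`` and ``logger`` are assumed
# fixtures, and the constructor argument order is assumed.
#
#   @pytest.fixture(scope='module')
#   def image_based_hosts(request, ssh_key, module_tmpdir, test_config, logger):
#       hosts = Hosts(ssh_key, module_tmpdir, test_config, logger, request)
#       try:
#           hosts.create()
#           yield hosts
#       finally:
#           hosts.destroy()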
|
|
import hashlib
import logging
import os
import tempfile
import pickle
import random
import shutil
import string
import sys
class BlockMD5(object):
def __init__(self):
return None
    @staticmethod
    def compare_blocks(offset, name1, name2):
        '''compare a single block of two files byte-by-byte'''
info = os.stat(name1)
fd1 = os.open(name1, os.O_RDONLY)
fd2 = os.open(name2, os.O_RDONLY)
os.lseek(fd1, offset, os.SEEK_SET)
os.lseek(fd2, offset, os.SEEK_SET)
buf1 = os.read(fd1, info.st_blksize)
buf2 = os.read(fd2, info.st_blksize)
os.close(fd1)
os.close(fd2)
for i in range(0, info.st_blksize):
if buf1[i] != buf2[i]:
print("Mismatch at byte_num '{0}': {1}, {2}".format(
i, buf1[i], buf2[i]))
return
def create_map(self, name):
'''Create a per block md5sum of a file
and return a dict of block->md5hashes'''
info = os.stat(name)
left = info.st_size
fd = os.open(name, os.O_RDONLY)
offset = 0
mapd = {}
while left > 0:
buf = os.read(fd, info.st_blksize)
left -= len(buf)
h5 = hashlib.md5(buf)
mapd[offset] = h5.hexdigest()
offset += len(buf)
os.close(fd)
return mapd
def validate_map(self, name, mapd):
'''Compares the block md5sums to each block of the file'''
failed = []
info = os.stat(name)
fd = os.open(name, os.O_RDONLY)
# O_DIRECT didn't work on my test system, but worked on a GPFS filesystem
#fd = os.open(name, os.O_RDONLY+os.O_DIRECT)
left = info.st_size
offset = 0
while left > 0:
buf = os.read(fd, info.st_blksize)
left -= len(buf)
h5 = hashlib.md5(buf)
digest = h5.hexdigest()
if digest != mapd[offset]:
failed.append((offset, digest, mapd[offset]))
offset += len(buf)
os.close(fd)
if len(failed) > 0:
return False, failed
else:
return True
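# --- Hedged usage sketch (illustration only; the file path is an assumption) ---
# Typical round trip with BlockMD5: record per-block checksums for a file,
# then re-validate the same file later.  validate_map() returns True on
# success, or a (False, failures) tuple listing (offset, got, expected).
#
#   bmd5 = BlockMD5()
#   block_map = bmd5.create_map('/tmp/datafile')
#   result = bmd5.validate_map('/tmp/datafile', block_map)
#   if result is not True:
#       ok, failures = result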
class FileMD5(object):
def __init__(self, loglvl='info'):
if loglvl == 'verbose':
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(message)s', level=logging.INFO)
return None
def create_md5(self, name):
with open(name, 'rb') as f:
md5sum = hashlib.md5(f.read()).hexdigest()
return md5sum
def validate_md5(self, name, md5sum):
logging.debug("DEBUG: FileMD5().validate_md5({0}, {1})".format(name, md5sum))
with open(name, 'rb') as f:
current_md5 = hashlib.md5(f.read()).hexdigest()
if current_md5 != md5sum:
return False, (current_md5, md5sum)
else:
return True
class FileTree(object):
def __init__(self):
'''Set defaults'''
self.aligned = True
self.dirs_per_level = 1
self.files_per_dir = 1
self.fixed_size = False
self.loglvl = 'info'
self.max_size = 8192
self.num_levels = 1
self.stats = False
self.suffix = ''
self.topdir = None
return None
def set_config(self, kwargs):
'''Set class configuration'''
for k, v in kwargs.items():
setattr(self, k, v)
# get the blocksize
vfsstats = os.statvfs(self.topdir)
self.bufsize = vfsstats.f_bsize
# set logging
if self.loglvl == 'verbose' or self.loglvl == 'debug':
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(message)s', level=logging.INFO)
return
def _free_space(self, num_bytes):
'''Checks to see if there is enough space on the filesystem'''
vfsstats = os.statvfs(os.path.dirname(os.path.abspath(self.topdir)))
        # free bytes = free blocks * fragment size (f_ffree is the free inode
        # count, so it must not be used to compute free bytes)
        bytes_free = vfsstats.f_bfree * vfsstats.f_frsize
logging.debug("DEBUG: Bytes_to_write: {0}, Bytes_Free: {1}".format(
num_bytes, bytes_free))
if num_bytes > bytes_free:
return False
return True
def _path_exists(self, filepath):
'''Checks to see if the path exists'''
if not os.path.isdir(os.path.dirname(filepath)):
return False
return True
def _sub_tree(self, path, levels, dirs_per_level):
'''should be called recursively to generate names of levels of dirs'''
for n in range(dirs_per_level):
dirname = "L{0}D{1}".format(levels, n)
newdir = os.path.join(path, dirname)
self.dirs.append(newdir)
for nl in range(levels):
self._sub_tree(newdir, nl, dirs_per_level)
def gen_dir_array(self, topdir, levels, dirs_per_level):
'''Generate the directory hierarchy array all at once
I won't lie, I'm basically recreating (poor attempt anyway)
fdtree in Python:
https://computing.llnl.gov/?set=code&page=sio_downloads
'''
# make an array of directory paths
self.dirs = []
# this will start recursively calling itself until
# you've reached the end (num_levels)
self._sub_tree(topdir, levels, dirs_per_level)
return
def queue_walk_tree(self, path, tasks=2):
        '''Stub: walk the tree with a pool of worker processes (not implemented yet)'''
#import multiprocessing
return
def random_name(self, size=10, chars=string.ascii_lowercase + string.digits):
'''return a random name'''
rname = ''.join(random.choice(chars) for x in range(size))
rname += self.suffix
return rname
def serial_create_dir_tree(self):
'''Create a directory tree'''
for d in self.dirs:
if not os.path.exists(d):
os.makedirs(d)
return
def serial_delete_dirs(self):
        '''Delete the generated FileTree directories'''
for d in self.dirs:
if os.path.exists(d):
shutil.rmtree(d)
return
def serial_populate_dir_tree(self):
'''Write data files in serial to the directory tree'''
for d in self.dirs:
for f in range(self.files_per_dir):
name = self.random_name()
filename = os.path.join(d, name)
result, err = self.write_file(filename)
if not result:
print(err)
break
return
def walk_tree_generator(self, path):
'''
Returns a generator that can be used to walk a directory
tree
You can then make a list of all files via:
files = []
for dir in walk:
for f in dir[2]:
files.append("{0}/{1}".format(dir[0], f))
Then use that for whatever...
'''
walk = os.walk(path)
return walk
def write_file(self, filename):
        '''Write a single file of random data; its size depends on the aligned/fixed_size settings'''
        # when aligned, the number of bytes written is a multiple of the fs blocksize
if self.fixed_size:
num_bytes = self.max_size
elif self.aligned and not self.fixed_size:
num_bytes = random.randrange(self.bufsize,
stop=self.max_size, step=self.bufsize)
# pick a random bytesize between 0 and max_size
else:
num_bytes = random.randrange(1, self.max_size)
# check to see if enough space is available
if not self._free_space(num_bytes):
return False, "Not enough space to write data."
# check to see if path exists
if not self._path_exists(filename):
return False, "Directory does not exist."
# figure out how many chunks we need to write
bytes_left = num_bytes
# write out the random data
logging.debug("DEBUG: {0}.{1}(): Writing file: {2}".format(
self.__class__.__name__, self.write_file.__name__, filename))
with open(filename, 'wb') as f:
try:
while bytes_left > 0:
if bytes_left < self.bufsize:
f.write(os.urandom(bytes_left))
bytes_left -= self.bufsize
else:
f.write(os.urandom(self.bufsize))
bytes_left -= self.bufsize
except IOError as ioe:
print("IOError: {0}".format(ioe))
print("We bail on IO Errors...")
sys.exit(1)
return True, "Success"
# for when you don't want to use the FileTree class,
# and simply want to create a random file
def create_random_file(name, numbytes):
'''writes out a file full of random data'''
path = os.path.dirname(os.path.abspath(name))
vfsstats = os.statvfs(path)
    # don't write the file if there isn't enough free space on the filesystem
    if numbytes > (vfsstats.f_bfree * vfsstats.f_frsize):
print("Not enough space to write data.")
return
bufsize = vfsstats.f_bsize
if numbytes % bufsize != 0:
print("Number of bytes must be a multiple of blocksize ({0})".format(
bufsize))
return
bytes_left = numbytes
with open(name, 'wb') as f:
while bytes_left > 0:
f.write(os.urandom(bufsize))
bytes_left -= bufsize
return
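# --- Hedged usage sketch (illustration only; paths and sizes are assumptions) ---
# Building and populating a small tree with FileTree, then writing one extra
# standalone random file.  Note that set_config() calls os.statvfs(topdir),
# so the top directory must already exist.
#
#   os.makedirs('/tmp/ft_test', exist_ok=True)
#   ft = FileTree()
#   ft.set_config({'topdir': '/tmp/ft_test', 'num_levels': 2,
#                  'dirs_per_level': 2, 'files_per_dir': 3,
#                  'max_size': 65536, 'suffix': '.dat'})
#   ft.gen_dir_array(ft.topdir, ft.num_levels, ft.dirs_per_level)
#   ft.serial_create_dir_tree()
#   ft.serial_populate_dir_tree()
#   create_random_file('/tmp/ft_test/extra.bin', ft.bufsize * 4)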
|
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from functools import partial
start = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end = end_base + pd.Timedelta(days=365*10)  # <JDG> increased from 1 year to 10 years for long-dated options
def canonicalize_datetime(dt):
# Strip out any HHMMSS or timezone info in the user's datetime, so that
# all the datetimes we return will be 00:00:00 UTC.
return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
new_years_sunday = rrule.rrule(
rrule.MONTHLY,
byyearday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_sunday)
mlk_day = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
byweekday=(rrule.MO(+3)),
cache=True,
dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(mlk_day)
presidents_day = rrule.rrule(
rrule.MONTHLY,
bymonth=2,
byweekday=(rrule.MO(3)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(presidents_day)
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
memorial_day = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(memorial_day)
july_4th = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=4,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th)
july_4th_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_sunday)
july_4th_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_saturday)
labor_day = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(labor_day)
thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
byweekday=(rrule.TH(4)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(thanksgiving)
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
christmas_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_sunday)
    # If Christmas falls on a Saturday, then the 24th (a Friday) is observed.
christmas_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_saturday)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# Add September 11th closings
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
# Due to the terrorist attacks, the stock market did not open on 9/11/2001
# It did not open again until 9/17/2001.
#
# September 2001
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
for day_num in range(11, 17):
non_trading_days.append(
datetime(2001, 9, day_num, tzinfo=pytz.utc))
# Add closings due to Hurricane Sandy in 2012
# http://en.wikipedia.org/wiki/Hurricane_sandy
#
# The stock exchange was closed due to Hurricane Sandy's
# impact on New York.
# It closed on 10/29 and 10/30, reopening on 10/31
# October 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
for day_num in range(29, 31):
non_trading_days.append(
datetime(2012, 10, day_num, tzinfo=pytz.utc))
# Misc closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# National Days of Mourning
# - President Richard Nixon
non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))
# - President Ronald W. Reagan - June 11, 2004
non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))
# - President Gerald R. Ford - Jan 2, 2007
non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
def get_early_closes(start, end):
# 1:00 PM close rules based on
# http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa
# and verified against http://www.nyse.com/pdfs/closings.pdf
# These rules are valid starting in 1993
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
day_after_thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
# 4th Friday isn't correct if month starts on Friday, so restrict to
# day range:
byweekday=(rrule.FR),
bymonthday=range(23, 30),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_after_thanksgiving)
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
friday_after_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# valid 1993-2007
until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(friday_after_christmas)
day_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=(rrule.MO, rrule.TU, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_before_independence_day)
day_after_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# starting in 2013: wednesday before independence day
until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(day_after_independence_day)
wednesday_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.WE,
cache=True,
# starting in 2013
dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),
until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))
)
early_close_rules.append(wednesday_before_independence_day)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
# Misc early closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# New Year's Eve
nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)
if start <= nye_1999 and nye_1999 <= end:
early_closes.append(nye_1999)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=9,
minute=31),
tz='US/Eastern').tz_convert('UTC')
# 1 PM if early close, 4 PM otherwise
close_hour = 13 if day in early_closes else 16
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=close_hour),
tz='US/Eastern').tz_convert('UTC')
return market_open, market_close
def get_open_and_closes(trading_days, early_closes, get_open_and_close):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
get_o_and_c = partial(get_open_and_close, early_closes=early_closes)
open_and_closes['market_open'], open_and_closes['market_close'] = \
zip(*open_and_closes.index.map(get_o_and_c))
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close)
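# --- Hedged usage sketch (illustration only; the lookups below are assumptions
# about how a caller might use the module-level objects built above) ---
#
#   day = trading_days[0]
#   day in non_trading_days                          # False for any trading day
#   market_open, market_close = get_open_and_close(day, early_closes)
#   open_and_closes.loc[day, 'market_open']          # same value, precomputed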
|
|
from __future__ import unicode_literals
from django.db import models
from django_evolution.errors import SimulationFailure
from django_evolution.mutations import RenameField
from django_evolution.signature import (AppSignature,
ModelSignature,
ProjectSignature)
from django_evolution.tests.base_test_case import EvolutionTestCase
from django_evolution.tests.models import BaseTestModel
class RenameAnchor1(BaseTestModel):
value = models.IntegerField()
class RenameAnchor2(BaseTestModel):
value = models.IntegerField()
class Meta(BaseTestModel.Meta):
db_table = 'custom_rename_anchor_table'
class RenameAnchor3(BaseTestModel):
value = models.IntegerField()
class RenameFieldBaseModel(BaseTestModel):
char_field = models.CharField(max_length=20)
int_field = models.IntegerField()
int_field_named = models.IntegerField(db_column='custom_db_col_name')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed',
db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
m2m_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
class CustomRenameTableModel(BaseTestModel):
value = models.IntegerField()
alt_value = models.CharField(max_length=20)
class Meta(BaseTestModel.Meta):
db_table = 'custom_rename_table_name'
class RenameFieldTests(EvolutionTestCase):
"""Unit tests for RenameField mutations."""
sql_mapping_key = 'rename_field'
default_base_model = RenameFieldBaseModel
default_extra_models = [
('RenameAnchor1', RenameAnchor1),
('RenameAnchor2', RenameAnchor2),
('RenameAnchor3', RenameAnchor3),
]
def default_create_test_data(self, db_name):
"""Create test data for the base model.
Args:
db_name (unicode):
The name of the database to create models on.
"""
anchor1 = RenameAnchor1.objects.using(db_name).create(value=100)
anchor2 = RenameAnchor2.objects.using(db_name).create(value=200)
anchor3 = RenameAnchor3.objects.using(db_name).create(value=300)
model = RenameFieldBaseModel.objects.using(db_name).create(
char_field='test',
int_field=1,
int_field_named=2,
int_field_named_indexed=3,
fk_field=anchor1)
model.m2m_field.add(anchor2)
model.m2m_field_named.add(anchor3)
def test_with_bad_app(self):
"""Testing RenameField with application not in signature"""
mutation = RenameField('TestModel', 'char_field1', 'char_field2')
message = (
'Cannot rename the field "char_field1" on model '
'"badapp.TestModel". The application could not be found in the '
'signature.'
)
with self.assertRaisesMessage(SimulationFailure, message):
mutation.run_simulation(app_label='badapp',
project_sig=ProjectSignature(),
database_state=None)
def test_with_bad_model(self):
"""Testing RenameField with model not in signature"""
mutation = RenameField('TestModel', 'char_field1', 'char_field2')
project_sig = ProjectSignature()
project_sig.add_app_sig(AppSignature(app_id='tests'))
message = (
'Cannot rename the field "char_field1" on model '
'"tests.TestModel". The model could not be found in the '
'signature.'
)
with self.assertRaisesMessage(SimulationFailure, message):
mutation.run_simulation(app_label='tests',
project_sig=project_sig,
database_state=None)
def test_with_bad_field(self):
"""Testing RenameField with field not in signature"""
mutation = RenameField('TestModel', 'char_field1', 'char_field2')
model_sig = ModelSignature(model_name='TestModel',
table_name='tests_testmodel')
app_sig = AppSignature(app_id='tests')
app_sig.add_model_sig(model_sig)
project_sig = ProjectSignature()
project_sig.add_app_sig(app_sig)
message = (
'Cannot rename the field "char_field1" on model '
'"tests.TestModel". The field could not be found in the '
'signature.'
)
with self.assertRaisesMessage(SimulationFailure, message):
mutation.run_simulation(app_label='tests',
project_sig=project_sig,
database_state=None)
def test_rename(self):
"""Testing RenameField"""
class DestModel(BaseTestModel):
char_field = models.CharField(max_length=20)
renamed_field = models.IntegerField()
int_field_named = models.IntegerField(
db_column='custom_db_col_name')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
m2m_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'int_field', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'int_field' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.IntegerField,"
" initial=<<USER VALUE REQUIRED>>)",
"DeleteField('TestModel', 'int_field')",
],
'RenameColumnModel')
def test_rename_with_custom_table_non_m2m_ignored(self):
"""Testing RenameField with custom table name for non-ManyToManyField
is ignored
"""
class DestModel(BaseTestModel):
char_field = models.CharField(max_length=20)
renamed_field = models.IntegerField()
int_field_named = models.IntegerField(
db_column='custom_db_col_name')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
m2m_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'int_field', 'renamed_field',
db_table='ignored_db-table'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'int_field' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.IntegerField,"
" initial=<<USER VALUE REQUIRED>>)",
"DeleteField('TestModel', 'int_field')",
],
'RenameColumnWithTableNameModel')
def test_rename_with_primary_key(self):
"""Testing RenameField with primary key"""
class DestModel(BaseTestModel):
my_pk_id = models.AutoField(primary_key=True)
char_field = models.CharField(max_length=20)
int_field = models.IntegerField()
int_field_named = models.IntegerField(
db_column='custom_db_col_name')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
m2m_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'id', 'my_pk_id'),
],
("In model tests.TestModel:\n"
" Field 'my_pk_id' has been added\n"
" Field 'id' has been deleted"),
[
"AddField('TestModel', 'my_pk_id', models.AutoField,"
" initial=<<USER VALUE REQUIRED>>, primary_key=True)",
"DeleteField('TestModel', 'id')",
],
'RenamePrimaryKeyColumnModel')
def test_rename_with_foreign_key(self):
"""Testing RenameField with ForeignKey"""
class DestModel(BaseTestModel):
char_field = models.CharField(max_length=20)
int_field = models.IntegerField()
int_field_named = models.IntegerField(
db_column='custom_db_col_name')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
renamed_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
m2m_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'fk_field', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'fk_field' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.ForeignKey,"
" initial=<<USER VALUE REQUIRED>>,"
" related_model='tests.RenameAnchor1')",
"DeleteField('TestModel', 'fk_field')",
],
perform_mutations=False)
def test_rename_with_custom_column_name(self):
"""Testing RenameField with custom column name"""
class DestModel(BaseTestModel):
char_field = models.CharField(max_length=20)
int_field = models.IntegerField()
renamed_field = models.IntegerField()
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
m2m_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'int_field_named', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'int_field_named' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.IntegerField,"
" initial=<<USER VALUE REQUIRED>>)",
"DeleteField('TestModel', 'int_field_named')",
],
'RenameNonDefaultColumnNameModel')
def test_rename_custom_column_name_to_new_custom_name(self):
"""Testing RenameField with custom column name to a new custom column
name
"""
class DestModel(BaseTestModel):
char_field = models.CharField(max_length=20)
int_field = models.IntegerField()
renamed_field = models.IntegerField(
db_column='non-default_column_name')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
m2m_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'int_field_named', 'renamed_field',
db_column='non-default_column_name'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'int_field_named' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.IntegerField,"
" db_column='non-default_column_name',"
" initial=<<USER VALUE REQUIRED>>)",
"DeleteField('TestModel', 'int_field_named')",
],
'RenameNonDefaultColumnNameToNonDefaultNameModel')
def test_rename_with_custom_column_and_table_names(self):
"""Testing RenameField with custom column and ignored
custom table name
"""
class DestModel(BaseTestModel):
char_field = models.CharField(max_length=20)
int_field = models.IntegerField()
renamed_field = models.IntegerField(
db_column='non-default_column_name2')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
m2m_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'int_field_named', 'renamed_field',
db_column='non-default_column_name2',
db_table='custom_ignored_db-table'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'int_field_named' has been deleted"),
[
"AddField('TestModel', 'renamed_field', models.IntegerField,"
" db_column='non-default_column_name2',"
" initial=<<USER VALUE REQUIRED>>)",
"DeleteField('TestModel', 'int_field_named')",
],
'RenameNonDefaultColumnNameToNonDefaultNameAndTableModel')
def test_rename_in_custom_table_name(self):
"""Testing RenameField with custom table name"""
class DestModel(BaseTestModel):
renamed_field = models.IntegerField()
alt_value = models.CharField(max_length=20)
class Meta(BaseTestModel.Meta):
db_table = 'custom_rename_table_name'
def create_test_data(db_name):
CustomRenameTableModel.objects.create(value=1,
alt_value='test')
self.set_base_model(CustomRenameTableModel,
name='CustomRenameTableModel')
self.perform_evolution_tests(
DestModel,
[
RenameField('CustomRenameTableModel', 'value',
'renamed_field'),
],
("In model tests.CustomRenameTableModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'value' has been deleted"),
[
"AddField('CustomRenameTableModel', 'renamed_field',"
" models.IntegerField, initial=<<USER VALUE REQUIRED>>)",
"DeleteField('CustomRenameTableModel', 'value')",
],
'RenameColumnCustomTableModel',
model_name='CustomRenameTableModel',
create_test_data_func=create_test_data)
def test_rename_m2m_table(self):
"""Testing RenameField with renaming ManyToManyField table name"""
class DestModel(BaseTestModel):
char_field = models.CharField(max_length=20)
int_field = models.IntegerField()
int_field_named = models.IntegerField(
db_column='custom_db_col_name')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
renamed_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'm2m_field', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'm2m_field' has been deleted"),
[
"AddField('TestModel', 'renamed_field',"
" models.ManyToManyField,"
" related_model='tests.RenameAnchor2')",
"DeleteField('TestModel', 'm2m_field')",
],
'RenameManyToManyTableModel')
def test_rename_m2m_db_column_ignored(self):
"""Testing RenameField with renaming ManyToManyField db_column is
ignored
"""
class DestModel(BaseTestModel):
char_field = models.CharField(max_length=20)
int_field = models.IntegerField()
int_field_named = models.IntegerField(
db_column='custom_db_col_name')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
renamed_field = models.ManyToManyField(RenameAnchor2)
m2m_field_named = models.ManyToManyField(
RenameAnchor3, db_table='non-default_db_table')
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'm2m_field', 'renamed_field',
db_column='ignored_db-column'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'm2m_field' has been deleted"),
[
"AddField('TestModel', 'renamed_field',"
" models.ManyToManyField,"
" related_model='tests.RenameAnchor2')",
"DeleteField('TestModel', 'm2m_field')",
],
'RenameManyToManyTableWithColumnNameModel')
def test_rename_m2m_custom_table_name_to_default(self):
"""Testing RenameField with renaming ManyToManyField custom table
name to default name
"""
class DestModel(BaseTestModel):
char_field = models.CharField(max_length=20)
int_field = models.IntegerField()
int_field_named = models.IntegerField(
db_column='custom_db_col_name')
int_field_named_indexed = models.IntegerField(
db_column='custom_db_col_name_indexed', db_index=True)
fk_field = models.ForeignKey(RenameAnchor1,
on_delete=models.CASCADE)
m2m_field = models.ManyToManyField(RenameAnchor2)
renamed_field = models.ManyToManyField(
RenameAnchor3)
self.perform_evolution_tests(
DestModel,
[
RenameField('TestModel', 'm2m_field_named', 'renamed_field'),
],
("In model tests.TestModel:\n"
" Field 'renamed_field' has been added\n"
" Field 'm2m_field_named' has been deleted"),
[
"AddField('TestModel', 'renamed_field',"
" models.ManyToManyField,"
" related_model='tests.RenameAnchor3')",
"DeleteField('TestModel', 'm2m_field_named')",
],
'RenameNonDefaultManyToManyTableModel')
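# --- Hedged usage sketch (illustration only, not part of the test module) ---
# Outside of these tests, the RenameField mutation exercised above would
# normally appear in an application's evolution file; the model and field
# names here are assumptions.
#
#   from django_evolution.mutations import RenameField
#
#   MUTATIONS = [
#       RenameField('TestModel', 'int_field', 'renamed_field'),
#   ]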
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cross-language tests for the JWT primitives."""
import datetime
import json
from typing import Iterable
from absl.testing import absltest
from absl.testing import parameterized
import tink
from tink import jwt
from util import supported_key_types
from util import testing_servers
SUPPORTED_LANGUAGES = testing_servers.SUPPORTED_LANGUAGES_BY_PRIMITIVE['jwt']
def setUpModule():
testing_servers.start('jwt')
def tearDownModule():
testing_servers.stop()
def all_jwt_mac_key_template_names() -> Iterable[str]:
"""Yields all JWT MAC key template names."""
for key_type in supported_key_types.JWT_MAC_KEY_TYPES:
for key_template_name in supported_key_types.KEY_TEMPLATE_NAMES[key_type]:
yield key_template_name
def all_jwt_signature_key_template_names() -> Iterable[str]:
"""Yields all JWT signature key template names."""
for key_type in supported_key_types.JWT_SIGNATURE_KEY_TYPES:
for key_template_name in supported_key_types.KEY_TEMPLATE_NAMES[key_type]:
yield key_template_name
class JwtTest(parameterized.TestCase):
@parameterized.parameters(all_jwt_mac_key_template_names())
def test_compute_verify_jwt_mac(self, key_template_name):
supported_langs = supported_key_types.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
key_template_name]
self.assertNotEmpty(supported_langs)
key_template = supported_key_types.KEY_TEMPLATE[key_template_name]
# Take the first supported language to generate the keyset.
keyset = testing_servers.new_keyset(supported_langs[0], key_template)
supported_jwt_macs = [
testing_servers.jwt_mac(lang, keyset) for lang in supported_langs
]
unsupported_jwt_macs = [
testing_servers.jwt_mac(lang, keyset)
for lang in SUPPORTED_LANGUAGES
if lang not in supported_langs
]
now = datetime.datetime.now(tz=datetime.timezone.utc)
raw_jwt = jwt.new_raw_jwt(
issuer='issuer',
expiration=now + datetime.timedelta(seconds=100))
for p in supported_jwt_macs:
compact = p.compute_mac_and_encode(raw_jwt)
validator = jwt.new_validator(expected_issuer='issuer', fixed_now=now)
for p2 in supported_jwt_macs:
verified_jwt = p2.verify_mac_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
for p2 in unsupported_jwt_macs:
with self.assertRaises(
tink.TinkError,
msg='%s supports verify_mac_and_decode with %s unexpectedly'
% (p2.lang, key_template_name)):
p2.verify_mac_and_decode(compact, validator)
for p in unsupported_jwt_macs:
with self.assertRaises(
tink.TinkError,
msg='%s supports compute_mac_and_encode with %s unexpectedly' %
(p.lang, key_template_name)):
p.compute_mac_and_encode(raw_jwt)
@parameterized.parameters(all_jwt_signature_key_template_names())
def test_jwt_public_key_sign_verify(self, key_template_name):
supported_langs = supported_key_types.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
key_template_name]
self.assertNotEmpty(supported_langs)
key_template = supported_key_types.KEY_TEMPLATE[key_template_name]
# Take the first supported language to generate the private keyset.
private_keyset = testing_servers.new_keyset(supported_langs[0],
key_template)
supported_signers = [
testing_servers.jwt_public_key_sign(lang, private_keyset)
for lang in supported_langs
]
unsupported_signers = [
testing_servers.jwt_public_key_sign(lang, private_keyset)
for lang in SUPPORTED_LANGUAGES
if lang not in supported_langs
]
public_keyset = testing_servers.public_keyset('java', private_keyset)
supported_verifiers = [
testing_servers.jwt_public_key_verify(lang, public_keyset)
for lang in supported_langs
]
unsupported_verifiers = [
testing_servers.jwt_public_key_verify(lang, public_keyset)
for lang in SUPPORTED_LANGUAGES
if lang not in supported_langs
]
now = datetime.datetime.now(tz=datetime.timezone.utc)
raw_jwt = jwt.new_raw_jwt(
issuer='issuer',
expiration=now + datetime.timedelta(seconds=100))
for signer in supported_signers:
compact = signer.sign_and_encode(raw_jwt)
validator = jwt.new_validator(expected_issuer='issuer', fixed_now=now)
for verifier in supported_verifiers:
verified_jwt = verifier.verify_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
for verifier in unsupported_verifiers:
with self.assertRaises(
tink.TinkError,
msg='%s supports jwt_public_key_verify with %s unexpectedly' %
(verifier.lang, key_template_name)):
verifier.verify_and_decode(compact, validator)
for signer in unsupported_signers:
with self.assertRaises(
tink.TinkError,
msg='%s supports jwt_public_key_sign with %s unexpectedly' %
(signer.lang, key_template_name)):
_ = signer.sign_and_encode(raw_jwt)
@parameterized.parameters(all_jwt_signature_key_template_names())
def test_jwt_public_key_sign_export_import_verify(self, key_template_name):
supported_langs = supported_key_types.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
key_template_name]
self.assertNotEmpty(supported_langs)
key_template = supported_key_types.KEY_TEMPLATE[key_template_name]
# Take the first supported language to generate the private keyset.
private_keyset = testing_servers.new_keyset(supported_langs[0],
key_template)
now = datetime.datetime.now(tz=datetime.timezone.utc)
raw_jwt = jwt.new_raw_jwt(
issuer='issuer', expiration=now + datetime.timedelta(seconds=100))
validator = jwt.new_validator(expected_issuer='issuer', fixed_now=now)
for lang1 in supported_langs:
# in lang1: sign token and export public keyset to a JWK set
signer = testing_servers.jwt_public_key_sign(lang1, private_keyset)
compact = signer.sign_and_encode(raw_jwt)
public_keyset = testing_servers.public_keyset(lang1, private_keyset)
public_jwk_set = testing_servers.jwk_set_from_keyset(
lang1, public_keyset)
for lang2 in supported_langs:
# in lang2: import the public JWK set and verify the token
public_keyset = testing_servers.jwk_set_to_keyset(
lang2, public_jwk_set)
verifier = testing_servers.jwt_public_key_verify(lang2, public_keyset)
verified_jwt = verifier.verify_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
# Additional tests for the "kid" property of the JWK and the "kid"
# header of the token. Either of them may be missing, but they must not
# have different values.
jwks = json.loads(public_jwk_set)
has_kid = 'kid' in jwks['keys'][0]
if has_kid:
# Change the "kid" property of the JWK.
jwks['keys'][0]['kid'] = 'unknown kid'
public_keyset = testing_servers.jwk_set_to_keyset(
lang2, json.dumps(jwks))
verifier = testing_servers.jwt_public_key_verify(lang2, public_keyset)
with self.assertRaises(
tink.TinkError,
msg='%s accepts tokens with an incorrect kid unexpectedly' %
lang2):
verifier.verify_and_decode(compact, validator)
# Remove the "kid" property of the JWK.
del jwks['keys'][0]['kid']
public_keyset = testing_servers.jwk_set_to_keyset(
lang2, json.dumps(jwks))
verifier = testing_servers.jwt_public_key_verify(lang2, public_keyset)
verified_jwt = verifier.verify_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
else:
# Add a "kid" property of the JWK.
jwks['keys'][0]['kid'] = 'unknown kid'
public_keyset = testing_servers.jwk_set_to_keyset(
lang2, json.dumps(jwks))
verifier = testing_servers.jwt_public_key_verify(lang2, public_keyset)
verified_jwt = verifier.verify_and_decode(compact, validator)
self.assertEqual(verified_jwt.issuer(), 'issuer')
if __name__ == '__main__':
absltest.main()
|
|
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common logic to assign test cases to CI jobs.
Some background knowledge about Gitlab CI and its usage flow in esp-idf:
* Gitlab CI jobs are static in ``.gitlab-ci.yml``. We can't dynamically create test jobs
* For test jobs running on a DUT, we use ``tags`` to select runners with different test environments
* We have an ``assign_test`` stage that collects cases and then assigns them to the correct test jobs
* ``assign_test`` will fail if it fails to assign any case
* with ``assign_test``, we can:
    * dynamically filter the test cases we want to test
    * alert users if they forget to add CI jobs, and guide them on how to add test jobs
* the last step of ``assign_test`` is to output config files; the test jobs then run these cases
The basic logic to assign test cases is as follows:
1. search all the cases
2. filter cases (if a filter is specified by @bot)
3. put cases into different groups according to the rules of ``Group``
    * try to put them into existing groups
    * if that fails, create a new group and add the case
4. parse and filter the test jobs from the CI config file
5. try to assign all groups to jobs according to tags
6. output config files for the jobs
A minimal usage sketch appears at the end of this module.
"""
import os
import re
import json
import yaml
from Utility import (CaseConfig, SearchCases, GitlabCIJob, console_log)
class Group(object):
MAX_EXECUTION_TIME = 30
MAX_CASE = 15
SORT_KEYS = ["env_tag"]
# Matching CI job rules could be different from the way we want to group test cases.
    # For example, when assigning unit test cases, different test cases need to use different test functions.
# We need to put them into different groups.
# But these groups can be assigned to jobs with same tags, as they use the same test environment.
CI_JOB_MATCH_KEYS = SORT_KEYS
def __init__(self, case):
self.execution_time = 0
self.case_list = [case]
self.filters = dict(zip(self.SORT_KEYS, [self._get_case_attr(case, x) for x in self.SORT_KEYS]))
# we use ci_job_match_keys to match CI job tags. It's a set of required tags.
self.ci_job_match_keys = set([self._get_case_attr(case, x) for x in self.CI_JOB_MATCH_KEYS])
@staticmethod
def _get_case_attr(case, attr):
        # we might use different types for a case (dict or test_func);
        # this method gets the attribute from either form
return case.case_info[attr]
def accept_new_case(self):
"""
check if allowed to add any case to this group
:return: True or False
"""
max_time = (sum([self._get_case_attr(x, "execution_time") for x in self.case_list])
< self.MAX_EXECUTION_TIME)
max_case = (len(self.case_list) < self.MAX_CASE)
return max_time and max_case
def add_case(self, case):
"""
add case to current group
:param case: test case
:return: True if add succeed, else False
"""
added = False
if self.accept_new_case():
for key in self.filters:
if self._get_case_attr(case, key) != self.filters[key]:
break
else:
self.case_list.append(case)
added = True
return added
def output(self):
"""
output data for job configs
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
output_data = {
"Filter": self.filters,
"CaseConfig": [{"name": self._get_case_attr(x, "name")} for x in self.case_list],
}
return output_data
class AssignTest(object):
"""
Auto assign tests to CI jobs.
:param test_case_path: path of test case file(s)
:param ci_config_file: path of ``.gitlab-ci.yml``
"""
    # subclasses need to override the CI test job pattern in order to filter all test jobs
CI_TEST_JOB_PATTERN = re.compile(r"^test_.+")
    # by default we only run function tests in CI, as other tests could take a long time
DEFAULT_FILTER = {
"category": "function",
"ignore": False,
}
def __init__(self, test_case_path, ci_config_file, case_group=Group):
self.test_case_path = test_case_path
self.test_cases = []
self.jobs = self._parse_gitlab_ci_config(ci_config_file)
self.case_group = case_group
def _parse_gitlab_ci_config(self, ci_config_file):
with open(ci_config_file, "r") as f:
ci_config = yaml.load(f)
job_list = list()
for job_name in ci_config:
if self.CI_TEST_JOB_PATTERN.search(job_name) is not None:
job_list.append(GitlabCIJob.Job(ci_config[job_name], job_name))
return job_list
def _search_cases(self, test_case_path, case_filter=None):
"""
:param test_case_path: path contains test case folder
:param case_filter: filter for test cases. The filter actually used is the default filter updated with the ``case_filter`` param.
:return: filtered test case list
"""
_case_filter = self.DEFAULT_FILTER.copy()
if case_filter:
_case_filter.update(case_filter)
test_methods = SearchCases.Search.search_test_cases(test_case_path)
return CaseConfig.filter_test_cases(test_methods, _case_filter)
def _group_cases(self):
"""
separate all cases into groups according to the group rules. Each group will be executed by one CI job.
:return: test case groups.
"""
groups = []
for case in self.test_cases:
for group in groups:
# add to current group
if group.add_case(case):
break
else:
# create new group
groups.append(self.case_group(case))
return groups
@staticmethod
def _apply_bot_filter():
"""
we support customizing CI tests via the bot.
here we read the bot's filter from the environment and return a filter which ``_search_cases`` accepts.
:return: filter for search test cases
"""
bot_filter = os.getenv("BOT_CASE_FILTER")
if bot_filter:
bot_filter = json.loads(bot_filter)
else:
bot_filter = dict()
return bot_filter
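# Illustrative example (the value is hypothetical): the bot passes the filter
# as a JSON object in the BOT_CASE_FILTER environment variable, e.g.
#
#     BOT_CASE_FILTER='{"env_tag": "UT_T1_1"}'
#
# which then gets merged into DEFAULT_FILTER before searching for cases.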
def _apply_bot_test_count(self):
"""
The bot can also pass a test count.
If the filtered cases need to be run several times, we duplicate them here.
"""
test_count = os.getenv("BOT_TEST_COUNT")
if test_count:
test_count = int(test_count)
self.test_cases *= test_count
def assign_cases(self):
"""
separate test cases into groups and assign them to CI jobs.
:raise RuntimeError: if any case could not be assigned to a CI job.
:return: None
"""
failed_to_assign = []
case_filter = self._apply_bot_filter()
self.test_cases = self._search_cases(self.test_case_path, case_filter)
self._apply_bot_test_count()
test_groups = self._group_cases()
for group in test_groups:
for job in self.jobs:
if job.match_group(group):
job.assign_group(group)
break
else:
failed_to_assign.append(group)
if failed_to_assign:
console_log("Too many test cases vs jobs to run. Please add the following jobs to .gitlab-ci.yml with specific tags:", "R")
for group in failed_to_assign:
console_log("* Add job with: " + ",".join(group.ci_job_match_keys), "R")
raise RuntimeError("Failed to assign test case to CI jobs")
def output_configs(self, output_path):
"""
:param output_path: path to output config files for each CI job
:return: None
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
for job in self.jobs:
job.output_config(output_path)
|
|
#!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Blender Driver unit test that can be run from the unittest application.
This module is intended for use within Blender Driver and can only be used from
within Blender."""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Module for mathematical operations, only used for degree to radian conversion.
# https://docs.python.org/3/library/math.html
from math import degrees, radians
#
# Local imports.
#
# Custom TestCase
from applications.unittest import TestCaseWithApplication
class TestAnimation(TestCaseWithApplication):
def test_linear(self):
'''\
Test basic linear animation and replacement of the animation object
by None on completion.
'''
with self.application.mainLock:
gameObject = self.add_test_object()
self.show_status("Created")
# ToDo: Change this to use a STATIC object.
gameObject.physics = False
self.restInterface.rest_put(gameObject, self.objectPath)
#
# Path to the object's Z value.
valuePath = list(self.objectPath) + ['worldPosition', 2]
#
# Get the current value.
value = self.restInterface.rest_get(valuePath)
addition = 2.0
speed = 1.0
target = value + addition
self.add_phase_starts(addition / speed)
self.add_phase_offsets(1.0)
#
# Assemble the animation in a dictionary. Note there is no
# subjectPath so that physics don't get resumed.
animation = {
'modulo': 0,
'valuePath': valuePath,
'speed': speed,
'targetValue': target}
#
# There is up to one animation for this test. It has to have a
# number though, so that the point maker sees it as deep enough.
animationPath = ['animations', self.id(), 0]
#
# Insert the animation. The point maker will set the store
# attribute.
self.restInterface.rest_put(animation, animationPath)
#
# Set the start time, which has the following side effects:
#
# - Retrieves the start value.
# - Clears the complete state.
animationPath.append('startTime')
self.restInterface.rest_put(
self.application.tickPerf, animationPath)
del animationPath[-1]
while self.up_to_phase(1):
with self.tick, self.application.mainLock:
if self.up_to_phase(0):
#
# Check object hasn't reached its destination.
self.assertLess(gameObject.worldPosition.z, target)
#
# Check animation is still in progress.
self.assertIsNotNone(
self.restInterface.rest_get(animationPath))
else:
#
# Check object has reached its destination.
self.assertAlmostEqual(gameObject.worldPosition.z, target)
#
# Check animation has been discarded.
self.assertIsNone(
self.restInterface.rest_get(animationPath))
with self.tick, self.application.mainLock:
# Next line makes the object fall away, which is nice.
gameObject.physics = True
def test_physics(self):
'''Test that physics gets suspended during animation.'''
zPosition = None
gameObject = None
self.add_phase_starts(1.0, 5.0, 10.0)
turn = radians(135.0)
animation = None
animationPath = None
with self.tick:
lastTick = self.application.tickPerf
with self.application.mainLock:
gameObject = self.add_test_object()
gameObject.physics = False
gameObject.worldPosition.z = 6.0
self.show_status("Created")
self.restInterface.rest_put(gameObject, self.objectPath)
valuePath = tuple(self.objectPath) + ('rotation', 'z')
value = self.restInterface.rest_get(valuePath)
#
# Assemble the animation in a dictionary. Note that there is a
# subjectPath so that physics gets suspended and resumed.
animation = {
'modulo': radians(360.0),
'valuePath': valuePath,
'subjectPath': self.objectPath,
'speed': turn / (self.phases[1] - self.phases[0]),
'targetValue': value + turn}
#
# There is up to one animation for this test. It has to have a
# number though, so that the point maker sees it as deep enough.
animationPath = ['animations', self.id(), 0]
zPosition = gameObject.worldPosition.z
gameObject.physics = True
self.show_status("Falling")
while self.up_to_phase(0):
with self.tick, self.application.mainLock:
# Check that its z position is falling every tick.
# Next is LessEqual because sometimes it doesn't fall.
self.assertLessEqual(gameObject.worldPosition.z, zPosition)
zPosition = gameObject.worldPosition.z
lastTick = self.application.tickPerf
with self.tick, self.application.mainLock:
self.show_status("Animating")
#
# Insert the animation. The point maker will set the store
# attribute and suspend physics.
self.restInterface.rest_put(animation, animationPath)
#
# Set the start time, which has the following side effects:
# - Retrieves the start value.
# - Clears the complete state.
animationPath.append('startTime')
self.restInterface.rest_put(
self.application.tickPerf, animationPath)
del animationPath[-1]
zPosition = gameObject.worldPosition.z
while self.up_to_phase(1):
with self.tick, self.application.mainLock:
#
# Check that time marches on.
self.assertGreater(self.application.tickPerf, lastTick)
lastTick = self.application.tickPerf
#
# Check physics is suspended, literally, and in effect.
self.assertFalse(gameObject.physics)
self.assertAlmostEqual(
gameObject.worldPosition.z, zPosition)
#
# Check animation is still in progress.
self.assertIsNotNone(
self.restInterface.rest_get(animationPath))
#
# There now follows a short intermission.
#
# Wait for some ticks. In theory maybe it should only ever have to wait
# for one tick but in practice this test is sometimes ahead of the
# application thread.
for waitTick in range(3):
with self.tick, self.application.mainLock:
self.show_status("Waiting {}".format(waitTick))
#
# Check that time marches on.
self.assertGreater(self.application.tickPerf, lastTick)
lastTick = self.application.tickPerf
#
animationNow = self.restInterface.rest_get(animationPath)
print("waitTick:{:d} {:.4f} {} {} {}".format(
waitTick, self.application.tickPerf, self.id()
, self.application.lastCompletion
, (None if animationNow is None else animationNow.complete)
))
#
# Check if completions have been processed.
lastCompletionTick = self.application.lastCompletionTick
if lastCompletionTick is None:
continue
if lastCompletionTick < self.application.tickPerf:
continue
if animationNow is None:
break
#
# Intermission over. Resume checking.
with self.tick, self.application.mainLock:
#
# Check animation has been discarded.
self.assertIsNone(self.restInterface.rest_get(animationPath))
#
# Check physics has resumed, literally.
self.assertTrue(gameObject.physics)
self.show_status("Falling")
while self.up_to_phase(2):
#
# Check physics has resumed, in effect.
with self.tick, self.application.mainLock:
self.assertGreater(self.application.tickPerf, lastTick)
lastTick = self.application.tickPerf
#
# Check that its z position is falling every tick.
# Next is LessEqual because sometimes it doesn't fall. These are
# called fall errors. They are scrutinised in a different unit
# test: TestGameObject.test_physics
self.assertLessEqual(gameObject.worldPosition.z, zPosition)
zPosition = gameObject.worldPosition.z
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===================================
Simple Pygame application framework
===================================
A component that sets up a pygame display surface and provides a main loop and
simple event dispatch framework.
The rendering surface is requested from the Pygame Display service component, so
this component can coexist with other components using pygame.
Example Usage
-------------
::
class SimpleApp1(PyGameApp):
def initialiseComponent(self):
self.addHandler(MOUSEBUTTONDOWN, lambda event : self.mousedown(event))
def mainLoop(self):
... draw and do other stuff here...
return 1
def mousedown(self, event):
print ("Mouse down!")
app = SimpleApp1( (800,600) ).run()
How does it work?
-----------------
Subclass this component to implement your own pygame 'app'. Replace the
mainLoop() stub with your own code to redraw the display surface etc. This
method will be called every cycle - do not incorporate your own loop!
The self.screen attribute is the pygame surface you should render to.
The component provides a simple event dispatch framework. Call addHandler and
removeHandler to register and deregister handlers from events.
More than one handler can be registered for a given event. They are called in
the order in which they were registered. If a handler returns True then the
event is 'claimed' and no further handlers will be called.
The component will terminate if the user clicks the close button on the pygame
display window, however your mainLoop() method will not be notified, and there
is no specific 'quit' event handler.
"""
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#
# XXX TODO
#
# - 'mainloop' ought to be modded to be a true loop - something that yields.
# but users of this class will need to be modded too.
# - redundant args in initialiser need removing too (same applies)
#
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
import pygame
from pygame.locals import *
import Axon as _Axon
from Kamaelia.UI.GraphicDisplay import PygameDisplay
class PyGameApp(_Axon.Component.component):
"""\
PyGameApp(screensize[,caption][,transparency][,position]) -> new PyGameApp component.
Creates a PyGameApp component that obtains a pygame display surface and provides
an internal pygame event dispatch mechanism.
Subclass to implement your own pygame "app".
Keyword arguments:
- screensize -- (width,height) of the display area (required; no default)
- caption -- Caption for the pygame window (default = "PyGame Application")
- fullscreen -- True to start up in fullscreen mode (default = False)
- transparency -- None, or (r,g,b) colour to make transparent
- position -- None, or (left,top) position for surface within pygame window
"""
Inboxes = { "inbox" : "NOT USED",
"control" : "NOT USED",
"events" : "Event notifications from Pygame Display service",
"displaycontrol" : "Replies from Pygame Display service",
}
Outboxes = { "signal" : "NOT USED",
"outbox" : "NOT USED",
"displaysignal" : "Requests to Pygame Display service",
}
def __init__(self, screensize,
caption="PyGame Application",
fullscreen=False,
depth=0,
transparency = None,
position = None):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(PyGameApp, self).__init__()
pygame.init() # FIXME: This should NEVER be called inside a component unless it is the
# PygameDisplay or GraphicDisplay or similar.
# flags = DOUBLEBUF
# if fullscreen:
# flags = flags | -abs(FULLSCREEN)
# self.flags = flags
# self.depth = depth
self.screensize = screensize
self.caption = caption
self.transparency = transparency
self.eventHandlers = {}
self.position = position
self.flip = False
def waitBox(self,boxname):
"""Generator. Yields until data ready on the named inbox."""
waiting = True
while waiting:
if self.dataReady(boxname): return
else: yield 1
def main(self):
"""Main loop. Do not override"""
displayservice = PygameDisplay.getDisplayService()
self.link((self,"displaysignal"), displayservice)
displayrequest = { "DISPLAYREQUEST" : True,
"events" : (self, "events"),
"callback" : (self, "displaycontrol"),
"transparency": self.transparency,
"size" : self.screensize,
}
if self.position is not None:
displayrequest["position"] = self.position
self.send(displayrequest, "displaysignal")
for _ in self.waitBox("displaycontrol"):
# print ("Waiting for display")
yield 1
display = self.recv("displaycontrol")
self.screen = display
pygame.display.set_caption(self.caption)
self.screensize = self.screen.get_width(), self.screen.get_height()
self.addHandler(QUIT, lambda event : self.quit(event))
self.flip = True
self.initialiseComponent()
self.quitting = False
# Event loop
while not self.quitting:
self._dispatch()
if not self.quitting:
self.mainLoop()
self.send({"REDRAW":True, "surface":self.screen}, "displaysignal")
if not self.quitting and self.flip:
# FIXME: This does not play nicely at all with the standard pygame display
# handling, despite requesting its display from the standard
# location.
pygame.display.flip()
yield 1
else:
yield 0
print ("QUIT")
def initialiseComponent(self):
pass
def go(self):
"""Call this to run the pygame app, without using an Axon scheduler.
Returns when the app 'quits'
"""
for i in self.main():
pass
def mainLoop(self):
"""Implement your runtime loop in this method here.
FIXME: This is less than ideal.
"""
return 1
def events(self):
"""Generator. Receive events on "events" inbox and yield then one at a time."""
while self.dataReady("events"):
event_bundle = self.recv("events")
for event in event_bundle:
yield event
def _dispatch(self):
"""\
Internal pygame event dispatcher.
For all events received, it calls all event handlers in sequence
until one returns True.
"""
for event in self.events():
if event.type in self.eventHandlers:
for handler in self.eventHandlers[event.type]:
if handler(event):
break
def addHandler(self, eventtype, handler):
"""\
Add an event handler, for a given PyGame event type.
The handler is passed the pygame event object as its argument when called.
"""
if not ( eventtype in self.eventHandlers ):
self.eventHandlers[eventtype] = []
self.send({ "ADDLISTENEVENT" : eventtype,
"surface" : self.screen,
}, "displaysignal")
self.eventHandlers[eventtype] += [handler]
return handler
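# Illustrative use of the dispatch rules described in the module docstring
# (handler names are hypothetical): handlers run in registration order, and
# the first one that returns True claims the event, so handler_b is skipped
# for events that handler_a claims.
#
#     self.addHandler(MOUSEBUTTONDOWN, handler_a)
#     self.addHandler(MOUSEBUTTONDOWN, handler_b)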
def removeHandler(self, eventtype, handler):
"""Remove the specified pygame event handler from the specified event."""
if ( eventtype in self.eventHandlers ):
if handler in self.eventHandlers[eventtype]:
    self.eventHandlers[eventtype].remove(handler)   # guard fixes the former latent bug: remove() raised ValueError if the handler was never registered
def quit(self, event = None):
"""Call this method/event handler to finish"""
self.quitting = True
__kamaelia_components__ = ( PyGameApp, )
|
|
# Copyright 2011 Dorgival Guedes
# Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tracks host location and configuration
Keep track of hosts in the network, where they are and how they are
configured (at least MAC/IP addresses).
For the time being, it keeps tables with the information; later, it should
transfer that information to Topology and handle just the actual
discovery/update of host information.
Timer configuration can be changed when needed (e.g., for debugging) using
the launch facility (check timeoutSec dict and PingCtrl.pingLim).
You can set various timeouts from the commandline. Names and defaults:
arpAware=60*2 Quiet ARP-responding entries are pinged after this
arpSilent=60*20 This is for quiet entries not known to answer ARP
arpReply=4 Time to wait for an ARP reply before retrial
timerInterval=5 Seconds between timer routine activations
entryMove=60 Minimum expected time to move a physical entry
Good values for testing:
--arpAware=15 --arpSilent=45 --arpReply=1 --entryMove=4
You can also specify how many ARP pings we try before deciding it failed:
--pingLim=2
"""
from pox.core import core
from pox.lib.addresses import EthAddr
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.recoco import Timer
from pox.lib.revent import Event, EventHalt
import pox.openflow.libopenflow_01 as of
import pox.openflow.discovery as discovery
from pox.lib.revent.revent import *
import time
import pox
log = core.getLogger()
# Times (in seconds) to use for different timeouts:
timeoutSec = dict(
arpAware=60*2, # Quiet ARP-responding entries are pinged after this
arpSilent=60*20, # This is for quiet entries not known to answer ARP
arpReply=4, # Time to wait for an ARP reply before retrial
timerInterval=5, # Seconds between timer routine activations
entryMove=60 # Minimum expected time to move a physical entry
)
# Address to send ARP pings from.
# The particular one here is just an arbitrary locally administered address.
DEFAULT_ARP_PING_SRC_MAC = '02:00:00:00:be:ef'
class HostEvent (Event):
"""
Event when hosts join, leave, or move within the network
"""
def __init__ (self, entry, new_dpid = None, new_port = None, join = False,
leave = False, move = False):
super(HostEvent,self).__init__()
self.entry = entry
self.join = join
self.leave = leave
self.move = move
assert sum(1 for x in [join,leave,move] if x) == 1
# You can alter these and they'll change where we think it goes...
self._new_dpid = new_dpid
self._new_port = new_port
#TODO: Allow us to cancel add/removes
@property
def new_dpid (self):
"""
New DPID for move events
"""
assert self.move
return self._new_dpid
@property
def new_port (self):
"""
New port for move events
"""
assert self.move
return self._new_port
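# A sketch of consuming these events from another component (the handler name
# follows the usual POX listen_to_dependencies naming convention; the body is
# illustrative):
#
#   def _handle_host_tracker_HostEvent (self, event):
#     if event.join:
#       log.info("New host %s on %s.%s", event.entry.macaddr,
#                event.entry.dpid, event.entry.port)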
class Alive (object):
"""
Holds liveliness information for MAC and IP entries
"""
def __init__ (self, livelinessInterval=timeoutSec['arpAware']):
self.lastTimeSeen = time.time()
self.interval=livelinessInterval
def expired (self):
return time.time() > self.lastTimeSeen + self.interval
def refresh (self):
self.lastTimeSeen = time.time()
class PingCtrl (Alive):
"""
Holds information for handling ARP pings for hosts
"""
# Number of ARP ping attempts before deciding it failed
pingLim=3
def __init__ (self):
super(PingCtrl,self).__init__(timeoutSec['arpReply'])
self.pending = 0
def sent (self):
self.refresh()
self.pending += 1
def failed (self):
return self.pending > PingCtrl.pingLim
def received (self):
# Clear any pending timeouts related to ARP pings
self.pending = 0
class IpEntry (Alive):
"""
This entry keeps track of IP addresses seen from each MAC entry and will
be kept in the macEntry object's ipAddrs dictionary. At least for now,
there is no need to refer to the original macEntry as the code is organized.
"""
def __init__ (self, hasARP):
if hasARP:
super(IpEntry,self).__init__(timeoutSec['arpAware'])
else:
super(IpEntry,self).__init__(timeoutSec['arpSilent'])
self.hasARP = hasARP
self.pings = PingCtrl()
def setHasARP (self):
if not self.hasARP:
self.hasARP = True
self.interval = timeoutSec['arpAware']
class MacEntry (Alive):
"""
Not strictly an ARP entry.
When it gets moved to Topology, it may include other host info, like
services, and it may replace dpid by a general switch object reference.
We use the port to determine which port to forward traffic out of.
"""
def __init__ (self, dpid, port, macaddr):
super(MacEntry,self).__init__()
self.dpid = dpid
self.port = port
self.macaddr = macaddr
self.ipAddrs = {}
def __str__(self):
return ' '.join([str(self.dpid), str(self.port), str(self.macaddr)])
def __eq__ (self, other):
if other is None:
return False
elif type(other) == tuple:
return (self.dpid,self.port,self.macaddr)==other
if self.dpid != other.dpid: return False
if self.port != other.port: return False
if self.macaddr != other.macaddr: return False
if self.dpid != other.dpid: return False
# What about ipAddrs??
return True
def __ne__ (self, other):
return not self.__eq__(other)
class host_tracker (EventMixin):
"""
Host tracking component
"""
_eventMixin_events = set([HostEvent])
def __init__ (self, ping_src_mac = None, install_flow = True,
eat_packets = True):
if ping_src_mac is None:
ping_src_mac = DEFAULT_ARP_PING_SRC_MAC
self.ping_src_mac = EthAddr(ping_src_mac)
self.install_flow = install_flow
self.eat_packets = eat_packets
# The following tables should go to Topology later
self.entryByMAC = {}
self._t = Timer(timeoutSec['timerInterval'],
self._check_timeouts, recurring=True)
# Listen to openflow with high priority if we want to eat our ARP replies
listen_args = {}
if eat_packets:
listen_args={'openflow':{'priority':0}}
core.listen_to_dependencies(self, listen_args=listen_args)
def _all_dependencies_met (self):
log.info("host_tracker ready")
# The following two functions should go to Topology also
def getMacEntry (self, macaddr):
try:
result = self.entryByMAC[macaddr]
except KeyError as e:
result = None
return result
def sendPing (self, macEntry, ipAddr):
"""
Builds an ETH/IP any-to-any ARP packet (an "ARP ping")
"""
r = arp()
r.opcode = arp.REQUEST
r.hwdst = macEntry.macaddr
r.hwsrc = self.ping_src_mac
r.protodst = ipAddr
# src is IP_ANY
e = ethernet(type=ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst)
e.payload = r
log.debug("%i %i sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
msg = of.ofp_packet_out(data = e.pack(),
action = of.ofp_action_output(port=macEntry.port))
if core.openflow.sendToDPID(macEntry.dpid, msg.pack()):
ipEntry = macEntry.ipAddrs[ipAddr]
ipEntry.pings.sent()
else:
# macEntry is stale, remove it.
log.debug("%i %i ERROR sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
del macEntry.ipAddrs[ipAddr]
return
def getSrcIPandARP (self, packet):
"""
Gets source IPv4 address for packets that have one (IPv4 and ARP)
Returns (ip_address, has_arp). If no IP, returns (None, False).
"""
if isinstance(packet, ipv4):
log.debug("IP %s => %s",str(packet.srcip),str(packet.dstip))
return ( packet.srcip, False )
elif isinstance(packet, arp):
log.debug("ARP %s %s => %s",
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(packet.opcode,
'op:%i' % (packet.opcode,)),
str(packet.protosrc), str(packet.protodst))
if (packet.hwtype == arp.HW_TYPE_ETHERNET and
packet.prototype == arp.PROTO_TYPE_IP and
packet.protosrc != 0):
return ( packet.protosrc, True )
return ( None, False )
def updateIPInfo (self, pckt_srcip, macEntry, hasARP):
"""
Update given MacEntry
If there is IP info in the incoming packet, update the macEntry
accordingly. In the past we assumed a 1:1 mapping between MAC and IP
addresses, but removed that restriction later to accommodate cases
like virtual interfaces (1:n) and distributed packet rewriting (n:1)
"""
if pckt_srcip in macEntry.ipAddrs:
# that entry already has that IP
ipEntry = macEntry.ipAddrs[pckt_srcip]
ipEntry.refresh()
log.debug("%s already has IP %s, refreshing",
str(macEntry), str(pckt_srcip) )
else:
# new mapping
ipEntry = IpEntry(hasARP)
macEntry.ipAddrs[pckt_srcip] = ipEntry
log.info("Learned %s got IP %s", str(macEntry), str(pckt_srcip) )
if hasARP:
ipEntry.pings.received()
def _handle_openflow_ConnectionUp (self, event):
if not self.install_flow: return
log.debug("Installing flow for ARP ping responses")
m = of.ofp_flow_mod()
m.priority += 1 # Higher than normal
m.match.dl_type = ethernet.ARP_TYPE
m.match.dl_dst = self.ping_src_mac
m.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
event.connection.send(m)
def _handle_openflow_PacketIn (self, event):
"""
Populate MAC and IP tables based on incoming packets.
Handles only packets from ports identified as not switch-only.
If a MAC was not seen before, insert it in the MAC table;
otherwise, update the table and the entry.
If the packet has a source IP, update that info for the macEntry (this may
require removing the info from another entry that previously had that IP address).
It does not forward any packets, it just extracts info from them.
"""
dpid = event.connection.dpid
inport = event.port
packet = event.parsed
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if packet.type == ethernet.LLDP_TYPE: # Ignore LLDP packets
return
# This should use Topology later
if not core.openflow_discovery.is_edge_port(dpid, inport):
# No host should be right behind a switch-only port
log.debug("%i %i ignoring packetIn at switch-only port", dpid, inport)
return
log.debug("PacketIn: %i %i ETH %s => %s",
dpid, inport, str(packet.src), str(packet.dst))
# Learn or update dpid/port/MAC info
macEntry = self.getMacEntry(packet.src)
if macEntry is None:
# there is no known host by that MAC
# should we raise a NewHostFound event (at the end)?
macEntry = MacEntry(dpid,inport,packet.src)
self.entryByMAC[packet.src] = macEntry
log.info("Learned %s", str(macEntry))
self.raiseEventNoErrors(HostEvent, macEntry, join=True)
elif macEntry != (dpid, inport, packet.src):
# there is already an entry of host with that MAC, but host has moved
# should we raise a HostMoved event (at the end)?
log.info("Learned %s moved to %i %i", str(macEntry), dpid, inport)
# if the entry was seen only recently, the move may indicate a duplicate MAC...
if time.time() - macEntry.lastTimeSeen < timeoutSec['entryMove']:
log.warning("Possible duplicate: %s at time %i, now (%i %i), time %i",
str(macEntry), macEntry.lastTimeSeen,
dpid, inport, time.time())
# should we create a whole new entry, or keep the previous host info?
# for now, we keep it: IP info, answers pings, etc.
e = HostEvent(macEntry, move=True, new_dpid = dpid, new_port = inport)
self.raiseEventNoErrors(e)
macEntry.dpid = e._new_dpid
macEntry.port = e._new_port    # MacEntry stores the port as .port; ".inport" would only create an unused attribute
macEntry.refresh()
(pckt_srcip, hasARP) = self.getSrcIPandARP(packet.next)
if pckt_srcip is not None:
self.updateIPInfo(pckt_srcip,macEntry,hasARP)
if self.eat_packets and packet.dst == self.ping_src_mac:
return EventHalt
def _check_timeouts (self):
"""
Checks for timed out entries
"""
for macEntry in self.entryByMAC.values():
entryPinged = False
for ip_addr, ipEntry in macEntry.ipAddrs.items():
if ipEntry.expired():
if ipEntry.pings.failed():
del macEntry.ipAddrs[ip_addr]
log.info("Entry %s: IP address %s expired",
str(macEntry), str(ip_addr) )
else:
self.sendPing(macEntry,ip_addr)
ipEntry.pings.sent()
entryPinged = True
if macEntry.expired() and not entryPinged:
log.info("Entry %s expired", str(macEntry))
# sanity check: there should be no IP addresses left
if len(macEntry.ipAddrs) > 0:
for ip in macEntry.ipAddrs.keys():
log.warning("Entry %s expired but still had IP address %s",
str(macEntry), str(ip) )
del macEntry.ipAddrs[ip]
self.raiseEventNoErrors(HostEvent, macEntry, leave=True)
del self.entryByMAC[macEntry.macaddr]
|
|
#!/usr/bin/env python
#
# Copyright 2014 tigmi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A library that provides a Python interface to the Kubernetes API'''
import sys
import urllib
import urllib2
import urlparse
import requests
import urllib3
urllib3.disable_warnings()
from kubernetes import (__version__, _FileCache, simplejson, KubernetesError, PodList)
# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()
class Api(object):
'''A python interface into the Kubernetes API'''
def __init__(self,
user_id=None,
user_password=None,
input_encoding=None,
request_headers=None,
cache=DEFAULT_CACHE,
base_url=None,
debugHTTP=None,
timeout=None):
'''Instantiate a new kubernetes.Api object
Args:
user_id:
Your agent user id
user_password:
Your agent user password
input_encoding:
The encoding used to encode input strings. [Optional]
request_headers:
A dictionary of additional HTTP request headers. [Optional]
cache:
The cache instance to use. Defaults to DEFAULT_CACHE.
Use None to disable caching. [Optional]
base_url:
The base URL to use to contact the kubernetes API.
Defaults to https://10.245.1.2/api/v1beta2
debugHTTP:
Set to True to enable debug output from urllib2 when performing
any HTTP requests. Defaults to False. [Optional]
timeout:
Set timeout (in seconds) of the http/https requests. If None the
requests lib default will be used. Defaults to None. [Optional]
'''
self.SetCache(cache)
self._urllib = urllib2
self._input_encoding = input_encoding
self._debugHTTP = debugHTTP
self._timeout = timeout
self._InitializeRequestHeaders(request_headers)
self._InitializeUserAgent()
self._InitializeDefaultParameters()
if base_url is None:
self.base_url = 'https://10.245.1.2/api/v1beta2'
else:
self.base_url = base_url
if user_id is None or user_password is None:
print >> sys.stderr, 'Kubernetes requires user_id, user_password.'
raise KubernetesError({'message': "Kubernetes requires user_id and user_password"})
self.SetCredentials(user_id, user_password)
if debugHTTP:
import logging
import httplib
httplib.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
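# A minimal usage sketch (credentials are hypothetical; the cluster address is
# the documented default):
#
#     api = Api(user_id='admin', user_password='secret',
#               base_url='https://10.245.1.2/api/v1beta2')
#     pods = api.GetPods()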
def SetCredentials(self,
user_id,
user_password):
'''Set the user_id and user_password for this instance
Args:
user_id:
Your agent user id
user_password:
Your agent user password
'''
self._user_id = user_id
self._user_password = user_password
auth_list = [user_id, user_password]
if all(auth_list):
self.__auth = (user_id, user_password)
self._config = None
def ClearCredentials(self):
'''Clear any credentials for this instance'''
self._user_id = None
self._user_password = None
def GetPods(self):
'''List all pods on this cluster'''
# Make and send requests
url = '%s/pods' % self.base_url
json = self._RequestUrl(url, 'GET')
data = self._ParseAndCheckKubernetes(json.content)
return PodList.NewFromJsonDict(data)
def GetReplicationControllers(self):
'''List all replicationControllers on this cluster'''
# Make and send requests
url = '%s/replicationControllers' % self.base_url
json = self._RequestUrl(url, 'GET')
data = self._ParseAndCheckKubernetes(json.content)
return PodList.NewFromJsonDict(data)
def GetServices(self):
'''List all services on this cluster'''
# Make and send requests
url = '%s/services' % self.base_url
json = self._RequestUrl(url, 'GET')
data = self._ParseAndCheckKubernetes(json.content)
return PodList.NewFromJsonDict(data)
def SetCache(self, cache):
'''Override the default cache. Set to None to prevent caching.
Args:
cache:
An instance that supports the same API as the kubernetes._FileCache
'''
if cache == DEFAULT_CACHE:
self._cache = _FileCache()
else:
self._cache = cache
def _InitializeRequestHeaders(self, request_headers):
if request_headers:
self._request_headers = request_headers
else:
self._request_headers = {}
def _InitializeUserAgent(self):
user_agent = 'Python-urllib/%s (python-kubernetes/%s)' % \
(self._urllib.__version__, __version__)
self.SetUserAgent(user_agent)
def _InitializeDefaultParameters(self):
self._default_params = {}
def SetUserAgent(self, user_agent):
'''Override the default user agent.
Args:
user_agent:
A string that should be sent to the server as the user-agent.
'''
self._request_headers['User-Agent'] = user_agent
def _Encode(self, s):
if self._input_encoding:
return unicode(s, self._input_encoding).encode('utf-8')
else:
return unicode(s).encode('utf-8')
def _EncodeParameters(self, parameters):
'''Return a string in key=value&key=value form
Values of None are not included in the output string.
Args:
parameters:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if parameters is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in parameters.items() if v is not None]))
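# Illustrative behaviour (parameter values are hypothetical): None values are
# dropped and the remaining pairs are URL-encoded, e.g.
#     {'labels': 'name=frontend', 'limit': None}  ->  'labels=name%3Dfrontend'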
def _BuildUrl(self, url, path_elements=None, extra_params=None):
# Break url into constituent parts
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
# Add any additional path elements to the path
if path_elements:
# Filter out the path elements that have a value of None
p = [i for i in path_elements if i]
if not path.endswith('/'):
path += '/'
path += '/'.join(p)
# add any additional query parameters to the query string
if extra_params and len(extra_params) > 0:
extra_query = self._EncodeParameters(extra_params)
# Add it to the existing query
if query:
query += '&' + extra_query
else:
query = extra_query
# Return the rebuilt URL
return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))  # urlunparse reassembles the URL; urlparse expects a string, not a tuple
def _RequestUrl(self, url, verb, data=None):
'''Request a url.
Args:
url:
The web location we want to retrieve.
verb:
POST, GET, PUT, DELETE.
data:
a dict of (str, unicode) key/value pairs.
Returns:
A JSON object.
'''
if verb == 'POST':
try:
return requests.post(
url,
data=data,
auth=self.__auth,
timeout=self._timeout,
verify=False
)
except requests.RequestException as e:
raise KubernetesError(str(e))
if verb == 'GET':
try:
return requests.get(
url,
auth=self.__auth,
timeout=self._timeout,
verify=False
)
except requests.RequestException as e:
raise KubernetesError(str(e))
if verb == 'PUT':
try:
return requests.put(
url,
data=data,
auth=self.__auth,
timeout=self._timeout,
verify=False
)
except requests.RequestException as e:
raise KubernetesError(str(e))
if verb == 'DELETE':
try:
return requests.delete(  # DELETE must use requests.delete, not a copy-pasted GET
url,
auth=self.__auth,
timeout=self._timeout,
verify=False
)
except requests.RequestException as e:
raise KubernetesError(str(e))
return 0
def _ParseAndCheckKubernetes(self, json):
'''Try to parse the JSON returned from Kubernetes; raise a
KubernetesError if it cannot be parsed.
'''
try:
data = simplejson.loads(json)
except ValueError:
raise KubernetesError({'message': 'parsing error ['+json+']'})
return data
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-440,
setuptools-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
from .util import parse_requirement
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
# this is a method only to support alternative implementations
# via overriding
def parse_requirement(self, s):
return parse_requirement(s)
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
r = self.parse_requirement(s)
if not r:
raise ValueError('Not valid: %r' % s)
self.name = r.name
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if r.constraints:
# import pdb; pdb.set_trace()
for op, s in r.constraints:
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
:type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
1 # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
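# Illustrative behaviour (values chosen for this note):
#   _match_prefix('1.4.5', '1.4') is True  ('1.4' is followed by a '.')
#   _match_prefix('1.45', '1.4')  is False (not a whole release segment)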
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
(re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading r(ev)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
those versions during PyCon 2009 (4287 of them):
- 2312 (53.93%) match NormalizedVersion without change
- 3474 (81.04%) match when using this suggestion method (i.e. with the
automatic suggestion applied)
@param s {str} An irrational version string.
@returns A rational version string, or None if one couldn't be determined.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
# version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
# by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
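# A minimal usage sketch (version and requirement strings are illustrative):
#
#   scheme = get_scheme('normalized')
#   scheme.is_valid_version('1.0.post1')                   # True
#   scheme.is_valid_version('1.0a')                        # False: no release serial
#   NormalizedMatcher('foo (>= 1.0, < 2.0)').match('1.4')  # True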
|
|
# -*- coding: utf-8 -*-
'''Module containing helpers for testing the Chevah server.'''
from __future__ import with_statement
from StringIO import StringIO
import base64
import logging
from chevah.compat.testing import CompatManufacture
from chevah.utils.configuration_file import (
FileConfigurationProxy,
)
from chevah.utils.constants import (
LOG_SECTION_DEFAULTS,
)
from chevah.utils.credentials import (
PasswordCredentials,
FTPPasswordCredentials,
FTPSPasswordCredentials,
HTTPBasicAuthCredentials,
HTTPSBasicAuthCredentials,
SSHKeyCredentials,
SSHPasswordCredentials,
SSLCertificateCredentials,
)
from chevah.utils.event import (
EventDefinition,
EventGroupDefinition,
EventsDefinition,
)
from chevah.utils.json_file import JSONFile
from chevah.utils.json_rpc import JSONRPCResource
from chevah.utils.logger import (
_Logger,
LogEntry,
)
from chevah.utils.log_configuration_section import (
LogConfigurationSection,
)
from chevah.utils.observer import Signal
class UtilsManufacture(CompatManufacture):
'''This class creates objects from the chevah.utils module.
It is designed to help with tests by creating 'mock' objects.
'''
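# Typical use in a test (the factory instance name and the credential values
# are hypothetical):
#
#   mk = UtilsManufacture()
#   credentials = mk.makeFTPPasswordCredentials(
#       username='test-user', password='test-pass')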
def Signal(self, *args, **kwargs):
"""
Create a signal.
"""
return Signal(*args, **kwargs)
def _makeGenericPasswordCredentials(self,
credentials_class,
username=None, password=None, token=None,
):
'''Create PasswordCredentials.'''
if username is None:
username = self.username
else:
username = unicode(username)
if password is not None:
password = unicode(password)
credentials = credentials_class(
username=username,
password=password,
)
return credentials
def makeFTPPasswordCredentials(self, *args, **kwargs):
return self._makeGenericPasswordCredentials(
FTPPasswordCredentials, *args, **kwargs)
def makeFTPSPasswordCredentials(self, *args, **kwargs):
return self._makeGenericPasswordCredentials(
FTPSPasswordCredentials, *args, **kwargs)
def makeSSHPasswordCredentials(self, *args, **kwargs):
return self._makeGenericPasswordCredentials(
SSHPasswordCredentials, *args, **kwargs)
def makeHTTPBasicAuthCredentials(self, *args, **kwargs):
return self._makeGenericPasswordCredentials(
HTTPBasicAuthCredentials, *args, **kwargs)
def makeHTTPSBasicAuthCredentials(self, *args, **kwargs):
return self._makeGenericPasswordCredentials(
HTTPSBasicAuthCredentials, *args, **kwargs)
def makePasswordCredentials(self,
username=None, password=None, token=None,
):
'''Create PasswordCredentials.'''
if username is None:
username = self.getUniqueString()
else:
username = unicode(username)
if password is not None:
password = unicode(password)
credentials = PasswordCredentials(
username=username,
password=password,
)
return credentials
def makeSSHKeyCredentials(self,
username=None,
key=None,
key_algorithm=None, key_data=None, key_signed_data=None,
key_signature=None,
*args, **kwargs
):
if username is None:
username = self.username
else:
username = unicode(username)
if key is not None:
key_parts = key.split()
key_algorithm = key_parts[0]
key_data = base64.decodestring(key_parts[1])
credentials = SSHKeyCredentials(
username=username,
key_algorithm=key_algorithm,
key_data=key_data,
key_signed_data=key_signed_data,
key_signature=key_signature,
*args, **kwargs
)
return credentials
def makeSSLCertificateCredentials(self,
username=None,
certificate=None,
*args, **kwargs
):
if username is None:
username = self.username
else:
username = unicode(username)
credentials = SSLCertificateCredentials(
username=username,
certificate=certificate,
*args, **kwargs
)
return credentials
def makeLogEntry(self):
id = 100
text = u'Entry content ' + self.getUniqueString()
avatar = self.makeFilesystemApplicationAvatar()
peer = self.makeIPv4Address()
return LogEntry(
message_id=id, text=text, avatar=avatar, peer=peer)
def makeJSONRPCResource(self):
'''Create a JSONRPCResource.'''
return JSONRPCResource()
def makeLogConfigurationSection(self, proxy=None):
if proxy is None:
content = (
'[log]\n'
'log_file: Disabled\n'
'log_syslog: Disabled\n'
)
proxy = self.makeFileConfigurationProxy(
content=content,
defaults=LOG_SECTION_DEFAULTS,
)
return LogConfigurationSection(proxy=proxy)
def makeFileConfigurationProxy(self, content=None, defaults=None):
if content is None:
content = ''
proxy_file = FileConfigurationProxy(
configuration_file=StringIO(content),
defaults=defaults)
proxy_file.load()
return proxy_file
def makeJSONFile(self, content=None, load=True):
"""
Create a JSONFile.
"""
json_file = JSONFile(file=StringIO(content))
if load:
json_file.load()
return json_file
def makeLogger(self, log_name=None):
result = _Logger()
if not log_name:
log_name = self.getUniqueString()
result._log = logging.getLogger(log_name)
return result
def makeEventGroupDefinition(self, name=None, description=None):
"""Creates an EventGroupDefinition."""
if name is None:
name = self.getUniqueString()
if description is None:
description = self.getUniqueString()
event_group = EventGroupDefinition(name=name, description=description)
return event_group
def makeEventDefinition(self, id=None, message=None, groups=None,
version_added=None, version_removed=None):
"""Creates an EventGroupDefinition."""
if id is None:
id = self.getUniqueString()
if message is None:
message = self.getUniqueString()
event_definition = EventDefinition(
id=id,
message=message,
groups=groups,
version_added=version_added,
version_removed=version_removed,
)
return event_definition
def makeEventsDefinition(self,
configuration_file=None, content=None,
load=True,
):
"""Creates an EventHandler."""
if configuration_file is None:
if content is None:
content = u''
configuration_file = StringIO(content)
config = EventsDefinition(file=configuration_file)
if load:
config.load()
return config
manufacture = UtilsManufacture()
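# Illustrative usage sketch -- not part of the original module.  It shows how a
# test would use the ``manufacture`` singleton defined above; it runs only when
# the file is executed directly and assumes a Python 2 environment with the
# chevah packages installed.
if __name__ == '__main__':
    # Explicit values override the generated defaults used by the helpers.
    ftp_credentials = manufacture.makeFTPPasswordCredentials(
        username=u'john', password=u'secret')
    plain_credentials = manufacture.makePasswordCredentials(
        username=u'jane', password=u'secret')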
|
|
"""
Shipyard unit tests pertaining to the librarian app.
"""
from datetime import datetime, timedelta
import os
import random
import re
import tempfile
import logging
import json
import shutil
import stat
from io import BytesIO
from zipfile import ZipFile
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User, Group
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, skipIfDBFeature, Client
from django.urls import reverse, resolve
from django.core.files import File
from django.core.files.base import ContentFile
# from django.utils.timezone import get_default_timezone, get_current_timezone
from django.utils import timezone
from mock import patch
from rest_framework.test import force_authenticate, APIRequestFactory
from rest_framework import status
from constants import groups
from container.models import ContainerFamily, ContainerArgument, Container
from librarian.ajax import ExternalFileDirectoryViewSet, DatasetViewSet
from librarian.models import Dataset, ExternalFileDirectory
from librarian.serializers import DatasetSerializer
from metadata.models import kive_user, everyone_group
import file_access_utils
import kive.testing_utils as tools
from kive.tests import BaseTestCases, DuckContext, capture_log_stream
from librarian.management.commands import find_orphans
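# "whence" value for file.seek(): seek relative to the end of the file
# (the same value as os.SEEK_END / io.SEEK_END).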
FROM_FILE_END = 2
samplecode_path = os.path.abspath(os.path.join(__file__, '../../../samplecode'))
@skipIfDBFeature('is_mocked')
class LibrarianTestCase(TestCase, object):
"""
Set up a database state for unit testing the librarian app.
    This now extends TestCase directly; it previously extended
    PipelineTestCase (which itself extended other test cases).
"""
def setUp(self):
"""Set up default database state for librarian unit testing."""
self.myUser = User.objects.create_user('john',
'[email protected]',
'johnpassword')
self.ringoUser = User.objects.create_user('ringo',
'[email protected]',
'ringopassword')
self.singlet_dataset = Dataset.create_dataset(
os.path.join(samplecode_path, "singlet_cdt_large.csv"),
self.myUser,
groups_allowed=[everyone_group()],
name="singlet",
description="lol")
def tearDown(self):
tools.clean_up_all_files()
class DatasetTests(LibrarianTestCase):
def setUp(self):
super(DatasetTests, self).setUp()
# Turn off logging, so the test output isn't polluted.
logging.getLogger('Dataset').setLevel(logging.CRITICAL)
logging.getLogger('CompoundDatatype').setLevel(logging.CRITICAL)
rows = 10
seqlen = 10
self.data = ""
for i in range(rows):
seq = "".join([random.choice("ATCG") for _ in range(seqlen)])
self.data += "patient{},{}\n".format(i, seq)
self.header = "header,sequence"
self.data_file = tempfile.NamedTemporaryFile(delete=False)
data_str = self.header + "\n" + self.data
self.data_file.write(data_str.encode())
self.file_path = self.data_file.name
self.data_file.close()
self.dsname = "good data"
self.dsdesc = "some headers and sequences"
self.dataset = Dataset.create_dataset(
file_path=self.file_path,
user=self.myUser,
keep_file=True,
name=self.dsname,
description=self.dsdesc
)
def tearDown(self):
super(DatasetTests, self).tearDown()
os.remove(self.file_path)
def test_filehandle(self):
"""
Test that you can pass a filehandle to create_dataset() to make a dataset.
"""
import datetime
dt = datetime.datetime.now()
# Turn off logging, so the test output isn't polluted.
logging.getLogger('Dataset').setLevel(logging.CRITICAL)
logging.getLogger('CompoundDatatype').setLevel(logging.CRITICAL)
with tempfile.NamedTemporaryFile(delete=True) as tmpfile:
tmpfile.file.write("Random stuff".encode())
tmpfile.file.flush() # flush python buffer to os buffer
os.fsync(tmpfile.file.fileno()) # flush os buffer to disk
tmpfile.file.seek(0) # go to beginning of file before calculating expected md5
expected_md5 = file_access_utils.compute_md5(tmpfile)
tmpfile.file.seek(0) # return to beginning before creating a Dataset
raw_datatype = None # raw compound datatype
name = "Test file handle" + str(dt.microsecond)
desc = "Test create dataset with file handle"
dataset = Dataset.create_dataset(
file_path=None,
user=self.myUser,
cdt=raw_datatype,
keep_file=True,
name=name,
description=desc,
check=True,
file_handle=tmpfile
)
self.assertIsNotNone(Dataset.objects.filter(name=name).get(),
msg="Can't find Dataset in DB for name=" + name)
actual_md5 = Dataset.objects.filter(id=dataset.id).get().MD5_checksum
self.assertEqual(actual_md5, expected_md5,
msg="Checksum for Dataset ({}) file does not match expected ({})".format(
actual_md5,
expected_md5
))
def test_dataset_creation(self):
"""
Test coherence of a freshly created Dataset.
"""
self.assertEqual(self.dataset.clean(), None)
self.assertEqual(self.dataset.has_data(), True)
self.assertTrue(self.dataset.is_raw())
self.assertEqual(self.dataset.user, self.myUser)
self.assertEqual(self.dataset.name, self.dsname)
self.assertEqual(self.dataset.description, self.dsdesc)
self.assertEqual(self.dataset.date_created.date(), timezone.now().date())
self.assertEqual(self.dataset.date_created < timezone.now(), True)
self.assertEqual(self.dataset.file_source, None)
self.assertEqual(os.path.basename(self.dataset.dataset_file.path), os.path.basename(self.file_path))
self.data_file.close()
def test_dataset_increase_permissions_from_json(self):
"""
Test increase_permissions_from_json reaches any usurping Datasets.
"""
# First, we revoke Everyone permissions on a Dataset.
self.singlet_dataset.groups_allowed.remove(everyone_group())
# We store the original contents of a Dataset...
self.singlet_dataset.dataset_file.open()
orig_contents = self.singlet_dataset.dataset_file.read()
self.singlet_dataset.dataset_file.close()
orig_md5 = self.singlet_dataset.MD5_checksum
# ... and then we corrupt it.
self.singlet_dataset.MD5_checksum = "corruptedmd5"
self.singlet_dataset.save()
usurping_ds = Dataset(
name="Usurping DS",
description="Usurps self.singlet_dataset",
user=self.myUser,
dataset_file=ContentFile(orig_contents),
MD5_checksum=orig_md5
)
usurping_ds.save()
# Now, let's try to grant some permissions on self.singlet_dataset.
new_perms_json = json.dumps(
[
[self.ringoUser.username],
[Group.objects.get(pk=groups.DEVELOPERS_PK).name]
]
)
self.singlet_dataset.increase_permissions_from_json(new_perms_json)
self.assertTrue(self.singlet_dataset.users_allowed.filter(pk=self.ringoUser.pk).exists())
self.assertFalse(usurping_ds.users_allowed.filter(pk=self.ringoUser.pk).exists())
self.assertTrue(self.singlet_dataset.groups_allowed.filter(pk=groups.DEVELOPERS_PK).exists())
self.assertFalse(usurping_ds.groups_allowed.filter(pk=groups.DEVELOPERS_PK).exists())
def test_update_name(self):
dataset = self.singlet_dataset
self.assertEqual('singlet', dataset.name)
user = dataset.user
client = Client()
client.force_login(user)
expected_name = 'Changed to Synglet'
response = client.post(reverse('dataset_view',
kwargs=dict(dataset_id=dataset.id)),
dict(name=expected_name))
if response.status_code != 302:
self.assertEqual({}, response.context['form'].errors)
dataset.refresh_from_db()
self.assertEqual(expected_name, dataset.name)
def test_increase_permissions(self):
dataset = self.singlet_dataset
dataset.groups_allowed.clear()
self.assertFalse(dataset.shared_with_everyone)
user = dataset.user
client = Client()
client.force_login(user)
response = client.post(reverse('dataset_view',
kwargs=dict(dataset_id=dataset.id)),
dict(name='synglet',
permissions_1='Everyone'))
if response.status_code != 302:
self.assertEqual({}, response.context['form'].errors)
dataset.refresh_from_db()
self.assertTrue(dataset.shared_with_everyone)
def test_source_container_run_permissions(self):
""" Dataset can't have more permissions than source container run. """
user = self.singlet_dataset.user
family = ContainerFamily.objects.create(user=user)
container = family.containers.create(user=user)
app = container.apps.create()
argument = app.arguments.create(type='O')
run = app.runs.create(user=user)
dataset = self.singlet_dataset
dataset.groups_allowed.clear()
run.datasets.create(dataset=dataset,
argument=argument)
self.assertFalse(dataset.shared_with_everyone)
expected_errors = {'permissions': ['Select a valid choice. Everyone '
'is not one of the available '
'choices.']}
user = dataset.user
client = Client()
client.force_login(user)
response = client.post(reverse('dataset_view',
kwargs=dict(dataset_id=dataset.id)),
dict(name='synglet',
permissions_1='Everyone'))
self.assertEqual(200, response.status_code) # Form error, not redirect
self.assertEqual(expected_errors,
response.context['dataset_form'].errors)
dataset.refresh_from_db()
self.assertFalse(dataset.shared_with_everyone)
def test_bulk_upload(self):
file1 = SimpleUploadedFile("file1.txt", b"Content of file 1.")
file2 = SimpleUploadedFile("file2.txt", b"Content of file 2.")
client = Client()
client.force_login(self.myUser)
response = client.post(reverse('datasets_add_bulk'),
dict(dataset_files=[file1, file2],
compound_datatype='__raw__',
permissions_1='Everyone'))
self.assertEqual(200, response.status_code) # Form error, not redirect
old_form = response.context.get('bulkAddDatasetForm')
if old_form is not None:
self.assertEqual([], old_form.errors)
self.fail('Should not have old form.')
self.assertEqual(2, response.context['num_files_added'])
dataset2, dataset1 = Dataset.objects.all()[:2]
self.assertRegex(dataset1.name, r'file1\.txt.*')
self.assertRegex(dataset2.name, r'file2\.txt.*')
self.assertTrue(dataset1.is_uploaded)
def test_archive_upload(self):
bytes_file = BytesIO()
with ZipFile(bytes_file, "w") as f:
f.writestr("foo.txt", b"The first file.")
f.writestr("bar.txt", b"The second file.")
uploading_file = SimpleUploadedFile("file1.zip", bytes_file.getvalue())
client = Client()
client.force_login(self.myUser)
response = client.post(reverse('datasets_add_archive'),
dict(dataset_file=uploading_file,
compound_datatype='__raw__',
permissions_1='Everyone'))
self.assertEqual(200, response.status_code) # Form error, not redirect
old_form = response.context.get('archiveAddDatasetForm')
if old_form is not None:
self.assertEqual({}, old_form.errors)
self.assertEqual([], old_form.non_field_errors())
self.fail('Should not have old form.')
self.assertEqual(2, response.context['num_files_added'])
dataset2, dataset1 = Dataset.objects.all()[:2]
self.assertRegex(dataset1.name, r'foo\.txt.*')
self.assertRegex(dataset2.name, r'bar\.txt.*')
self.assertTrue(dataset1.is_uploaded)
def test_unique_filename(self):
example_dataset = Dataset(name="asdf_jkl.example.txt", id=987654321)
unique_name = example_dataset.unique_filename()
self.assertEqual(unique_name, "asdf_jkl.example_987654321.txt")
@skipIfDBFeature('is_mocked')
class DatasetWithFileTests(TestCase):
def setUp(self):
self.myUser = User.objects.create_user('john',
'[email protected]',
'johnpassword')
self.singlet_dataset = Dataset.create_dataset(
os.path.join(samplecode_path, "singlet_cdt_large.csv"),
self.myUser,
groups_allowed=[everyone_group()],
name="singlet",
description="lol")
self.raw_dataset = Dataset.create_dataset(
os.path.join(samplecode_path, "step_0_raw.fasta"),
user=self.myUser,
groups_allowed=[everyone_group()],
name="raw_DS",
description="lol")
def tearDown(self):
tools.clean_up_all_files()
def test_Dataset_check_MD5(self):
old_md5 = "7dc85e11b5c02e434af5bd3b3da9938e"
new_md5 = "d41d8cd98f00b204e9800998ecf8427e"
self.assertEqual(self.raw_dataset.compute_md5(), old_md5)
        # Initially, no change to the raw dataset has occurred, so the MD5 check will pass
self.assertEqual(self.raw_dataset.clean(), None)
# The contents of the file are changed, disrupting file integrity
self.raw_dataset.dataset_file.close()
self.raw_dataset.dataset_file.open(mode='w')
self.raw_dataset.dataset_file.close()
self.assertRaisesRegex(
ValidationError,
re.escape(
'File integrity of "{}" lost. Current checksum "{}" does not equal expected '
'checksum "{}"'.format(self.raw_dataset, new_md5, old_md5)),
self.raw_dataset.clean)
def test_Dataset_filename_MD5_clash(self):
ds1, ds2 = Dataset.objects.all()[:2]
ds1.name = ds2.name
ds1.MD5_checksum = ds2.MD5_checksum
ds1.save()
msg = "A Dataset with that name and MD5 already exists"
self.assertRaisesRegex(ValidationError, msg, ds1.validate_uniqueness_on_upload)
# noinspection DuplicatedCode
class DatasetApiMockTests(BaseTestCases.ApiTestCase):
def setUp(self):
self.mock_viewset(DatasetViewSet)
super(DatasetApiMockTests, self).setUp()
# num_cols = 12
self.list_path = reverse("dataset-list")
self.list_view, _, _ = resolve(self.list_path)
self.detail_pk = 43
self.detail_path = reverse("dataset-detail",
kwargs={'pk': self.detail_pk})
self.redaction_path = reverse("dataset-redaction-plan",
kwargs={'pk': self.detail_pk})
self.removal_path = reverse("dataset-removal-plan",
kwargs={'pk': self.detail_pk})
self.detail_view, _, _ = resolve(self.detail_path)
self.redaction_view, _, _ = resolve(self.redaction_path)
self.removal_view, _, _ = resolve(self.removal_path)
tz = timezone.get_current_timezone()
apples_date = timezone.make_aware(datetime(2017, 1, 1), tz)
apples = Dataset(pk=42,
name='apples',
description='chosen',
date_created=apples_date,
is_uploaded=True,
user=self.kive_kive_user)
cherries_date = timezone.make_aware(datetime(2017, 1, 2), tz)
cherries = Dataset(pk=43,
name='cherries',
date_created=cherries_date,
is_uploaded=True,
MD5_checksum='1234',
user=self.kive_kive_user)
bananas_date = timezone.make_aware(datetime(2017, 1, 3), tz)
bananas = Dataset(pk=44,
name='bananas',
date_created=bananas_date,
user=self.kive_kive_user)
Dataset.objects.add(apples,
cherries,
bananas)
def test_list(self):
"""
Test the API list view.
"""
request = self.factory.get(self.list_path)
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 3)
self.assertEqual(response.data[2]['name'], 'bananas')
def test_filter_smart(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=smart&filters[0][val]=ch")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 2)
self.assertEqual(response.data[0]['name'], 'cherries')
self.assertEqual(response.data[1]['description'], 'chosen')
def test_filter_name(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=name&filters[0][val]=ch")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['name'], 'cherries')
def test_filter_description(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=description&filters[0][val]=ch")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['description'], 'chosen')
def test_filter_user(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=user&filters[0][val]=kive")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 3)
def test_filter_uploaded(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=uploaded")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 2)
def test_filter_md5(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=md5&filters[0][val]=1234")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['name'], 'cherries')
def test_filter_date(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=createdafter&filters[0][val]=02 Jan 2017 0:00" +
"&filters[1][key]=createdbefore&filters[1][val]=02 Jan 2017 0:00")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['name'], 'cherries')
def test_filter_unknown(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=bogus&filters[0][val]=kive")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual({u'detail': u'Unknown filter key: bogus'},
response.data)
@skipIfDBFeature('is_mocked')
class PurgeDataTests(TestCase):
@staticmethod
def create_dataset(is_uploaded=False, name='Test name', description='Test description'):
with tempfile.TemporaryFile() as f:
f.write('I am a file!'.encode())
f.seek(0)
dataset = Dataset.create_dataset(
file_path=None,
user=kive_user(),
users_allowed=None,
groups_allowed=None,
cdt=None,
keep_file=True,
name=name,
description=description,
file_source=None,
check=True,
file_handle=f,
is_uploaded=is_uploaded
)
return dataset
@staticmethod
def create_container():
user = kive_user()
family = ContainerFamily.objects.create(user=user)
container = Container.objects.create(family=family, user=user)
return container
@staticmethod
def create_app(container):
app = container.apps.create(memory=200, threads=1)
app.write_inputs('test_input')
app.write_outputs('test_output')
return app
@staticmethod
def create_run(app):
run = app.runs.create(name='test_run', user=kive_user())
return run
@staticmethod
def add_dataset_to_run(app, run, dataset, atype='input'):
if atype == 'input':
aatype = ContainerArgument.INPUT
elif atype == 'output':
aatype = ContainerArgument.OUTPUT
else:
raise UserWarning('Must provide a string, either "input" or "output"')
run.datasets.create(
argument=app.arguments.get(
type=aatype,
position=1
),
dataset=dataset
)
run.save(schedule=False)
def test_find_orphans(self):
datasets = {
'orphan':
self.create_dataset(name='Orphan name',
description='Orphan description'),
'input_dataset':
self.create_dataset(is_uploaded=True,
name='Input name',
description='Input description'),
'output_dataset':
self.create_dataset(name='Output name',
description='Output description'),
'unused_dataset':
self.create_dataset(is_uploaded=True,
name='Unused name',
description='Unused description')
}
for i in range(20):
datasets['orphan_{}'.format(i)] = self.create_dataset(
name='Orphan {}'.format(i),
description='Orphan description {}'.format(i)
)
container = self.create_container()
app = self.create_app(container)
run = self.create_run(app)
self.add_dataset_to_run(app, run, datasets['input_dataset'])
self.add_dataset_to_run(app, run, datasets['output_dataset'], atype='output')
orphans = find_orphans.Command.find_orphans()
ids_and_paths = []
# Verify the input and output datasets exist
self.dataset_exists(datasets['input_dataset'].id, datasets['input_dataset'].dataset_file.path)
        self.dataset_exists(datasets['output_dataset'].id, datasets['output_dataset'].dataset_file.path)
# Check all the orphan files and records exist
for orphan in orphans:
_id = orphan.id
try:
path = orphan.dataset_file.path
except ValueError:
path = None
# Verify the orphans exist
self.dataset_exists(_id, path)
ids_and_paths.append((_id, path))
# Remove orphan records and files
find_orphans.Command.remove_orphans(orphans)
for _id, path in ids_and_paths:
# Verify the orphan record and path no longer exist
self.dataset_does_not_exist(_id, path)
# Verify the input and output datasets still exist
self.dataset_exists(datasets['input_dataset'].id, datasets['input_dataset'].dataset_file.path)
        self.dataset_exists(datasets['output_dataset'].id, datasets['output_dataset'].dataset_file.path)
def dataset_exists(self, dataset_id, dataset_path):
"""[summary]
Arguments:
dataset {Dataset} -- A Dataset object
"""
if dataset_path:
assert os.path.isfile(dataset_path)
try:
Dataset.objects.get(id=dataset_id)
except Dataset.DoesNotExist:
raise ValidationError('Dataset should exist')
def dataset_does_not_exist(self, dataset_id, dataset_path):
"""[summary]
Arguments:
dataset {Dataset} -- A Dataset object
"""
assert not os.path.isfile(dataset_path)
try:
Dataset.objects.get(id=dataset_id)
except Dataset.DoesNotExist:
pass
# noinspection DuplicatedCode
@skipIfDBFeature('is_mocked')
class DatasetApiTests(BaseTestCases.ApiTestCase):
def setUp(self):
super(DatasetApiTests, self).setUp()
num_cols = 12
self.list_path = reverse("dataset-list")
# This should equal librarian.ajax.DatasetViewSet.as_view({"get": "list"}).
self.list_view, _, _ = resolve(self.list_path)
with tempfile.NamedTemporaryFile() as f:
data = ','.join(map(str, range(num_cols)))
f.write(data.encode())
f.seek(0)
self.test_dataset = Dataset.create_dataset(
file_path=None,
user=self.kive_user,
users_allowed=None,
groups_allowed=None,
cdt=None,
keep_file=True,
name="Test dataset",
description="Test data for a test that tests test data",
file_source=None,
check=True,
file_handle=f,
)
self.test_dataset_path = "{}{}/".format(self.list_path,
self.test_dataset.pk)
self.n_preexisting_datasets = 1
self.detail_pk = self.test_dataset.pk
self.detail_path = reverse("dataset-detail",
kwargs={'pk': self.detail_pk})
self.redaction_path = reverse("dataset-redaction-plan",
kwargs={'pk': self.detail_pk})
self.removal_path = reverse("dataset-removal-plan",
kwargs={'pk': self.detail_pk})
self.detail_view, _, _ = resolve(self.detail_path)
self.redaction_view, _, _ = resolve(self.redaction_path)
self.removal_view, _, _ = resolve(self.removal_path)
def tearDown(self):
for d in Dataset.objects.all():
d.dataset_file.delete()
def test_dataset_add(self):
num_cols = 12
num_files = 2
expected_summaries = [('My cool file 1', True), # name, uploaded
('My cool file 0', True),
('Test dataset', False)]
with tempfile.TemporaryFile() as f:
data = ','.join(map(str, range(num_cols)))
f.write(data.encode())
for i in range(num_files):
f.seek(0, FROM_FILE_END)
f.write('data file {}\n'.format(i).encode())
f.seek(0)
request = self.factory.post(
self.list_path,
{
'name': "My cool file %d" % i,
'description': 'A really cool file',
# No CompoundDatatype -- this is raw.
'dataset_file': f
}
)
force_authenticate(request, user=self.kive_user)
resp = self.list_view(request).render().data
self.assertIsNone(resp.get('errors'))
self.assertEqual(resp['name'], "My cool file %d" % i)
request = self.factory.get(self.list_path)
force_authenticate(request, user=self.kive_user)
resp = self.list_view(request).data
self.assertEqual(len(resp), num_files + self.n_preexisting_datasets)
summaries = [(entry['name'], entry['uploaded'])
for entry in resp]
self.assertEqual(expected_summaries, summaries)
def test_dataset_add_with_blank_externals(self):
""" Browser API leaves external dir and path blank. """
f = SimpleUploadedFile("example.txt", b"File contents")
request = self.factory.post(
self.list_path,
dict(name="Some file",
external_path='',
externalfiledirectory='',
dataset_file=f))
force_authenticate(request, user=self.kive_user)
resp = self.list_view(request).render().data
self.assertIsNone(resp.get('errors'))
self.assertIsNone(resp.get('non_field_errors'))
self.assertEqual(resp['name'], "Some file")
def test_dataset_removal_plan(self):
request = self.factory.get(self.removal_path)
force_authenticate(request, user=self.kive_user)
response = self.removal_view(request, pk=self.detail_pk)
self.assertEqual(response.data['Datasets'], 1)
self.assertEqual(response.data['Containers'], 0)
def test_dataset_removal(self):
start_count = Dataset.objects.all().count()
request = self.factory.delete(self.detail_path)
force_authenticate(request, user=self.kive_user)
response = self.detail_view(request, pk=self.detail_pk)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
end_count = Dataset.objects.all().count()
self.assertEqual(end_count, start_count - 1)
def test_dataset_redaction_plan(self):
request = self.factory.get(self.redaction_path)
force_authenticate(request, user=self.kive_user)
response = self.redaction_view(request, pk=self.detail_pk)
self.assertEqual(response.data['Datasets'], 1)
self.assertEqual(response.data['OutputLogs'], 0)
def test_dataset_redaction(self):
request = self.factory.patch(self.detail_path,
{'is_redacted': "true"})
force_authenticate(request, user=self.kive_user)
response = self.detail_view(request, pk=self.detail_pk)
self.assertEqual(response.status_code, status.HTTP_200_OK)
dataset = Dataset.objects.get(pk=self.detail_pk)
self.assertTrue(dataset.is_redacted())
def test_dataset_purge(self):
request = self.factory.patch(self.detail_path,
json.dumps({'dataset_file': None}),
content_type='application/json')
force_authenticate(request, user=self.kive_user)
response = self.detail_view(request, pk=self.detail_pk)
self.assertEqual(response.status_code, status.HTTP_200_OK)
dataset = Dataset.objects.get(pk=self.detail_pk)
self.assertFalse(dataset.has_data())
def test_dataset_purge_again(self):
# Purge the dataset file.
Dataset.objects.get(pk=self.detail_pk).dataset_file.delete(save=True)
# Now send a request to purge it again. Should do nothing.
request = self.factory.patch(self.detail_path,
json.dumps({'dataset_file': None}),
content_type='application/json')
force_authenticate(request, user=self.kive_user)
response = self.detail_view(request, pk=self.detail_pk)
self.assertEqual(response.status_code, status.HTTP_200_OK)
dataset = Dataset.objects.get(pk=self.detail_pk)
self.assertFalse(dataset.has_data())
def test_dataset_view_purged(self):
dataset = Dataset.objects.get(id=self.detail_pk)
dataset.dataset_file.delete(save=True)
request = self.factory.get(self.detail_path)
force_authenticate(request, user=self.kive_user)
response = self.detail_view(request, pk=self.detail_pk)
self.assertEqual(
response.data['description'],
"Test data for a test that tests test data")
self.assertFalse(response.data['has_data'])
self.assertFalse(response.data['is_redacted'])
# noinspection DuplicatedCode
@skipIfDBFeature('is_mocked')
class DatasetSerializerTests(TestCase):
"""
Tests of DatasetSerializer.
"""
def setUp(self):
self.factory = APIRequestFactory()
self.list_path = reverse("dataset-list")
self.myUser = User.objects.create_user('john',
'[email protected]',
'johnpassword')
self.kive_user = kive_user()
self.duck_context = DuckContext()
num_cols = 12
self.raw_file_contents = ','.join(map(str, range(num_cols))).encode()
self.kive_file_contents = """col1
foo
bar
baz
"""
self.data_to_serialize = {
"name": "SerializedData",
"description": "Dataset for testing deserialization",
"users_allowed": [],
"groups_allowed": []
}
# An external file directory.
self.working_dir = tempfile.mkdtemp()
self.efd = ExternalFileDirectory(
name="WorkingDirectory",
path=self.working_dir
)
self.efd.save()
# An external file.
_, self.ext_fn = tempfile.mkstemp(dir=self.working_dir)
with open(self.ext_fn, "wb") as f:
f.write(self.raw_file_contents)
self.csv_file_temp_open_mode = "w+t"
def tearDown(self):
shutil.rmtree(self.working_dir)
def test_validate(self):
"""
Test validating a new Dataset.
"""
with tempfile.TemporaryFile() as f:
f.write(self.raw_file_contents)
f.seek(0)
self.data_to_serialize["dataset_file"] = File(f, name="bla")
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
self.assertTrue(ds.is_valid())
def test_validate_with_users_allowed(self):
"""
Test validating a new Dataset with users allowed.
"""
with tempfile.TemporaryFile() as f:
f.write(self.raw_file_contents)
f.seek(0)
self.data_to_serialize["dataset_file"] = File(f, name="bla")
self.data_to_serialize["users_allowed"].append(self.myUser.username)
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
self.assertTrue(ds.is_valid())
def test_validate_with_groups_allowed(self):
"""
Test validating a new Dataset with groups allowed.
"""
with tempfile.TemporaryFile() as f:
f.write(self.raw_file_contents)
f.seek(0)
self.data_to_serialize["dataset_file"] = File(f, name="bla")
self.data_to_serialize["groups_allowed"].append(everyone_group().name)
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
self.assertTrue(ds.is_valid())
def test_validate_externally_backed(self):
"""
Test validating a new Dataset with external backing.
"""
self.data_to_serialize["externalfiledirectory"] = self.efd.name
self.data_to_serialize["external_path"] = self.ext_fn
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
self.assertTrue(ds.is_valid())
def test_validate_externally_backed_no_efd(self):
"""
If external_path is present, externalfiledirectory should be also.
"""
self.data_to_serialize["external_path"] = self.ext_fn
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
self.assertFalse(ds.is_valid())
self.assertListEqual(ds.errors["non_field_errors"],
["externalfiledirectory must be specified"])
def test_validate_externally_backed_no_external_path(self):
"""
If externalfiledirectory is present, external_path should be also.
"""
self.data_to_serialize["externalfiledirectory"] = self.efd.name
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
self.assertFalse(ds.is_valid())
self.assertListEqual(ds.errors["non_field_errors"],
["external_path must be specified"])
def test_validate_dataset_file_specified(self):
"""
If dataset_file is specified, external_path and externalfiledirectory should not be.
"""
self.data_to_serialize["externalfiledirectory"] = self.efd.name
self.data_to_serialize["external_path"] = self.ext_fn
with tempfile.TemporaryFile() as f:
f.write(self.raw_file_contents)
f.seek(0)
self.data_to_serialize["dataset_file"] = File(f, name="bla")
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
self.assertFalse(ds.is_valid())
self.assertSetEqual(
set([str(e) for e in ds.errors["non_field_errors"]]),
{
"external_path should not be specified if dataset_file is",
"externalfiledirectory should not be specified if dataset_file is"
}
)
def test_create(self):
"""
Test creating a Dataset.
"""
with tempfile.TemporaryFile() as f:
f.write(self.raw_file_contents)
f.seek(0)
self.data_to_serialize["dataset_file"] = File(f, name="bla")
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
ds.is_valid()
dataset = ds.save()
# Probe the Dataset to make sure everything looks fine.
self.assertEqual(dataset.name, self.data_to_serialize["name"])
self.assertEqual(dataset.description, self.data_to_serialize["description"])
self.assertEqual(dataset.user, self.kive_user)
self.assertTrue(bool(dataset.dataset_file))
def test_create_do_not_retain(self):
"""
Test creating a Dataset but without retaining a file in the DB.
"""
with tempfile.TemporaryFile() as f:
f.write(self.raw_file_contents)
f.seek(0)
self.data_to_serialize["dataset_file"] = File(f, name="bla")
self.data_to_serialize["save_in_db"] = False
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
ds.is_valid()
dataset = ds.save()
# Probe the Dataset to make sure everything looks fine.
self.assertEqual(dataset.name, self.data_to_serialize["name"])
self.assertEqual(dataset.description, self.data_to_serialize["description"])
self.assertEqual(dataset.user, self.kive_user)
self.assertFalse(bool(dataset.dataset_file))
def test_create_with_users_allowed(self):
"""
Test validating a new Dataset with users allowed.
"""
with tempfile.TemporaryFile() as f:
f.write(self.raw_file_contents)
f.seek(0)
self.data_to_serialize["dataset_file"] = File(f, name="bla")
self.data_to_serialize["users_allowed"].append(self.myUser.username)
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
ds.is_valid()
dataset = ds.save()
self.assertListEqual(list(dataset.users_allowed.all()),
[self.myUser])
def test_create_with_groups_allowed(self):
"""
Test validating a new Dataset with groups allowed.
"""
with tempfile.TemporaryFile() as f:
f.write(self.raw_file_contents)
f.seek(0)
self.data_to_serialize["dataset_file"] = File(f, name="bla")
self.data_to_serialize["groups_allowed"].append(everyone_group().name)
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
ds.is_valid()
dataset = ds.save()
self.assertListEqual(list(dataset.groups_allowed.all()),
[everyone_group()])
def test_create_externally_backed(self):
"""
Test creating a Dataset from external data.
"""
self.data_to_serialize["externalfiledirectory"] = self.efd.name
self.data_to_serialize["external_path"] = os.path.basename(self.ext_fn)
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
ds.is_valid()
dataset = ds.save()
# Probe the Dataset to make sure everything looks fine.
self.assertEqual(dataset.name, self.data_to_serialize["name"])
self.assertEqual(dataset.description, self.data_to_serialize["description"])
self.assertEqual(dataset.user, self.kive_user)
self.assertEqual(dataset.external_path, os.path.basename(self.ext_fn))
self.assertEqual(dataset.externalfiledirectory, self.efd)
self.assertFalse(bool(dataset.dataset_file))
def test_create_externally_backed_internal_copy(self):
"""
Test creating a Dataset from external data and keeping an internal copy.
"""
self.data_to_serialize["externalfiledirectory"] = self.efd.name
self.data_to_serialize["external_path"] = os.path.basename(self.ext_fn)
self.data_to_serialize["save_in_db"] = True
ds = DatasetSerializer(
data=self.data_to_serialize,
context=self.duck_context
)
ds.is_valid()
dataset = ds.save()
# Probe the Dataset to make sure everything looks fine.
self.assertEqual(dataset.name, self.data_to_serialize["name"])
self.assertEqual(dataset.description, self.data_to_serialize["description"])
self.assertEqual(dataset.user, self.kive_user)
self.assertEqual(dataset.external_path, os.path.basename(self.ext_fn))
self.assertEqual(dataset.externalfiledirectory, self.efd)
self.assertTrue(bool(dataset.dataset_file))
dataset.dataset_file.open("rb")
with dataset.dataset_file:
self.assertEqual(dataset.dataset_file.read(), self.raw_file_contents)
# noinspection DuplicatedCode
class ExternalFileDirectoryApiMockTests(BaseTestCases.ApiTestCase):
def setUp(self):
self.mock_viewset(ExternalFileDirectoryViewSet)
super(ExternalFileDirectoryApiMockTests, self).setUp()
self.list_path = reverse("externalfiledirectory-list")
self.detail_pk = 43
self.detail_path = reverse("externalfiledirectory-detail",
kwargs={'pk': self.detail_pk})
self.list_view, _, _ = resolve(self.list_path)
self.detail_view, _, _ = resolve(self.detail_path)
ExternalFileDirectory.objects.add(ExternalFileDirectory(id=42,
name="apples",
path="/bank/apples"),
ExternalFileDirectory(id=43,
name="cherries",
path="/dock/cherries"),
ExternalFileDirectory(id=44,
name="bananas",
path="/dock/bananas"))
def test_list(self):
"""
Test the API list view.
"""
request = self.factory.get(self.list_path)
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 3)
self.assertEqual(response.data[2]['name'], 'bananas')
def test_filter_smart(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=smart&filters[0][val]=ban")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 2)
self.assertEqual(response.data[0]['name'], 'bananas')
self.assertEqual(response.data[1]['path'], '/bank/apples')
def test_filter_name(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=name&filters[0][val]=ban")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['name'], 'bananas')
def test_filter_path(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=path&filters[0][val]=bank")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['path'], '/bank/apples')
def test_filter_unknown(self):
"""
Test the API list view.
"""
request = self.factory.get(
self.list_path + "?filters[0][key]=bogus&filters[0][val]=kive")
force_authenticate(request, user=self.kive_user)
response = self.list_view(request, pk=None)
self.assertEqual({u'detail': u'Unknown filter key: bogus'},
response.data)
def test_detail(self):
request = self.factory.get(self.detail_path)
force_authenticate(request, user=self.kive_user)
response = self.detail_view(request, pk=self.detail_pk)
self.assertEqual(response.data['name'], 'cherries')
@patch('os.walk')
def test_list_files(self, mock_walk):
mock_walk.return_value = [('/dock/cherries', [], ['foo.txt', 'bar.txt'])]
expected_data = {
'url': u'http://testserver/api/externalfiledirectories/43/',
'pk': 43,
'list_files': [('/dock/cherries/foo.txt', '[cherries]/foo.txt'),
('/dock/cherries/bar.txt', '[cherries]/bar.txt')],
'name': u'cherries',
'path': u'/dock/cherries'
}
path = reverse("externalfiledirectory-list-files",
kwargs={'pk': self.detail_pk})
view, _, _ = resolve(path)
request = self.factory.get(path)
force_authenticate(request, user=self.kive_user)
response = view(request, pk=self.detail_pk)
self.assertDictEqual(expected_data, response.data)
# noinspection DuplicatedCode
@skipIfDBFeature('is_mocked')
class ExternalFileTests(TestCase):
def setUp(self):
self.myUser = User.objects.create_user('john',
'[email protected]',
'johnpassword')
self.working_dir = tempfile.mkdtemp()
self.efd = ExternalFileDirectory(
name="WorkingDirectory",
path=self.working_dir
)
self.efd.save()
self.ext1_path = "ext1.txt"
self.ext1_contents = "First test file"
with open(os.path.join(self.working_dir, self.ext1_path), "wb") as f:
f.write(self.ext1_contents.encode())
self.ext2_path = "ext2.txt"
self.ext2_contents = "Second test file"
with open(os.path.join(self.working_dir, self.ext2_path), "wb") as f:
f.write(self.ext2_contents.encode())
os.makedirs(os.path.join(self.working_dir, "ext_subdir"))
os.makedirs(os.path.join(self.working_dir, "ext_subdir2"))
self.ext_sub1_path = os.path.join("ext_subdir", "ext_sub1.txt")
self.ext_sub1_contents = "Test file in subdirectory"
with open(os.path.join(self.working_dir, self.ext_sub1_path), "wb") as f:
f.write(self.ext_sub1_contents.encode())
self.external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext1_path),
user=self.myUser,
externalfiledirectory=self.efd
)
self.external_file_ds_no_internal = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext1_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd
)
self.external_file_ds_subdir = Dataset.create_dataset(
os.path.join(self.working_dir, "ext_subdir", "ext_sub1.txt"),
user=self.myUser,
externalfiledirectory=self.efd
)
self.non_external_dataset = Dataset(
user=self.myUser,
name="foo",
description="Foo",
dataset_file=ContentFile("Foo")
)
self.non_external_dataset.save()
def tearDown(self):
shutil.rmtree(self.working_dir)
def test_save(self):
"""Calling save() normalizes the path."""
new_working_dir = tempfile.mkdtemp()
unnamed_efd = ExternalFileDirectory(name="TestSaveDir", path="{}/./".format(new_working_dir))
unnamed_efd.save()
self.assertEqual(unnamed_efd.path, os.path.normpath(new_working_dir))
shutil.rmtree(new_working_dir)
def test_list_files(self):
expected_list = [
(os.path.join(self.working_dir, self.ext1_path), "[WorkingDirectory]/{}".format(self.ext1_path)),
(os.path.join(self.working_dir, "ext2.txt"), "[WorkingDirectory]/ext2.txt"),
(os.path.join(self.working_dir, "ext_subdir", "ext_sub1.txt"),
"[WorkingDirectory]/ext_subdir/ext_sub1.txt")
]
self.assertSetEqual(set(expected_list), set(self.efd.list_files()))
def test_create_dataset_external_file(self):
"""
Create a Dataset from an external file, making a copy in the database.
"""
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext1_path),
user=self.myUser,
externalfiledirectory=self.efd
)
self.assertEqual(external_file_ds.external_path, self.ext1_path)
external_file_ds.dataset_file.open("r")
with external_file_ds.dataset_file:
self.assertEqual(external_file_ds.dataset_file.read(), self.ext1_contents)
with open(os.path.join(self.working_dir, self.ext1_path), "rb") as f:
self.assertEqual(file_access_utils.compute_md5(f), external_file_ds.MD5_checksum)
def test_create_dataset_external_file_no_internal_copy(self):
"""
Create a Dataset from an external file without making a copy in the database.
"""
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext1_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd
)
self.assertEqual(external_file_ds.external_path, self.ext1_path)
self.assertFalse(bool(external_file_ds.dataset_file))
with open(os.path.join(self.working_dir, self.ext1_path), "rb") as f:
self.assertEqual(file_access_utils.compute_md5(f), external_file_ds.MD5_checksum)
def test_create_dataset_external_file_subdirectory(self):
"""
Create a Dataset from an external file in a subdirectory of the external file directory.
"""
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext_sub1_path),
user=self.myUser,
externalfiledirectory=self.efd
)
self.assertEqual(external_file_ds.externalfiledirectory, self.efd)
self.assertEqual(external_file_ds.external_path, self.ext_sub1_path)
external_file_ds.dataset_file.open("r")
with external_file_ds.dataset_file:
self.assertEqual(external_file_ds.dataset_file.read(), self.ext_sub1_contents)
with open(os.path.join(self.working_dir, self.ext_sub1_path), "rb") as f:
self.assertEqual(file_access_utils.compute_md5(f), external_file_ds.MD5_checksum)
def test_get_file_handle(self):
"""
Test retrieving a file handle.
"""
ext_sub1_path = os.path.join(self.working_dir, "ext_subdir", "ext_sub1.txt")
external_file_ds = Dataset.create_dataset(
ext_sub1_path,
user=self.myUser,
externalfiledirectory=self.efd
)
# Where possible get_file_handle uses the internal copy.
with external_file_ds.get_open_file_handle("r") as data_handle:
self.assertEqual(data_handle, external_file_ds.dataset_file)
# It falls back on the external copy.
external_file_ds.dataset_file.delete()
with external_file_ds.get_open_file_handle('r') as external_file_handle:
self.assertEqual(os.path.abspath(external_file_handle.name), ext_sub1_path)
def test_get_file_handle_subdirectory(self):
"""
Test retrieving a file handle on a Dataset with a file in a subdirectory.
"""
# Where possible get_file_handle uses the internal copy.
with self.external_file_ds.get_open_file_handle('r') as data_handle:
self.assertEqual(data_handle, self.external_file_ds.dataset_file)
# It falls back on the external copy.
with self.external_file_ds_no_internal.get_open_file_handle('r') as external_file_handle:
self.assertEqual(
os.path.abspath(external_file_handle.name),
os.path.abspath(os.path.join(self.working_dir, self.ext1_path))
)
def test_external_absolute_path(self):
"""
Retrieve the external absolute path of an externally-backed Dataset.
"""
ext1_path = os.path.join(self.working_dir, self.ext1_path)
ext_sub1_path = os.path.join(self.working_dir, self.ext_sub1_path)
self.assertEqual(self.external_file_ds.external_absolute_path(), ext1_path)
self.assertEqual(self.external_file_ds_no_internal.external_absolute_path(), ext1_path)
self.assertEqual(self.external_file_ds_subdir.external_absolute_path(), ext_sub1_path)
self.assertIsNone(self.non_external_dataset.external_absolute_path())
def test_has_data(self):
"""
Dataset factors in presence/absence of external files when checking for data.
"""
self.assertTrue(self.external_file_ds.has_data())
self.assertTrue(self.external_file_ds_no_internal.has_data())
self.assertTrue(self.external_file_ds_subdir.has_data())
# We make an externally-backed Dataset to mess with.
ext_path = "ext_test_has_data.txt"
ext_contents = "File has data"
with open(os.path.join(self.working_dir, ext_path), "wb") as f:
f.write(ext_contents.encode())
external_path = os.path.join(self.working_dir, ext_path)
external_file_ds_no_internal = Dataset.create_dataset(
external_path,
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd
)
# Delete this file.
os.remove(external_path)
assert not external_file_ds_no_internal.has_data()
expected_error = r"No such file or directory: .*ext_test_has_data\.txt"
with self.assertRaisesRegex(IOError, expected_error):
external_file_ds_no_internal.has_data(raise_errors=True)
# Now test when the file exists but is unreadable.
with open(os.path.join(self.working_dir, ext_path), "wb") as f:
f.write(ext_contents.encode())
self.assertTrue(external_file_ds_no_internal.has_data())
os.chmod(external_path, stat.S_IWUSR | stat.S_IXUSR)
assert not external_file_ds_no_internal.has_data()
expected_error = r"Permission denied: .*ext_test_has_data\.txt"
with self.assertRaisesRegex(IOError, expected_error):
external_file_ds_no_internal.has_data(raise_errors=True)
def test_has_no_data(self):
""" Purged dataset should not raise exception from has_data. """
self.external_file_ds_no_internal.external_path = ''
self.external_file_ds_no_internal.externalfiledirectory = None
self.assertFalse(self.external_file_ds_no_internal.has_data())
self.assertFalse(self.external_file_ds_no_internal.has_data(raise_errors=True))
def test_clean_efd_external_path_both_set(self):
"""
Both or neither of externalfiledirectory and external_path are set.
"""
self.external_file_ds.clean()
self.external_file_ds.externalfiledirectory = None
self.assertRaisesRegex(
ValidationError,
"Both externalfiledirectory and external_path should be set or neither should be set",
self.external_file_ds.clean
)
self.external_file_ds.externalfiledirectory = self.efd
self.external_file_ds.external_path = ""
self.assertRaisesRegex(
ValidationError,
"Both externalfiledirectory and external_path should be set or neither should be set",
self.external_file_ds.clean
)
# Reduce this to a purely internal Dataset.
self.external_file_ds.externalfiledirectory = None
self.external_file_ds.clean()
def test_external_file_redact_this(self):
"""
Externally-backed Datasets should have external_path and externalfiledirectory cleared on redaction.
"""
self.external_file_ds.redact_this()
self.external_file_ds.refresh_from_db()
self.assertEqual(self.external_file_ds.external_path, "")
self.assertIsNone(self.external_file_ds.externalfiledirectory)
def test_file_check_passes(self):
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext1_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd)
expected_log_messages = ''
start_time = timezone.now()
with capture_log_stream(logging.ERROR,
'librarian.Dataset') as mocked_stderr:
Dataset.external_file_check()
log_messages = mocked_stderr.getvalue()
end_time = timezone.now()
external_file_ds.refresh_from_db()
self.assertGreaterEqual(external_file_ds.last_time_checked, start_time)
self.assertLessEqual(external_file_ds.last_time_checked, end_time)
self.assertFalse(external_file_ds.is_external_missing)
self.assertMultiLineEqual(expected_log_messages, log_messages)
def test_file_check_missing_one(self):
Dataset.objects.all().delete() # Remove existing datasets.
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext1_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd)
external_file_ds.last_time_checked = timezone.now() - timedelta(minutes=1)
external_file_ds.save()
os.remove(external_file_ds.external_absolute_path())
expected_log_messages = """\
Missing 1 external dataset. Most recent from {}, last checked a minute ago.
""".format(external_file_ds.external_absolute_path())
start_time = timezone.now()
with capture_log_stream(logging.ERROR,
'librarian.Dataset') as mocked_stderr:
Dataset.external_file_check()
log_messages = mocked_stderr.getvalue()
external_file_ds.refresh_from_db()
self.assertLess(external_file_ds.last_time_checked, start_time)
self.assertTrue(external_file_ds.is_external_missing)
self.assertMultiLineEqual(expected_log_messages, log_messages)
def test_file_check_missing_two(self):
Dataset.objects.all().delete() # Remove existing datasets.
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext1_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd)
external_file_ds.last_time_checked = timezone.now() - timedelta(minutes=5)
external_file_ds.save()
os.remove(external_file_ds.external_absolute_path())
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext2_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd)
external_file_ds.last_time_checked = timezone.now() - timedelta(minutes=4)
external_file_ds.save()
os.remove(external_file_ds.external_absolute_path())
expected_log_messages = """\
Missing 2 external datasets. Most recent from {}, last checked 4 minutes ago.
""".format(external_file_ds.external_absolute_path())
with capture_log_stream(logging.ERROR,
'librarian.Dataset') as mocked_stderr:
Dataset.external_file_check()
log_messages = mocked_stderr.getvalue().replace(u'\xa0', ' ')
self.assertMultiLineEqual(expected_log_messages, log_messages)
def test_file_check_batches(self):
Dataset.objects.all().delete() # Remove existing datasets.
for _ in range(10):
Dataset.create_dataset(
os.path.join(self.working_dir, self.ext1_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd)
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext2_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd)
external_file_ds.last_time_checked = timezone.now() - timedelta(minutes=4)
external_file_ds.save()
os.remove(external_file_ds.external_absolute_path())
expected_log_messages = """\
Missing 1 external dataset. Most recent from {}, last checked 4 minutes ago.
""".format(external_file_ds.external_absolute_path())
with capture_log_stream(logging.ERROR,
'librarian.Dataset') as mocked_stderr:
Dataset.external_file_check(batch_size=10)
log_messages = mocked_stderr.getvalue().replace(u'\xa0', ' ')
self.assertMultiLineEqual(expected_log_messages, log_messages)
def test_file_check_file_restored(self):
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext1_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd)
external_file_ds.is_external_missing = True
external_file_ds.save()
expected_log_messages = ''
start_time = timezone.now()
with capture_log_stream(logging.ERROR,
'librarian.Dataset') as mocked_stderr:
Dataset.external_file_check()
log_messages = mocked_stderr.getvalue()
end_time = timezone.now()
external_file_ds.refresh_from_db()
self.assertGreaterEqual(external_file_ds.last_time_checked, start_time)
self.assertLessEqual(external_file_ds.last_time_checked, end_time)
self.assertFalse(external_file_ds.is_external_missing)
self.assertMultiLineEqual(expected_log_messages, log_messages)
def test_file_check_still_missing(self):
external_file_ds = Dataset.create_dataset(
os.path.join(self.working_dir, self.ext2_path),
user=self.myUser,
keep_file=False,
externalfiledirectory=self.efd)
external_file_ds.is_external_missing = True
external_file_ds.save()
os.remove(external_file_ds.external_absolute_path())
expected_log_messages = ''
start_time = timezone.now()
with capture_log_stream(logging.ERROR,
'librarian.Dataset') as mocked_stderr:
Dataset.external_file_check()
log_messages = mocked_stderr.getvalue()
external_file_ds.refresh_from_db()
self.assertLess(external_file_ds.last_time_checked, start_time)
self.assertTrue(external_file_ds.is_external_missing)
self.assertMultiLineEqual(expected_log_messages, log_messages)
|
|
'''
OptimizerForHDPFullVarModel.py
Model Notation
--------
Dirichlet-Multinomial model with K+1 possible components
v := K-length vector with entries in [0,1]
beta := K+1-length vector with entries in [0,1]
entries must sum to unity. sum(beta) = 1.
alpha0 := scalar, alpha0 > 0
Generate stick breaking fractions v
v[k] ~ Beta(1, alpha0)
Then deterministically obtain beta
    beta[k] = v[k] * prod(1 - v[:k]),   k = 1, 2, ... K
    beta[K+1] = prod_{k=1}^K (1 - v[k])
Then draw observed probability vectors
pi[d] ~ Dirichlet(gamma * beta), for d = 1, 2, ... D
CONSTRAINED Optimization Problem
----------
q(v) = Beta( v | u1, u0)
u* = argmax_u E_q[log p(pi | v) + log p( v ) - log q(v) ], subject to 0 <= u
UNCONSTRAINED Problem
----------
c* = argmax_c E_q[log p(pi | v) + log p( v ) - log q(v) ], u = exp(c), c is real
'''
import warnings
import numpy as np
import scipy.optimize
import scipy.io
from scipy.special import gammaln, digamma, polygamma
import datetime
import logging
import itertools
Log = logging.getLogger('bnpy')
EPS = 10*np.finfo(float).eps
lowTriIDsDict = dict()
def get_lowTriIDs(K):
if K in lowTriIDsDict:
return lowTriIDsDict[K]
else:
ltIDs = np.tril_indices(K, -1)
lowTriIDsDict[K] = ltIDs
return ltIDs
lowTriIDsFlatDict = dict()
def get_lowTriIDs_flat(K):
  if K in lowTriIDsFlatDict:
    return lowTriIDsFlatDict[K]
  else:
    ltIDs = np.tril_indices(K, -1)
    flatIDs = np.ravel_multi_index(ltIDs, (K, K))
    lowTriIDsFlatDict[K] = flatIDs
    return flatIDs
def estimate_u_multiple_tries(sumLogPi=None, nDoc=0, gamma=1.0, alpha0=1.0,
initu=None, initU=None, approx_grad=False,
fList=[1e7, 1e8, 1e10], **kwargs):
''' Estimate 2K-vector "u" via gradient descent,
gracefully using multiple restarts with progressively weaker tolerances
until one succeeds
Returns
--------
u : 2K-vector of positive parameters
fofu : scalar value of minimization objective
Info : dict
Raises
--------
ValueError with FAILURE in message if all restarts fail
'''
K = sumLogPi.size - 1
if initU is not None:
initu = initU
if initu is not None and not np.allclose(initu[-K:], alpha0):
uList = [initu, None]
else:
uList = [None]
nOverflow = 0
u = None
Info = dict()
msg = ''
for trial, myTuple in enumerate(itertools.product(uList, fList)):
initu, factr = myTuple
try:
u, fofu, Info = estimate_u(sumLogPi, nDoc, gamma, alpha0,
initu=initu, factr=factr, approx_grad=approx_grad)
Info['nRestarts'] = trial
Info['factr'] = factr
Info['msg'] = Info['task']
del Info['grad']
del Info['task']
break
except ValueError as err:
if str(err).count('FAILURE') == 0:
raise err
msg = str(err)
if str(err).count('overflow') > 0:
nOverflow += 1
if u is None:
raise ValueError("FAILURE! " + msg)
Info['nOverflow'] = nOverflow
return u, fofu, Info
def estimate_u(sumLogPi=None, nDoc=0, gamma=1.0, alpha0=1.0, initu=None, approx_grad=False, factr=1.0e7, **kwargs):
  ''' Run gradient optimization to estimate best u for specified problem
      Returns
      --------
      uhat : 2K-vector of positive values
      fofuhat: objective function value at uhat
      Info : dict with info about estimation algorithm
Raises
--------
ValueError on an overflow, any detection of NaN, or failure to converge
'''
sumLogPi = np.squeeze(np.asarray(sumLogPi, dtype=np.float64))
assert sumLogPi.ndim == 1
K = sumLogPi.size - 1
if initu is None:
initu = np.hstack( [np.ones(K), alpha0*np.ones(K)])
assert initu.size == 2*K
initc = np.log(initu)
myFunc = lambda c: objFunc_c(c, sumLogPi, nDoc, gamma, alpha0)
myGrad = lambda c: objGrad_c(c, sumLogPi, nDoc, gamma, alpha0)
with warnings.catch_warnings():
warnings.filterwarnings('error', category=RuntimeWarning,
message='overflow')
try:
chat, fhat, Info = scipy.optimize.fmin_l_bfgs_b(myFunc, initc,
fprime=myGrad, disp=None,
approx_grad=approx_grad,
factr=factr,
**kwargs)
except RuntimeWarning:
raise ValueError("FAILURE: overflow!" )
except AssertionError:
raise ValueError("FAILURE: NaN found!")
if Info['warnflag'] > 1:
raise ValueError("FAILURE: " + Info['task'])
uhat = np.exp(chat)
Info['initu'] = initu
Info['objFunc'] = lambda u: objFunc_u(u, sumLogPi, nDoc, gamma, alpha0)
Info['gradFunc'] = lambda u: objGrad_u(u, sumLogPi, nDoc, gamma, alpha0)
return uhat, fhat, Info
########################################################### Objective/gradient
########################################################### in terms of u
def objFunc_u(u, sumLogPi, nDoc, gamma, alpha0):
''' Returns scalar value of constrained objective function
Args
-------
u := 2K-vector of real numbers, subject to 0 < u
Returns
-------
f := -1 * L(u), where L is ELBO objective function (log posterior prob)
'''
assert not np.any(np.isnan(u))
# PREPARE building-block expectations
u1, u0 = _unpack(u)
E = _calcExpectations(u1, u0)
# CALCULATE each term in the function
K = u1.size
kvec = K+1 - np.arange(1, K+1)
Elog_pmq_v = np.sum(gammaln(u1) + gammaln(u0) - gammaln(u1 + u0)) \
+ np.inner(1.0 - u1, E['logv']) \
+ np.inner(alpha0 - u0, E['log1-v'])
if nDoc > 0:
Elogp_pi_C = np.sum(E['logv']) + np.inner(kvec, E['log1-v'])
Elogp_pi = np.inner(gamma * E['beta'] - 1, sumLogPi/nDoc)
Elog_pmq_v = Elog_pmq_v/nDoc
else:
Elogp_pi_C = 0
Elogp_pi = 0
elbo = Elogp_pi_C + Elogp_pi + Elog_pmq_v
return -1.0*elbo
def objGrad_u(u, sumLogPi, nDoc, gamma, alpha0):
''' Returns 2K-vector gradient of the constrained objective
Args
-------
u := 2K-vector of real numbers, subject to 0 < u
Returns
-------
g := 2K-vector of real numbers,
g[k] = gradient of -1 * L(u) with respect to u[k]
'''
assert not np.any(np.isnan(u))
# UNPACK AND PREPARE building-block quantities
u1, u0 = _unpack(u)
K = u1.size
E = _calcExpectations(u1, u0)
dU1, dU0 = _calcGradients(u1, u0, E=E)
kvec = K + 1 - np.arange(1, K+1)
digammaU1 = digamma(u1)
digammaU0 = digamma(u0)
digammaBoth = digamma(u1+u0)
dU1_Elogpmq_v = digammaU1 - digammaBoth \
+ (1 - u1) * dU1['Elogv'] \
- E['logv'] \
+ (alpha0 - u0) * dU1['Elog1-v']
dU0_Elogpmq_v = digammaU0 - digammaBoth \
+ (1 - u1) * dU0['Elogv'] \
- E['log1-v'] \
+ (alpha0 - u0) * dU0['Elog1-v']
if nDoc > 0:
dU1_Elogp_pi = dU1['Elogv'] + kvec * dU1['Elog1-v'] \
+ gamma * np.dot(dU1['Ebeta'], sumLogPi/nDoc)
dU0_Elogp_pi = dU0['Elogv'] + kvec * dU0['Elog1-v'] \
+ gamma * np.dot(dU0['Ebeta'], sumLogPi/nDoc)
dU1_Elogpmq_v /= nDoc
dU0_Elogpmq_v /= nDoc
else:
dU1_Elogp_pi = 0
dU0_Elogp_pi = 0
gvecU1 = dU1_Elogp_pi + dU1_Elogpmq_v
gvecU0 = dU0_Elogp_pi + dU0_Elogpmq_v
gvecU = np.hstack([gvecU1, gvecU0])
return -1.0 * gvecU
########################################################### calcExpectations
###########################################################
def _calcExpectations(u1, u0):
''' Calculate expectations of v and beta(v)
under the model v[k] ~ Beta(U1[k], U0[k])
'''
E = dict()
E['v'] = u1 / (u1 + u0)
E['1-v'] = u0 / (u1 + u0)
assert not np.any(np.isnan(E['v']))
E['beta'] = v2beta(E['v'])
digammaBoth = digamma(u1 + u0)
E['logv'] = digamma(u1) - digammaBoth
E['log1-v'] = digamma(u0) - digammaBoth
return E
def _calcGradients(u1, u0, E):
  ''' Calculate gradients of E[log v], E[log 1-v], and E[beta]
      with respect to the variational parameters u1 and u0
  '''
dU1 = dict()
dU0 = dict()
K = u1.size
uboth = u1 + u0
polygamma1Both = polygamma(1, uboth)
dU1['Elogv'] = polygamma(1, u1) - polygamma1Both
dU0['Elogv'] = -polygamma1Both
dU1['Elog1-v'] = -polygamma1Both
dU0['Elog1-v'] = polygamma(1,u0) - polygamma1Both
Q1 = u1 / (uboth * uboth)
Q0 = u0 / (uboth * uboth)
dU1_Ebeta = np.tile(E['beta'], (K,1))
dU1_Ebeta /= E['1-v'][:,np.newaxis]
diagIDs = np.diag_indices(K)
dU1_Ebeta[diagIDs] /= -E['v']/E['1-v']
# Slow way to force lower-triangle of dU1 to be all zeros
#lowTriIDs = np.tril_indices(K, -1)
#dU1_Ebeta[lowTriIDs] = 0
# Fast way
lowTriIDs = get_lowTriIDs(K)
dU1_Ebeta[lowTriIDs] = 0
# Fastest way
  #flatlowTriIDs = get_lowTriIDs_flat(K)
  #dU1_Ebeta.ravel()[flatlowTriIDs] = 0
dU0_Ebeta = dU1_Ebeta * Q1[:,np.newaxis]
dU1_Ebeta *= -1 * Q0[:,np.newaxis]
dU1['Ebeta'] = dU1_Ebeta
dU0['Ebeta'] = dU0_Ebeta
return dU1, dU0
########################################################### Objective/gradient
########################################################### in terms of c
def objFunc_c(c, *args):
''' Returns scalar value of unconstrained objective function
Args
-------
c := 2*K-vector of real numbers
Returns
-------
      f := -1 * L( exp(c) ), where L is ELBO objective (log posterior)
'''
return objFunc_u(np.exp(c), *args)
def objGrad_c(c, *args):
  ''' Returns 2K-vector gradient of unconstrained objective function
      Args
      -------
      c := 2K-vector of real numbers
      Returns
      -------
      g := 2K-vector of real numbers,
        g[k] = gradient of -1 * L( exp(c) ) with respect to c[k]
'''
  u = np.exp(c)
  dfdu = objGrad_u(u, *args)
  # chain rule: df/dc = df/du * du/dc, where u = exp(c) so du/dc = u
  dudc = u
  dfdc = dfdu * dudc
return dfdc
########################################################### Utility funcs
###########################################################
def _unpack(u):
  K = u.size // 2
u1 = u[:K]
u0 = u[K:]
return u1, u0
########################################################### Transform funcs
########################################################### u2v, u2beta
def u2v(u):
u1, u0 = _unpack(u)
return u1 / (u1 + u0)
def u2beta(u):
u1, u0 = _unpack(u)
v = u1 / (u1 + u0)
return v2beta(v)
########################################################### Transform funcs
########################################################### v2beta, beta2v
def v2beta(v):
  ''' Convert stick-breaking fractions v to probability vector beta
Args
--------
v : K-len vector, v[k] in interval [0, 1]
Returns
--------
beta : K+1-len vector, with positive entries that sum to 1
'''
v = np.asarray(v)
beta = np.hstack([1.0, np.cumprod(1-v)])
beta[:-1] *= v
return beta
def beta2v( beta ):
''' Convert probability vector beta to stick-breaking fractions v
Args
--------
beta : K+1-len vector, with positive entries that sum to 1
Returns
--------
v : K-len vector, v[k] in interval [0, 1]
'''
beta = np.asarray(beta)
K = beta.size
v = np.zeros(K-1)
cb = beta.copy()
for k in range(K-1):
cb[k] = 1 - cb[k]/np.prod( cb[:k] )
v[k] = beta[k]/np.prod( cb[:k] )
# Force away from edges 0 or 1 for numerical stability
v = np.maximum(v,EPS)
v = np.minimum(v,1-EPS)
return v
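# Illustrative sketch (not part of the original module; the names below are
# hypothetical): a quick numerical check of the stick-breaking transforms
# defined above. For v = [0.5, 0.5, 0.5], v2beta gives
# beta = [0.5, 0.25, 0.125, 0.125], which sums to one, and beta2v recovers v
# up to the EPS clipping. Runs only when this module is executed directly.
if __name__ == '__main__':
  vDemo = np.asarray([0.5, 0.5, 0.5])
  betaDemo = v2beta(vDemo)
  assert np.allclose(np.sum(betaDemo), 1.0)
  assert np.allclose(beta2v(betaDemo), vDemo)
  EDemo = _calcExpectations(np.ones(3), 2.0 * np.ones(3))
  assert np.allclose(np.sum(EDemo['beta']), 1.0)
  print(betaDemo)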
|
|
"""
Support for Sonarr.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.sonarr/
"""
import logging
import time
from datetime import datetime
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (CONF_API_KEY, CONF_HOST, CONF_PORT)
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.const import CONF_SSL
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
CONF_DAYS = 'days'
CONF_INCLUDED = 'include_paths'
CONF_UNIT = 'unit'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8989
DEFAULT_DAYS = '1'
DEFAULT_UNIT = 'GB'
SENSOR_TYPES = {
'diskspace': ['Disk Space', 'GB', 'mdi:harddisk'],
'queue': ['Queue', 'Episodes', 'mdi:download'],
'upcoming': ['Upcoming', 'Episodes', 'mdi:television'],
'wanted': ['Wanted', 'Episodes', 'mdi:television'],
'series': ['Series', 'Shows', 'mdi:television'],
'commands': ['Commands', 'Commands', 'mdi:code-braces']
}
ENDPOINTS = {
'diskspace': 'http{0}://{1}:{2}/api/diskspace?apikey={3}',
'queue': 'http{0}://{1}:{2}/api/queue?apikey={3}',
'upcoming': 'http{0}://{1}:{2}/api/calendar?apikey={3}&start={4}&end={5}',
'wanted': 'http{0}://{1}:{2}/api/wanted/missing?apikey={3}',
'series': 'http{0}://{1}:{2}/api/series?apikey={3}',
'commands': 'http{0}://{1}:{2}/api/command?apikey={3}'
}
# Support up to Yottabytes for the future, why not
BYTE_SIZES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]):
vol.All(cv.ensure_list, [vol.In(list(SENSOR_TYPES.keys()))]),
vol.Optional(CONF_INCLUDED, default=[]): cv.ensure_list,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DAYS, default=DEFAULT_DAYS): cv.string,
vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): vol.In(BYTE_SIZES)
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Sonarr platform."""
conditions = config.get(CONF_MONITORED_CONDITIONS)
add_devices(
[SonarrSensor(hass, config, sensor) for sensor in conditions]
)
return True
class SonarrSensor(Entity):
"""Implemention of the Sonarr sensor."""
def __init__(self, hass, conf, sensor_type):
"""Create Sonarr entity."""
from pytz import timezone
self.conf = conf
self.host = conf.get(CONF_HOST)
self.port = conf.get(CONF_PORT)
self.apikey = conf.get(CONF_API_KEY)
self.included = conf.get(CONF_INCLUDED)
self.days = int(conf.get(CONF_DAYS))
self.ssl = 's' if conf.get(CONF_SSL) else ''
# Object data
self._tz = timezone(str(hass.config.time_zone))
self.type = sensor_type
self._name = SENSOR_TYPES[self.type][0]
if self.type == 'diskspace':
self._unit = conf.get(CONF_UNIT)
else:
self._unit = SENSOR_TYPES[self.type][1]
self._icon = SENSOR_TYPES[self.type][2]
# Update sensor
self.update()
def update(self):
"""Update the data for the sensor."""
start = get_date(self._tz)
end = get_date(self._tz, self.days)
res = requests.get(
ENDPOINTS[self.type].format(
self.ssl, self.host, self.port, self.apikey, start, end),
timeout=5)
if res.status_code == 200:
if self.type in ['upcoming', 'queue', 'series', 'commands']:
if self.days == 1 and self.type == 'upcoming':
# Sonarr API returns an empty array if start and end dates
# are the same, so we need to filter to just today
self.data = list(
filter(
lambda x: x['airDate'] == str(start),
res.json()
)
)
else:
self.data = res.json()
self._state = len(self.data)
elif self.type == 'wanted':
data = res.json()
res = requests.get('{}&pageSize={}'.format(
ENDPOINTS[self.type].format(
self.ssl, self.host, self.port, self.apikey),
data['totalRecords']), timeout=5)
self.data = res.json()['records']
self._state = len(self.data)
elif self.type == 'diskspace':
# If included paths are not provided, use all data
if self.included == []:
self.data = res.json()
else:
# Filter to only show lists that are included
self.data = list(
filter(
lambda x: x['path'] in self.included,
res.json()
)
)
self._state = '{:.2f}'.format(
to_unit(
sum([data['freeSpace'] for data in self.data]),
self._unit
)
)
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format('Sonarr', self._name)
@property
def state(self):
"""Return sensor state."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of the sensor."""
return self._unit
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
attributes = {}
if self.type == 'upcoming':
for show in self.data:
attributes[show['series']['title']] = 'S{:02d}E{:02d}'.format(
show['seasonNumber'],
show['episodeNumber']
)
elif self.type == 'queue':
for show in self.data:
attributes[show['series']['title'] + ' S{:02d}E{:02d}'.format(
show['episode']['seasonNumber'],
show['episode']['episodeNumber']
)] = '{:.2f}%'.format(100*(1-(show['sizeleft']/show['size'])))
elif self.type == 'wanted':
for show in self.data:
attributes[show['series']['title'] + ' S{:02d}E{:02d}'.format(
show['seasonNumber'], show['episodeNumber']
)] = show['airDate']
elif self.type == 'commands':
for command in self.data:
attributes[command['name']] = command['state']
elif self.type == 'diskspace':
for data in self.data:
attributes[data['path']] = '{:.2f}/{:.2f}{} ({:.2f}%)'.format(
to_unit(data['freeSpace'], self._unit),
to_unit(data['totalSpace'], self._unit),
self._unit, (
to_unit(data['freeSpace'], self._unit) /
to_unit(data['totalSpace'], self._unit) * 100
)
)
elif self.type == 'series':
for show in self.data:
attributes[show['title']] = '{}/{} Episodes'.format(
show['episodeFileCount'],
show['episodeCount']
)
return attributes
@property
def icon(self):
"""Return the icon of the sensor."""
return self._icon
def get_date(zone, offset=0):
"""Get date based on timezone and offset of days."""
day = 60 * 60 * 24
return datetime.date(
datetime.fromtimestamp(time.time() + day*offset, tz=zone)
)
def to_unit(value, unit):
"""Convert bytes to give unit."""
return value / 1024**BYTE_SIZES.index(unit)
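# Illustrative sketch (not part of the original platform): to_unit divides by
# 1024 ** BYTE_SIZES.index(unit), so one gibibyte reported in bytes converts
# to 1.0 'GB' or 1024.0 'MB'. Runs only when this module is executed directly.
if __name__ == '__main__':
    assert to_unit(1024 ** 3, 'GB') == 1.0
    assert to_unit(1024 ** 3, 'MB') == 1024.0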
|
|
"""Tests for the SmartThings component init module."""
from uuid import uuid4
from aiohttp import ClientConnectionError, ClientResponseError
from asynctest import Mock, patch
from pysmartthings import InstalledAppStatus, OAuthToken
import pytest
from homeassistant.components import cloud, smartthings
from homeassistant.components.smartthings.const import (
CONF_CLOUDHOOK_URL, CONF_INSTALLED_APP_ID, CONF_REFRESH_TOKEN,
DATA_BROKERS, DOMAIN, EVENT_BUTTON, SIGNAL_SMARTTHINGS_UPDATE,
SUPPORTED_PLATFORMS)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from tests.common import MockConfigEntry
async def test_migration_creates_new_flow(
hass, smartthings_mock, config_entry):
"""Test migration deletes app and creates new flow."""
config_entry.version = 1
config_entry.add_to_hass(hass)
await smartthings.async_migrate_entry(hass, config_entry)
await hass.async_block_till_done()
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
assert not hass.config_entries.async_entries(DOMAIN)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]['handler'] == 'smartthings'
assert flows[0]['context'] == {'source': 'import'}
async def test_unrecoverable_api_errors_create_new_flow(
hass, config_entry, smartthings_mock):
"""
Test a new config flow is initiated when there are API errors.
401 (unauthorized): Occurs when the access token is no longer valid.
403 (forbidden/not found): Occurs when the app or installed app could
not be retrieved/found (likely deleted?)
"""
config_entry.add_to_hass(hass)
smartthings_mock.app.side_effect = \
ClientResponseError(None, None, status=401)
# Assert setup returns false
result = await smartthings.async_setup_entry(hass, config_entry)
assert not result
# Assert entry was removed and new flow created
await hass.async_block_till_done()
assert not hass.config_entries.async_entries(DOMAIN)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]['handler'] == 'smartthings'
assert flows[0]['context'] == {'source': 'import'}
hass.config_entries.flow.async_abort(flows[0]['flow_id'])
async def test_recoverable_api_errors_raise_not_ready(
hass, config_entry, smartthings_mock):
"""Test config entry not ready raised for recoverable API errors."""
config_entry.add_to_hass(hass)
smartthings_mock.app.side_effect = \
ClientResponseError(None, None, status=500)
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_api_errors_raise_not_ready(
hass, config_entry, app, installed_app, smartthings_mock):
"""Test if scenes are unauthorized we continue to load platforms."""
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.scenes.side_effect = \
ClientResponseError(None, None, status=500)
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_connection_errors_raise_not_ready(
hass, config_entry, smartthings_mock):
"""Test config entry not ready raised for connection errors."""
config_entry.add_to_hass(hass)
smartthings_mock.app.side_effect = ClientConnectionError()
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_base_url_no_longer_https_does_not_load(
hass, config_entry, app, smartthings_mock):
"""Test base_url no longer valid creates a new flow."""
hass.config.api.base_url = 'http://0.0.0.0'
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
# Assert setup returns false
result = await smartthings.async_setup_entry(hass, config_entry)
assert not result
async def test_unauthorized_installed_app_raises_not_ready(
hass, config_entry, app, installed_app,
smartthings_mock):
"""Test config entry not ready raised when the app isn't authorized."""
config_entry.add_to_hass(hass)
installed_app.installed_app_status = InstalledAppStatus.PENDING
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
with pytest.raises(ConfigEntryNotReady):
await smartthings.async_setup_entry(hass, config_entry)
async def test_scenes_unauthorized_loads_platforms(
hass, config_entry, app, installed_app,
device, smartthings_mock, subscription_factory):
"""Test if scenes are unauthorized we continue to load platforms."""
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.side_effect = \
ClientResponseError(None, None, status=403)
mock_token = Mock()
mock_token.access_token.return_value = str(uuid4())
mock_token.refresh_token.return_value = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [subscription_factory(capability)
for capability in device.capabilities]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries,
'async_forward_entry_setup') as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_config_entry_loads_platforms(
hass, config_entry, app, installed_app,
device, smartthings_mock, subscription_factory, scene):
"""Test config entry loads properly and proxies to platforms."""
config_entry.add_to_hass(hass)
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.return_value = [scene]
mock_token = Mock()
mock_token.access_token.return_value = str(uuid4())
mock_token.refresh_token.return_value = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [subscription_factory(capability)
for capability in device.capabilities]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(hass.config_entries,
'async_forward_entry_setup') as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_config_entry_loads_unconnected_cloud(
hass, config_entry, app, installed_app,
device, smartthings_mock, subscription_factory, scene):
"""Test entry loads during startup when cloud isn't connected."""
config_entry.add_to_hass(hass)
hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
hass.config.api.base_url = 'http://0.0.0.0'
smartthings_mock.app.return_value = app
smartthings_mock.installed_app.return_value = installed_app
smartthings_mock.devices.return_value = [device]
smartthings_mock.scenes.return_value = [scene]
mock_token = Mock()
mock_token.access_token.return_value = str(uuid4())
mock_token.refresh_token.return_value = str(uuid4())
smartthings_mock.generate_tokens.return_value = mock_token
subscriptions = [subscription_factory(capability)
for capability in device.capabilities]
smartthings_mock.subscriptions.return_value = subscriptions
with patch.object(
hass.config_entries, 'async_forward_entry_setup') as forward_mock:
assert await smartthings.async_setup_entry(hass, config_entry)
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_unload_entry(hass, config_entry):
"""Test entries are unloaded correctly."""
connect_disconnect = Mock()
smart_app = Mock()
smart_app.connect_event.return_value = connect_disconnect
broker = smartthings.DeviceBroker(
hass, config_entry, Mock(), smart_app, [], [])
broker.connect()
hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id] = broker
with patch.object(hass.config_entries, 'async_forward_entry_unload',
return_value=True) as forward_mock:
assert await smartthings.async_unload_entry(hass, config_entry)
assert connect_disconnect.call_count == 1
assert config_entry.entry_id not in hass.data[DOMAIN][DATA_BROKERS]
# Assert platforms unloaded
await hass.async_block_till_done()
assert forward_mock.call_count == len(SUPPORTED_PLATFORMS)
async def test_remove_entry(hass, config_entry, smartthings_mock):
"""Test that the installed app and app are removed up."""
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_cloudhook(hass, config_entry, smartthings_mock):
"""Test that the installed app, app, and cloudhook are removed up."""
# Arrange
config_entry.add_to_hass(hass)
hass.data[DOMAIN][CONF_CLOUDHOOK_URL] = "https://test.cloud"
# Act
with patch.object(cloud, 'async_is_logged_in',
return_value=True) as mock_async_is_logged_in, \
patch.object(cloud, 'async_delete_cloudhook') \
as mock_async_delete_cloudhook:
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
assert mock_async_is_logged_in.call_count == 1
assert mock_async_delete_cloudhook.call_count == 1
async def test_remove_entry_app_in_use(hass, config_entry, smartthings_mock):
"""Test app is not removed if in use by another config entry."""
# Arrange
config_entry.add_to_hass(hass)
data = config_entry.data.copy()
data[CONF_INSTALLED_APP_ID] = str(uuid4())
entry2 = MockConfigEntry(version=2, domain=DOMAIN, data=data)
entry2.add_to_hass(hass)
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_already_deleted(
hass, config_entry, smartthings_mock):
"""Test handles when the apps have already been removed."""
# Arrange
smartthings_mock.delete_installed_app.side_effect = ClientResponseError(
None, None, status=403)
smartthings_mock.delete_app.side_effect = ClientResponseError(
None, None, status=403)
# Act
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_installedapp_api_error(
hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the installed app."""
# Arrange
smartthings_mock.delete_installed_app.side_effect = \
ClientResponseError(None, None, status=500)
# Act
with pytest.raises(ClientResponseError):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_installedapp_unknown_error(
hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the installed app."""
# Arrange
smartthings_mock.delete_installed_app.side_effect = Exception
# Act
with pytest.raises(Exception):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 0
async def test_remove_entry_app_api_error(
hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the app."""
# Arrange
smartthings_mock.delete_app.side_effect = \
ClientResponseError(None, None, status=500)
# Act
with pytest.raises(ClientResponseError):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_remove_entry_app_unknown_error(
hass, config_entry, smartthings_mock):
"""Test raises exceptions removing the app."""
# Arrange
smartthings_mock.delete_app.side_effect = Exception
# Act
with pytest.raises(Exception):
await smartthings.async_remove_entry(hass, config_entry)
# Assert
assert smartthings_mock.delete_installed_app.call_count == 1
assert smartthings_mock.delete_app.call_count == 1
async def test_broker_regenerates_token(
hass, config_entry):
"""Test the device broker regenerates the refresh token."""
token = Mock(OAuthToken)
token.refresh_token = str(uuid4())
stored_action = None
def async_track_time_interval(hass, action, interval):
nonlocal stored_action
stored_action = action
with patch('homeassistant.components.smartthings'
'.async_track_time_interval',
new=async_track_time_interval):
broker = smartthings.DeviceBroker(
hass, config_entry, token, Mock(), [], [])
broker.connect()
assert stored_action
await stored_action(None) # pylint:disable=not-callable
assert token.refresh.call_count == 1
assert config_entry.data[CONF_REFRESH_TOKEN] == token.refresh_token
async def test_event_handler_dispatches_updated_devices(
hass, config_entry, device_factory, event_request_factory,
event_factory):
"""Test the event handler dispatches updated devices."""
devices = [
device_factory('Bedroom 1 Switch', ['switch']),
device_factory('Bathroom 1', ['switch']),
device_factory('Sensor', ['motionSensor']),
device_factory('Lock', ['lock'])
]
device_ids = [devices[0].device_id, devices[1].device_id,
devices[2].device_id, devices[3].device_id]
event = event_factory(devices[3].device_id, capability='lock',
attribute='lock', value='locked',
data={'codeId': '1'})
request = event_request_factory(device_ids=device_ids, events=[event])
config_entry.data[CONF_INSTALLED_APP_ID] = request.installed_app_id
called = False
def signal(ids):
nonlocal called
called = True
assert device_ids == ids
async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
broker = smartthings.DeviceBroker(
hass, config_entry, Mock(), Mock(), devices, [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert called
for device in devices:
assert device.status.values['Updated'] == 'Value'
assert devices[3].status.attributes['lock'].value == 'locked'
assert devices[3].status.attributes['lock'].data == {'codeId': '1'}
async def test_event_handler_ignores_other_installed_app(
hass, config_entry, device_factory, event_request_factory):
"""Test the event handler dispatches updated devices."""
device = device_factory('Bedroom 1 Switch', ['switch'])
request = event_request_factory([device.device_id])
called = False
def signal(ids):
nonlocal called
called = True
async_dispatcher_connect(hass, SIGNAL_SMARTTHINGS_UPDATE, signal)
broker = smartthings.DeviceBroker(
hass, config_entry, Mock(), Mock(), [device], [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert not called
async def test_event_handler_fires_button_events(
hass, config_entry, device_factory, event_factory,
event_request_factory):
"""Test the event handler fires button events."""
device = device_factory('Button 1', ['button'])
event = event_factory(device.device_id, capability='button',
attribute='button', value='pushed')
request = event_request_factory(events=[event])
config_entry.data[CONF_INSTALLED_APP_ID] = request.installed_app_id
called = False
def handler(evt):
nonlocal called
called = True
assert evt.data == {
'component_id': 'main',
'device_id': device.device_id,
'location_id': event.location_id,
'value': 'pushed',
'name': device.label,
'data': None
}
hass.bus.async_listen(EVENT_BUTTON, handler)
broker = smartthings.DeviceBroker(
hass, config_entry, Mock(), Mock(), [device], [])
broker.connect()
# pylint:disable=protected-access
await broker._event_handler(request, None, None)
await hass.async_block_till_done()
assert called
|
|
"""
Harvester for NIH.gov Research Portal Online Reporting Tools (RePORTER) for the SHARE Notification Service
Gets the weekly summary from ExPORTER on nih.gov, parses the XML and normalizes the data
An example file: http://exporter.nih.gov/XMLData/final/RePORTER_PRJ_X_FY2015_035.zip
Note: This harvester assigns incorrect dates to some XML files due to an inconsistency in the week-of-the-month
numbering in the project file names. All data are still guaranteed to be harvested, provided the harvest
window is sufficiently long (>1 month).
"""
from __future__ import unicode_literals
import logging
import re
from bs4 import BeautifulSoup
from datetime import date, timedelta
from dateutil.parser import parse
from io import BytesIO
from lxml import etree
from zipfile import ZipFile
from scrapi import requests
from scrapi import settings
from scrapi.base import XMLHarvester
from scrapi.util import copy_to_unicode
from scrapi.linter.document import RawDocument
from scrapi.base.schemas import default_name_parser
from scrapi.base.helpers import compose, single_result, build_properties, datetime_formatter
logger = logging.getLogger(__name__)
def daterange(start_date, end_date):
"""
    Get all the dates from start_date up to, but not including, end_date
"""
for ordinal in range(start_date.toordinal(), end_date.toordinal()):
yield date.fromordinal(ordinal)
def get_days_of_week(start_date, end_date, day_of_week):
"""
First convert start_date and end_date to have the day of week we require.
Then get all the dates of the specified day of week between the start_date and end_date.
"""
start_date = start_date - timedelta(days=(start_date.weekday() - day_of_week))
end_date = end_date - timedelta(days=(end_date.weekday() - day_of_week))
for ordinal in range(start_date.toordinal(), end_date.toordinal() + 1):
if date.fromordinal(ordinal).weekday() == day_of_week:
yield date.fromordinal(ordinal)
def get_fiscal_year(mydate=None):
    """
    Return the fiscal year of the given date (defaults to today). Each fiscal year starts on October 1
    """
    mydate = mydate or date.today()
    if mydate.month < 10:
return mydate.year
else:
return mydate.year + 1
def get_fiscal_years(dates):
"""
Given a range of dates, get unique fiscal years
"""
return tuple(set(map(get_fiscal_year, dates)))
def parse_month_column(month_column, day_of_week):
"""
Given a month column string, return the date of a day (Monday by default) of that week
An example of a month column string: September, 2015 - WEEK 1
"""
month_year, week = iter(map(lambda x: x.strip(), month_column.split('-')))
first_day = parse('1 ' + month_year)
first_day -= timedelta(days=(first_day.weekday() - day_of_week + 7 * (1 if first_day.weekday() - day_of_week <= 0 else 0)))
week = int(re.search('.*([0-9]{1,2})', week).group(1))
mydate = first_day + timedelta(week * 7)
return mydate.date()
def parse_row(row, day_of_week):
"""
Get a row of the ExPORTER table, return the date of a day (Monday by default) of that week, the fiscal year,
and the url of the xml file
To keep the format consistent, if the record is from previous fiscal years, None is returned
"""
row_text = list(map(lambda x: x.text.strip('\t').strip('\n').strip('\r').strip('<td>').strip('</td>'), row))
row_text = list(map(lambda x: x.strip(), row_text))
month_column = row_text[1]
fiscal_year = int(row_text[2])
url = row[3].find('a').get('href')
if month_column.lower() == u"all":
return (None, fiscal_year, url)
elif re.match('[A-Za-z]*, [0-9]{4} - .*', month_column):
date = parse_month_column(month_column, day_of_week)
return (date, fiscal_year, url)
def parse_rows(rows, day_of_week):
"""
A generator to parse all the rows
"""
for row in rows:
yield parse_row(row('td'), day_of_week)
def construct_urls(base_url, start_date, end_date, rows, day_of_week=0):
"""
    Given a date range, construct the URLs of the corresponding XML files.
"""
dates = [i for i in get_days_of_week(start_date, end_date, day_of_week)]
fiscal_years = get_fiscal_years(dates)
for data in parse_rows(rows, day_of_week):
if data[0] in dates or (data[0] is None and data[1] in fiscal_years):
yield "".join([base_url, data[2]])
def get_xml_files(urls):
for zip_url in urls:
data = requests.get(zip_url)
zipfile = ZipFile(BytesIO(data.content))
with zipfile.open(zipfile.namelist()[0], 'r') as f:
yield f.read()
def xml_records(files):
for xml_file in files:
records = etree.XML(xml_file).xpath('row')
for record in records:
yield record
def add_affiliation(name, org_name):
name['affiliation'] = [{'name': org_name.text}]
return name
def nih_name_parser(names, org_name):
"""
Takes a list of names and organization names, and attempts to parse them
"""
names = default_name_parser(names)
return list(map(add_affiliation, names, org_name))
class NIHHarvesters(XMLHarvester):
short_name = 'nih'
long_name = 'NIH Research Portal Online Reporting Tools'
url = 'http://exporter.nih.gov/ExPORTER_Catalog.aspx/'
project_base_url = 'https://projectreporter.nih.gov/project_info_description.cfm?aid={}'
foa_base_url = 'http://grants.nih.gov/grants/guide/pa-files/{}.html'
DEFAULT_ENCODING = 'UTF-8'
record_encoding = None
@property
def schema(self):
return {
"contributors": ('//PIS/PI/PI_NAME/node()', '//ORG_NAME', nih_name_parser),
"uris": {
"canonicalUri": ("//APPLICATION_ID/node()", compose(self.construct_project_url, single_result)),
"descriptorUris": ("//APPLICATION_ID/node()", "//FOA_NUMBER/node()",
self.construct_descriptor_uris)
},
"providerUpdatedDateTime": ("AWARD_NOTICE_DATE/node()", compose(datetime_formatter, single_result)),
"title": ('//PROJECT_TITLE/node()', single_result),
"tags": ('//PROJECT_TERMSX/TERM/node()'),
"otherProperties": build_properties(
("applicationID", "//APPLICATION_ID/node()"),
('activity', '//ACTIVITY/node()'),
('administeringIC', '//ADMINISTERING_IC/node()'),
('arraFunded', '//ARRA_FUNDED/node()'),
('budgetStart', '//BUDGET_START/node()'),
('budgetEnd', '//BUDGET_END/node()'),
('FOANumber', '//FOA_NUMBER/node()'),
('fullProjectNumber', '//FULL_PROJECT_NUM/node()'),
('fundingICs', '//FUNDING_ICs/node()'),
('fiscalYear', '//FY/node()'),
('NIHSpendingCats', '//NIH_SPENDING_CATS/@xsi:nil'),
('organizationCity', '//ORG_CITY/node()'),
                ('organizationCountry', '//ORG_COUNTRY/node()'),
('organizationDistrict', '//ORG_DISTRICT/node()'),
('organizationDUNS', '//ORG_DUNS/node()'),
('organizationDept', '//ORG_DEPT/node()'),
('organizationFIPS', '//ORG_FIPS/node()'),
('organizationState', '//ORG_STATE/node()'),
('organizationZipcode', '//ORG_ZIPCODE/node()'),
('ICName', '//IC_NAME/node()'),
('organizationName', '//ORG_NAME/node()'),
('projectStart', '//PROJECT_START/node()'),
('projectEnd', '//PROJECT_END/node()'),
('PHR', '//PHR/node()'),
('serialNumber', '//SERIAL_NUMBER/node()'),
('studySection', '//STUDY_SECTION/node()'),
('studySectionName', '//STUDY_SECTION_NAME/node()'),
('supportYear', '//SUPPORT_YEAR/node()'),
('suffix', '//SUFFIX/node()'),
('subProjectID', '//SUBPROJECT_ID/@xsi:nil'),
('totalCost', '//TOTAL_COST/node()'),
('totalCostSubProject', '//TOTAL_COST_SUB_PROJECT/node()'),
('coreProjectNumber', '//CORE_PROJECT_NUM/node()'),
('CFDACode', '//CFDA_CODE/node()'),
('programOfficerName', '//PROGRAM_OFFICER_NAME/node()'),
('edInstType', '//ED_INST_TYPE/node()'),
('awardNoticeDate', '//AWARD_NOTICE_DATE/node()'),
('fundingMechanism', '//FUNDING_MECHANISM/node()')
)
}
def construct_project_url(self, application_id):
return self.project_base_url.format(application_id)
def construct_descriptor_uris(self, application_id, foa_number):
return [
self.project_base_url.format(application_id[0]) if application_id else None,
self.foa_base_url.format(foa_number[0] if foa_number else None)
]
namespaces = {'xsi': "http://www.w3.org/2001/XMLSchema-instance"}
def harvest(self, start_date=None, end_date=None):
"""
Return a list of RawDocuments
"""
start_date = start_date or date.today() - timedelta(settings.DAYS_BACK)
end_date = end_date or date.today()
base_url = 'http://exporter.nih.gov/'
table_url = 'http://exporter.nih.gov/ExPORTER_Catalog.aspx/'
# get ExPORTER page html and rows storing records
html = requests.get(table_url).content
soup = BeautifulSoup(html, 'lxml')
table = soup.find('table', id="ContentPlaceHolder1_ProjectData_dgProjectData")
rows = table.find_all('tr', class_="row_bg")
urls = [i for i in construct_urls(base_url, start_date, end_date, rows)]
return [
RawDocument({
'doc': etree.tostring(record, encoding=self.DEFAULT_ENCODING),
'source': self.short_name,
'docID': copy_to_unicode(record.xpath('.//APPLICATION_ID/node()', namespaces=self.namespaces)[0]),
'filetype': 'xml'
}) for record in xml_records(get_xml_files(urls))
]
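# Illustrative sketch (not part of the original harvester): a quick sanity
# check of the date helpers defined above; it assumes the module's imports
# (scrapi, dateutil) are available and only runs when executed directly.
# October 2014 belongs to fiscal year 2015, and a month column such as
# "September, 2015 - WEEK 1" resolves to a Monday (day_of_week=0).
if __name__ == '__main__':
    assert get_fiscal_year(date(2014, 10, 1)) == 2015
    assert get_fiscal_year(date(2015, 9, 30)) == 2015
    assert parse_month_column('September, 2015 - WEEK 1', 0).weekday() == 0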
|
|
from flask import render_template, flash, redirect, url_for, g, jsonify, request, abort
from flask.ext.login import login_user, logout_user, current_user, login_required
from flask.ext.sqlalchemy import get_debug_queries
from datetime import datetime
from app import app, db, lm
from app import socketio
from config import DATABASE_QUERY_TIMEOUT
from .decorators import auth_required
from .forms import SignupForm, LoginForm, EditForm, PostForm, CommentForm
from .models import User, Post
from .utils import OAuthSignIn, PhotoPage, MembersPage, SignupPage, LoginPage
from flask.views import MethodView
import os
import json
basedir = os.path.abspath(os.path.dirname(__file__))
@socketio.on('my broadcast event', namespace='/greatpic')
def followers(message):
if "followers" in message['data']:
socketio.emit('followup', {'data': message['data']}, broadcast=True, namespace='/greatpic', include_self=False)
elif "votes" in message['data']:
socketio.emit('voteup', {'data': message['data']}, broadcast=True, namespace='/greatpic', include_self=False)
@app.route('/', methods=['GET'])
@app.route('/home/', methods=['GET'])
def home():
if current_user.is_authenticated():
return redirect(url_for('photos', category="latest"))
else:
return PhotoPage(title="home").render()
class SignupAPI(MethodView):
def post(self):
page = SignupPage(form=SignupForm(), category="signup")
if page.assets['body_form']:
return page.render()
else:
return redirect(url_for("members", nickname=current_user.nickname))
signup_api_view = SignupAPI.as_view('signup') # URLs for SIGNUP API
app.add_url_rule('/signup/', view_func=signup_api_view, methods=["GET", "POST"]) # Display and Validate Signup Form
class LoginAPI(MethodView):
def post(self):
page = LoginPage(form=LoginForm(), category="login")
if page.assets['body_form']:
return page.render()
else:
return redirect(url_for("members", nickname=current_user.nickname))
def get(self, get_provider=None, provider=None):
if get_provider is not None: # GET OAUTH PROVIDER
if not current_user.is_anonymous():
return redirect(url_for('home'))
oauth = OAuthSignIn.get_provider(get_provider)
return oauth.authorize()
elif provider is not None: # OAUTH PROVIDER CALLBACK
if not current_user.is_anonymous():
return redirect(url_for('home'))
oauth = OAuthSignIn.get_provider(provider)
nickname, email = oauth.callback()
if email is None:
flash('Authentication failed.')
return redirect("/photos/recent/")
currentuser = User.query.filter_by(email=email.lower()).first()
if not currentuser:
currentuser = User(nickname=nickname, email=email, photo="profile.jpg")
db.session.add(currentuser)
db.session.commit()
login_user(currentuser)
return redirect(request.args.get('next') or '/photos/latest')
else: # LOGIN PAGE
if g.user is not None and g.user.is_authenticated():
return redirect(url_for('photos'))
return LoginPage(category="login").render()
login_api_view = LoginAPI.as_view('login') # Urls for Login API
# Authenticate user
app.add_url_rule('/login/', view_func=login_api_view, methods=["GET", "POST"])
# Oauth login
app.add_url_rule('/login/<get_provider>', view_func=login_api_view, methods=["GET", ])
# Oauth provider callback
app.add_url_rule('/callback/<provider>', view_func=login_api_view, methods=["GET", ])
@app.route('/logout', methods=['GET'])
def logout():
logout_user()
return redirect(url_for('login'))
class MembersAPI(MethodView):
def post(self, category=None, nickname=None):
if category in ["follow", "unfollow"]:
page = MembersPage(nickname=nickname, category=category)
return page.render()
else:
page = MembersPage(form=EditForm(), category=category)
if 'body_form' in page.assets and page.assets['body_form'] is not None:
return page.render()
else:
return redirect(url_for("members", nickname=g.user.nickname))
def get(self, nickname=None, category="latest"):
if nickname is None: # Display all members
page = MembersPage(category=category).render()
return page
else: # Display a single member
if "key" in request.args:
g.user.photo = request.args['key']
db.session.add(g.user)
db.session.commit()
return redirect(url_for("members", nickname=nickname))
elif category in ["follow", "unfollow"]:
MembersPage(nickname=nickname, category=category)
return redirect(url_for('members', nickname=nickname))
else:
return MembersPage(nickname=nickname, category=category).render()
@login_required
def put(self, member_id):
user = User.query.get(member_id)
MembersPage(user, category="update")
@login_required
def patch(self, member_id):
person = User.query.get(member_id)
form = None
if 'photo' in request.json:
category = 'update'
form = EditForm(
photo=request.json['photo'],
nickname=g.user.nickname,
about_me=g.user.about_me
)
else:
if request.json['is_following']:
category = "follow"
else:
category = "unfollow"
page = MembersPage(person=person, category=category, form=form)
rendered_page = page.render()
return rendered_page
@login_required
def delete(self, nickname):
pass
members_api_view = MembersAPI.as_view('members') # URLS for MEMBER API
app.add_url_rule('/members/', # Read all members
view_func=members_api_view, methods=["GET"])
app.add_url_rule('/members/<int:member_id>',
view_func=members_api_view, methods=['PUT'])
app.add_url_rule('/members/<int:member_id>',
view_func=members_api_view, methods=['PATCH'])
app.add_url_rule("/members/<any('all', 'latest', 'update', 'upload'):category>/",
view_func=members_api_view, methods=["GET", "POST"])
app.add_url_rule('/members/<nickname>/', # Read, Update and Destroy a single member
                 view_func=members_api_view, methods=["GET", "POST"])
app.add_url_rule('/members/<nickname>/<category>/', # Get photos of a given category for a given member
view_func=members_api_view, methods=["GET", "POST"])
class PhotoAPI(MethodView):
@login_required
def post(self, post_id=None, category=None):
if category:
page = PhotoPage(post_id=post_id, category=category, form=CommentForm())
if page.assets['body_form'] or (page.assets['category'] and page.assets['category'] == "comment"):
return redirect('/photos/' + str(post_id))
else:
return redirect(url_for("photos", category="latest"))
else:
page = PhotoPage(category="upload", form=PostForm())
if page.assets['body_form']:
return page.render()
else:
return redirect(url_for("photos", category="latest"))
def get(self, post_id=None, category=None):
if current_user.is_authenticated() or category != "upload":
if post_id is None:
page = PhotoPage(category=category).render()
return page
else:
return PhotoPage(post_id=post_id, category=category).render()
else:
return LoginPage(title="login").render()
# Update Post
@auth_required
def put(self, post_id):
form = PostForm()
if form.validate_on_submit():
update_post = Post.query.get(post_id)
update_post.body = form.data['body']
db.session.commit()
response = update_post.json_view()
response['updatedsuccess'] = True
return json.dumps(response)
else:
result = {'updatedsuccess': False}
return json.dumps(result)
# Delete Post
@login_required
def delete(self, post_id):
post = Post.query.get(post_id)
db.session.delete(post)
db.session.commit()
result = {'deletedsuccess': True}
return json.dumps(result)
@login_required
def patch(self, post_id):
form = None
if 'has_voted' in request.json:
category = "vote"
elif 'body' in request.json:
category = "updatephoto"
post = Post.query.get(post_id)
form = PostForm(
body=request.json['body'],
photo=post.photo,
category=post.category
)
else:
category = "comment"
form = CommentForm(comment=request.json['comment'])
page = PhotoPage(post_id=post_id, category=category, form=form)
rendered_page = page.render()
return rendered_page
photo_api_view = PhotoAPI.as_view('photos')
# Read all posts for a given page, Create a new post
app.add_url_rule('/photos/',
view_func=photo_api_view, methods=["GET", "POST"])
# Get photos of a given category
app.add_url_rule('/photos/<any("all", "latest", "popular", "starred", "upload", "home", "architecture", "event",'
'"family", "fantasy", "fashion", "landscape", "macro", "portrait", "street", "sport", "travel",'
'"wildlife"):category>/',
view_func=photo_api_view, methods=["GET", "POST"])
# Get, Update, or Delete a single post
app.add_url_rule('/photos/<int:post_id>',
view_func=photo_api_view, methods=["GET", "PUT", "DELETE"])
app.add_url_rule('/photos/<int:post_id>',
view_func=photo_api_view, methods=['PATCH'])
# Vote or comment on a single post
app.add_url_rule('/photos/<int:post_id>/<any("vote", "comment"):category>',
view_func=photo_api_view, methods=["GET", "POST"])
@app.context_processor
def inject_static_url():
local_static_url = app.static_url_path
static_url = 'https://s3.amazonaws.com/aperturus/static/'
if os.environ.get('HEROKU') is not None:
local_static_url = static_url
if not static_url.endswith('/'):
static_url += '/'
if not local_static_url.endswith('/'):
local_static_url += '/'
return dict(
static_url=static_url,
local_static_url=local_static_url
)
@lm.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated():
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@app.after_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= DATABASE_QUERY_TIMEOUT:
app.logger.warning(
"SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" %
(query.statement, query.parameters, query.duration,
query.context))
return response
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html', error=error), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html', error=error), 500
def redirect_url(default='photos'):
return request.args.get('next') or \
request.referrer or \
url_for(default)
def make_public_picture(picture):
new_picture = {}
for field in picture:
if field == 'id':
new_picture['uri'] = url_for('picture_api', picture_id=picture['id'], _external=True)
else:
new_picture[field] = picture[field]
return new_picture
pictures = [
{
'id': 1,
'owner': u'Bill',
'description': u'Taken in the city'
},
{
'id': 2,
'owner': u'Bill',
'description': u'Taken in the country'
}
]
class PictureAPI(MethodView):
def get(self, picture_id):
if picture_id is None:
return jsonify({'pictures': [make_public_picture(picture) for picture in pictures]})
else:
picture = [picture for picture in pictures if picture['id'] == picture_id]
if len(picture) == 0:
abort(404)
return jsonify({'picture': make_public_picture(picture[0])})
@auth_required
def post(self):
if not request.json or 'owner' not in request.json:
abort(400)
picture = {
'id': pictures[-1]['id'] + 1,
'owner': request.json['owner'],
'description': request.json.get('description', ""),
}
pictures.append(picture)
return jsonify({'picture': picture}), 201
@auth_required
def delete(self, picture_id):
picture = [picture for picture in pictures if picture['id'] == picture_id]
if len(picture) == 0:
abort(404)
pictures.remove(picture[0])
return jsonify({'result': True})
@auth_required
def put(self, picture_id):
picture = [picture for picture in pictures if picture['id'] == picture_id]
if len(picture) == 0:
abort(404)
if not request.json:
abort(400)
if 'owner' in request.json and type(request.json['owner']) != unicode:
abort(400)
if 'description' in request.json and type(request.json['description']) is not unicode:
abort(400)
picture[0]['owner'] = request.json.get('owner', picture[0]['owner'])
picture[0]['description'] = request.json.get('description', picture[0]['description'])
return jsonify({'picture': make_public_picture(picture[0])})
user_view = PictureAPI.as_view('picture_api')
app.add_url_rule('/pictures/', defaults={'picture_id': None}, view_func=user_view, methods=['GET'])
app.add_url_rule('/pictures/', view_func=user_view, methods=['POST'])
app.add_url_rule('/pictures/<int:picture_id>', view_func=user_view, methods=['GET', 'PUT', 'DELETE'])
# curl -i http://localhost:8000/pictures/
# curl -i http://localhost:8000/pictures/2
# curl -i -H "Content-Type: application/json" -X POST -d '{"owner":"John","description":"Taken at the seashore"}'
# http://localhost:8000/pictures/
# curl -i -H "Content-Type: application/json" -X PUT -d '{"description":"eating out"}' http://localhost:8000/pictures/2
# curl -i -H "Content-Type: application/json" -X DELETE http://localhost:8000/pictures/2
|
|
from test.support import run_unittest, check_warnings
import cgi
import os
import sys
import tempfile
import unittest
import warnings
from collections import namedtuple
from io import StringIO, BytesIO
class HackedSysModule:
# The regression test will have real values in sys.argv, which
# will completely confuse the test of the cgi module
argv = []
stdin = sys.stdin
cgi.sys = HackedSysModule()
class ComparableException:
def __init__(self, err):
self.err = err
def __str__(self):
return str(self.err)
def __eq__(self, anExc):
if not isinstance(anExc, Exception):
return NotImplemented
return (self.err.__class__ == anExc.__class__ and
self.err.args == anExc.args)
def __getattr__(self, attr):
return getattr(self.err, attr)
def do_test(buf, method):
env = {}
if method == "GET":
fp = None
env['REQUEST_METHOD'] = 'GET'
env['QUERY_STRING'] = buf
elif method == "POST":
fp = BytesIO(buf.encode('latin-1')) # FieldStorage expects bytes
env['REQUEST_METHOD'] = 'POST'
env['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
env['CONTENT_LENGTH'] = str(len(buf))
else:
raise ValueError("unknown method: %s" % method)
try:
return cgi.parse(fp, env, strict_parsing=1)
except Exception as err:
return ComparableException(err)
parse_strict_test_cases = [
("", ValueError("bad query field: ''")),
("&", ValueError("bad query field: ''")),
("&&", ValueError("bad query field: ''")),
(";", ValueError("bad query field: ''")),
(";&;", ValueError("bad query field: ''")),
# Should the next few really be valid?
("=", {}),
("=&=", {}),
("=;=", {}),
    # The rest seem to make sense
("=a", {'': ['a']}),
("&=a", ValueError("bad query field: ''")),
("=a&", ValueError("bad query field: ''")),
("=&a", ValueError("bad query field: 'a'")),
("b=a", {'b': ['a']}),
("b+=a", {'b ': ['a']}),
("a=b=a", {'a': ['b=a']}),
("a=+b=a", {'a': [' b=a']}),
("&b=a", ValueError("bad query field: ''")),
("b&=a", ValueError("bad query field: 'b'")),
("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
{'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
'cuyer': ['r'],
'expire': ['964546263'],
'kid': ['130003.300038'],
'lobale': ['en-US'],
'order_id': ['0bb2e248638833d48cb7fed300000f1b'],
'ss': ['env'],
'view': ['bustomer'],
}),
("group_id=5470&set=custom&_assigned_to=31392&_status=1&_category=100&SUBMIT=Browse",
{'SUBMIT': ['Browse'],
'_assigned_to': ['31392'],
'_category': ['100'],
'_status': ['1'],
'group_id': ['5470'],
'set': ['custom'],
})
]
def norm(seq):
return sorted(seq, key=repr)
def first_elts(list):
return [p[0] for p in list]
def first_second_elts(list):
return [(p[0], p[1][0]) for p in list]
def gen_result(data, environ):
encoding = 'latin-1'
fake_stdin = BytesIO(data.encode(encoding))
fake_stdin.seek(0)
form = cgi.FieldStorage(fp=fake_stdin, environ=environ, encoding=encoding)
result = {}
for k, v in dict(form).items():
result[k] = isinstance(v, list) and form.getlist(k) or v.value
return result
class CgiTests(unittest.TestCase):
def test_parse_multipart(self):
fp = BytesIO(POSTDATA.encode('latin1'))
env = {'boundary': BOUNDARY.encode('latin1'),
'CONTENT-LENGTH': '558'}
result = cgi.parse_multipart(fp, env)
expected = {'submit': [b' Add '], 'id': [b'1234'],
'file': [b'Testing 123.\n'], 'title': [b'']}
self.assertEqual(result, expected)
def test_fieldstorage_properties(self):
fs = cgi.FieldStorage()
self.assertFalse(fs)
self.assertIn("FieldStorage", repr(fs))
self.assertEqual(list(fs), list(fs.keys()))
fs.list.append(namedtuple('MockFieldStorage', 'name')('fieldvalue'))
self.assertTrue(fs)
def test_fieldstorage_invalid(self):
self.assertRaises(TypeError, cgi.FieldStorage, "not-a-file-obj",
environ={"REQUEST_METHOD":"PUT"})
self.assertRaises(TypeError, cgi.FieldStorage, "foo", "bar")
fs = cgi.FieldStorage(headers={'content-type':'text/plain'})
self.assertRaises(TypeError, bool, fs)
def test_escape(self):
# cgi.escape() is deprecated.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'cgi\.escape',
DeprecationWarning)
self.assertEqual("test & string", cgi.escape("test & string"))
self.assertEqual("<test string>", cgi.escape("<test string>"))
self.assertEqual(""test string"", cgi.escape('"test string"', True))
def test_strict(self):
for orig, expect in parse_strict_test_cases:
# Test basic parsing
d = do_test(orig, "GET")
self.assertEqual(d, expect, "Error parsing %s method GET" % repr(orig))
d = do_test(orig, "POST")
self.assertEqual(d, expect, "Error parsing %s method POST" % repr(orig))
env = {'QUERY_STRING': orig}
fs = cgi.FieldStorage(environ=env)
if isinstance(expect, dict):
# test dict interface
self.assertEqual(len(expect), len(fs))
self.assertCountEqual(expect.keys(), fs.keys())
##self.assertEqual(norm(expect.values()), norm(fs.values()))
##self.assertEqual(norm(expect.items()), norm(fs.items()))
self.assertEqual(fs.getvalue("nonexistent field", "default"), "default")
# test individual fields
for key in expect.keys():
expect_val = expect[key]
self.assertIn(key, fs)
if len(expect_val) > 1:
self.assertEqual(fs.getvalue(key), expect_val)
else:
self.assertEqual(fs.getvalue(key), expect_val[0])
def test_log(self):
cgi.log("Testing")
cgi.logfp = StringIO()
cgi.initlog("%s", "Testing initlog 1")
cgi.log("%s", "Testing log 2")
self.assertEqual(cgi.logfp.getvalue(), "Testing initlog 1\nTesting log 2\n")
if os.path.exists("/dev/null"):
cgi.logfp = None
cgi.logfile = "/dev/null"
cgi.initlog("%s", "Testing log 3")
self.addCleanup(cgi.closelog)
cgi.log("Testing log 4")
def test_fieldstorage_readline(self):
# FieldStorage uses readline, which has the capacity to read all
# contents of the input file into memory; we use readline's size argument
# to prevent that for files that do not contain any newlines in
# non-GET/HEAD requests
class TestReadlineFile:
def __init__(self, file):
self.file = file
self.numcalls = 0
def readline(self, size=None):
self.numcalls += 1
if size:
return self.file.readline(size)
else:
return self.file.readline()
def __getattr__(self, name):
file = self.__dict__['file']
a = getattr(file, name)
if not isinstance(a, int):
setattr(self, name, a)
return a
f = TestReadlineFile(tempfile.TemporaryFile("wb+"))
self.addCleanup(f.close)
f.write(b'x' * 256 * 1024)
f.seek(0)
env = {'REQUEST_METHOD':'PUT'}
fs = cgi.FieldStorage(fp=f, environ=env)
self.addCleanup(fs.file.close)
# if we're not chunking properly, readline is only called twice
# (by read_binary); if we are chunking properly, it will be called 5 times
# as long as the chunksize is 1 << 16.
self.assertGreater(f.numcalls, 2)
f.close()
def test_fieldstorage_multipart(self):
#Test basic FieldStorage multipart parsing
env = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH': '558'}
fp = BytesIO(POSTDATA.encode('latin-1'))
fs = cgi.FieldStorage(fp, environ=env, encoding="latin-1")
self.assertEqual(len(fs.list), 4)
expect = [{'name':'id', 'filename':None, 'value':'1234'},
{'name':'title', 'filename':None, 'value':''},
{'name':'file', 'filename':'test.txt', 'value':b'Testing 123.\n'},
{'name':'submit', 'filename':None, 'value':' Add '}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
def test_fieldstorage_multipart_non_ascii(self):
#Test basic FieldStorage multipart parsing
env = {'REQUEST_METHOD':'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH':'558'}
for encoding in ['iso-8859-1','utf-8']:
fp = BytesIO(POSTDATA_NON_ASCII.encode(encoding))
fs = cgi.FieldStorage(fp, environ=env,encoding=encoding)
self.assertEqual(len(fs.list), 1)
expect = [{'name':'id', 'filename':None, 'value':'\xe7\xf1\x80'}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
def test_fieldstorage_multipart_maxline(self):
# Issue #18167
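        # 1 << 16 matches the chunk size FieldStorage passes to readline(); the checks below straddle that boundary.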
maxline = 1 << 16
self.maxDiff = None
def check(content):
data = """---123
Content-Disposition: form-data; name="upload"; filename="fake.txt"
Content-Type: text/plain
%s
---123--
""".replace('\n', '\r\n') % content
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'REQUEST_METHOD': 'POST',
}
self.assertEqual(gen_result(data, environ),
{'upload': content.encode('latin1')})
check('x' * (maxline - 1))
check('x' * (maxline - 1) + '\r')
check('x' * (maxline - 1) + '\r' + 'y' * (maxline - 1))
def test_fieldstorage_multipart_w3c(self):
# Test basic FieldStorage multipart parsing (W3C sample)
env = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY_W3),
'CONTENT_LENGTH': str(len(POSTDATA_W3))}
fp = BytesIO(POSTDATA_W3.encode('latin-1'))
fs = cgi.FieldStorage(fp, environ=env, encoding="latin-1")
self.assertEqual(len(fs.list), 2)
self.assertEqual(fs.list[0].name, 'submit-name')
self.assertEqual(fs.list[0].value, 'Larry')
self.assertEqual(fs.list[1].name, 'files')
files = fs.list[1].value
self.assertEqual(len(files), 2)
expect = [{'name': None, 'filename': 'file1.txt', 'value': b'... contents of file1.txt ...'},
{'name': None, 'filename': 'file2.gif', 'value': b'...contents of file2.gif...'}]
for x in range(len(files)):
for k, exp in expect[x].items():
got = getattr(files[x], k)
self.assertEqual(got, exp)
_qs_result = {
'key1': 'value1',
'key2': ['value2x', 'value2y'],
'key3': 'value3',
'key4': 'value4'
}
def testQSAndUrlEncode(self):
data = "key2=value2x&key3=value3&key4=value4"
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'QUERY_STRING': 'key1=value1&key2=value2y',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormData(self):
data = """---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormDataFile(self):
data = """---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123
Content-Disposition: form-data; name="upload"; filename="fake.txt"
Content-Type: text/plain
this is the content of the fake file
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
result = self._qs_result.copy()
result.update({
'upload': b'this is the content of the fake file\n'
})
v = gen_result(data, environ)
self.assertEqual(result, v)
def test_deprecated_parse_qs(self):
# this func is moved to urllib.parse, this is just a sanity check
with check_warnings(('cgi.parse_qs is deprecated, use urllib.parse.'
'parse_qs instead', DeprecationWarning)):
self.assertEqual({'a': ['A1'], 'B': ['B3'], 'b': ['B2']},
cgi.parse_qs('a=A1&b=B2&B=B3'))
def test_deprecated_parse_qsl(self):
# this func is moved to urllib.parse, this is just a sanity check
with check_warnings(('cgi.parse_qsl is deprecated, use urllib.parse.'
'parse_qsl instead', DeprecationWarning)):
self.assertEqual([('a', 'A1'), ('b', 'B2'), ('B', 'B3')],
cgi.parse_qsl('a=A1&b=B2&B=B3'))
def test_parse_header(self):
self.assertEqual(
cgi.parse_header("text/plain"),
("text/plain", {}))
self.assertEqual(
cgi.parse_header("text/vnd.just.made.this.up ; "),
("text/vnd.just.made.this.up", {}))
self.assertEqual(
cgi.parse_header("text/plain;charset=us-ascii"),
("text/plain", {"charset": "us-ascii"}))
self.assertEqual(
cgi.parse_header('text/plain ; charset="us-ascii"'),
("text/plain", {"charset": "us-ascii"}))
self.assertEqual(
cgi.parse_header('text/plain ; charset="us-ascii"; another=opt'),
("text/plain", {"charset": "us-ascii", "another": "opt"}))
self.assertEqual(
cgi.parse_header('attachment; filename="silly.txt"'),
("attachment", {"filename": "silly.txt"}))
self.assertEqual(
cgi.parse_header('attachment; filename="strange;name"'),
("attachment", {"filename": "strange;name"}))
self.assertEqual(
cgi.parse_header('attachment; filename="strange;name";size=123;'),
("attachment", {"filename": "strange;name", "size": "123"}))
self.assertEqual(
cgi.parse_header('form-data; name="files"; filename="fo\\"o;bar"'),
("form-data", {"name": "files", "filename": 'fo"o;bar'}))
BOUNDARY = "---------------------------721837373350705526688164684"
POSTDATA = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
1234
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="title"
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="file"; filename="test.txt"
Content-Type: text/plain
Testing 123.
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="submit"
Add\x20
-----------------------------721837373350705526688164684--
"""
POSTDATA_NON_ASCII = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
\xe7\xf1\x80
-----------------------------721837373350705526688164684
"""
# http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4
BOUNDARY_W3 = "AaB03x"
POSTDATA_W3 = """--AaB03x
Content-Disposition: form-data; name="submit-name"
Larry
--AaB03x
Content-Disposition: form-data; name="files"
Content-Type: multipart/mixed; boundary=BbC04y
--BbC04y
Content-Disposition: file; filename="file1.txt"
Content-Type: text/plain
... contents of file1.txt ...
--BbC04y
Content-Disposition: file; filename="file2.gif"
Content-Type: image/gif
Content-Transfer-Encoding: binary
...contents of file2.gif...
--BbC04y--
--AaB03x--
"""
def test_main():
run_unittest(CgiTests)
if __name__ == '__main__':
test_main()
import os
import scipy as sp
import gzip
import h5py
import sys
from ldpred import sum_stats_parsers
from ldpred import reporting
from ldpred import util
from ldpred import plinkfiles
from plinkio import plinkfile
import time
def _verify_coord_data_(data_dict):
"""
Verify that merged data is ok
"""
num_snps = len(data_dict['raw_snps_ref'])
assert num_snps ==len(data_dict['snp_stds_ref']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['snp_means_ref']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['freqs_ref']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['ps']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['positions']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['nts']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['sids']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['betas']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['log_odds']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['ns']), 'Inconsistencies in coordinated data sizes'
if 'raw_snps_val' in data_dict:
assert num_snps ==len(data_dict['raw_snps_val']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['snp_stds_val']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['snp_means_val']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['freqs_val']), 'Inconsistencies in coordinated data sizes'
def write_coord_data(cord_data_g, coord_dict, debug=False):
_verify_coord_data_(coord_dict)
if debug:
print('Storing coordinated data to HDF5 file.')
ofg = cord_data_g.create_group(coord_dict['chrom'])
ofg.create_dataset('raw_snps_ref', data=coord_dict['raw_snps_ref'], compression='lzf')
ofg.create_dataset('snp_stds_ref', data=coord_dict['snp_stds_ref'])
ofg.create_dataset('snp_means_ref', data=coord_dict['snp_means_ref'])
ofg.create_dataset('freqs_ref', data=coord_dict['freqs_ref'])
if 'raw_snps_val' in coord_dict:
ofg.create_dataset('raw_snps_val', data=coord_dict['raw_snps_val'], compression='lzf')
ofg.create_dataset('snp_stds_val', data=coord_dict['snp_stds_val'])
ofg.create_dataset('snp_means_val', data=coord_dict['snp_means_val'])
ofg.create_dataset('freqs_val', data=coord_dict['freqs_val'])
ofg.create_dataset('log_odds_prs', data=coord_dict['log_odds_prs'])
ofg.create_dataset('ps', data=coord_dict['ps'])
ofg.create_dataset('positions', data=coord_dict['positions'])
ofg.create_dataset('nts', data=sp.array(coord_dict['nts'],dtype=util.nts_dtype))
ofg.create_dataset('sids', data=sp.array(coord_dict['sids'],dtype=util.sids_dtype))
ofg.create_dataset('betas', data=coord_dict['betas'])
ofg.create_dataset('log_odds', data=coord_dict['log_odds'])
ofg.create_dataset('ns', data=coord_dict['ns'])
if coord_dict['genetic_map'] is not None:
ofg.create_dataset('genetic_map', data=coord_dict['genetic_map'])
def write_parameter_data(p_dict, h5f, debug=False):
if debug:
print('Storing parameter information in coordinated file.')
print (p_dict)
pg = h5f.create_group('parameters')
if p_dict['N'] is not None:
pg.create_dataset('N', data=p_dict['N'])
pg.create_dataset('only_hm3', data=p_dict['only_hm3'])
pg.create_dataset('eff_type', data=p_dict['eff_type'])
pg.create_dataset('skip_coordination', data=p_dict['skip_coordination'])
pg.create_dataset('match_genomic_pos', data=p_dict['match_genomic_pos'])
pg.create_dataset('maf', data=p_dict['maf'])
pg.create_dataset('max_freq_discrep', data=p_dict['max_freq_discrep'])
pg.create_dataset('ssf_format', data=p_dict['ssf_format'])
pg.create_dataset('rs', data=p_dict['rs'])
pg.create_dataset('A1', data=p_dict['A1'])
pg.create_dataset('A2', data=p_dict['A2'])
pg.create_dataset('pos', data=p_dict['pos'])
pg.create_dataset('info', data=p_dict['info'])
pg.create_dataset('chr', data=p_dict['chr'])
pg.create_dataset('reffreq', data=p_dict['reffreq'])
pg.create_dataset('pval', data=p_dict['pval'])
pg.create_dataset('eff', data=p_dict['eff'])
pg.create_dataset('se', data=p_dict['se'])
pg.create_dataset('ncol', data=p_dict['ncol'])
if p_dict['case_freq'] is not None:
pg.create_dataset('case_freq', data=p_dict['case_freq'])
if p_dict['control_freq'] is not None:
pg.create_dataset('control_freq', data=p_dict['control_freq'])
if p_dict['case_n'] is not None:
pg.create_dataset('case_n', data=p_dict['case_n'])
if p_dict['control_n'] is not None:
pg.create_dataset('control_n', data=p_dict['control_n'])
pg.create_dataset('z_from_se', data=p_dict['z_from_se'])
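# Per-SNP standard deviation across individuals; a value of 0 flags a monomorphic SNP, which is filtered out downstream.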
def get_snp_stds(raw_snps):
return sp.std(raw_snps, axis=1, dtype='float32')
def get_mean_sample_size(n, cord_data_g):
if n is None:
all_ns = []
for chrom_str in util.chromosomes_list:
if chrom_str in cord_data_g:
g = cord_data_g[chrom_str]
all_ns.extend(g['ns'][...])
        assert len(all_ns) > 0, 'Sample size missing. Please use --N flag, or ensure they are parsed as part of the summary statistics.'
mean_n = sp.mean(all_ns)
else:
mean_n = n
return mean_n
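# Apply a boolean SNP mask to every per-SNP array in the coordinated-data dict (validation arrays included when present).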
def filter_coord_data(cd, filter_mask):
data_keys = ['raw_snps', 'snp_stds', 'snp_means', 'freqs' ,'ps', 'ns', 'positions', 'nts', 'sids','betas','log_odds']
if 'raw_snps_val' in cd.keys():
data_keys.extend(['raw_snps_val', 'snp_stds_val', 'snp_means_val', 'freqs_val'])
for k in data_keys:
cd[k] = cd[k][filter_mask]
def coordinate_datasets(reference_genotype_file, hdf5_file, summary_dict,
validation_genotype_file=None,
genetic_map_dir=None,
min_maf=0.01,
skip_coordination=False,
max_freq_discrep = 0.15,
debug=False):
summary_dict[3.9]={'name':'dash', 'value':'Coordination'}
t0 = time.time()
if validation_genotype_file is not None:
print('Coordinating datasets (Summary statistics, LD reference genotypes, and Validation genotypes).')
else:
print('Coordinating datasets (Summary statistics and LD reference genotypes).')
plinkf = plinkfile.PlinkFile(reference_genotype_file)
# Figure out chromosomes and positions.
if debug:
print('Parsing plinkf_dict_val reference genotypes')
loci = plinkf.get_loci()
plinkf.close()
summary_dict[4]={'name':'Num individuals in LD Reference data:','value':plinkfiles.get_num_indivs(reference_genotype_file)}
summary_dict[4.1]={'name':'SNPs in LD Reference data:','value':len(loci)}
gf_chromosomes = [l.chromosome for l in loci]
chromosomes = sp.unique(gf_chromosomes)
chromosomes.sort()
chr_dict = plinkfiles.get_chrom_dict(loci, chromosomes, debug)
if validation_genotype_file is not None:
if debug:
print('Parsing LD validation bim file')
plinkf_val = plinkfile.PlinkFile(validation_genotype_file)
# Loads only the individuals...
plinkf_dict_val = plinkfiles.get_phenotypes(plinkf_val)
loci_val = plinkf_val.get_loci()
plinkf_val.close()
summary_dict[5]={'name':'SNPs in Validation data:','value':len(loci_val)}
chr_dict_val = plinkfiles.get_chrom_dict(loci_val, chromosomes, debug)
# Open HDF5 file and prepare out data
assert not 'iids' in hdf5_file, 'Something is wrong with the HDF5 file, no individuals IDs were found.'
if plinkf_dict_val['has_phenotype']:
hdf5_file.create_dataset('y', data=plinkf_dict_val['phenotypes'])
summary_dict[6]={'name':'Num validation phenotypes:','value':plinkf_dict_val['num_individs']}
hdf5_file.create_dataset('fids', data=sp.array(plinkf_dict_val['fids'], dtype=util.fids_dtype))
hdf5_file.create_dataset('iids', data=sp.array(plinkf_dict_val['iids'], dtype=util.iids_dtype))
maf_adj_risk_scores = sp.zeros(plinkf_dict_val['num_individs'])
# Now summary statistics
ssf = hdf5_file['sum_stats']
cord_data_g = hdf5_file.create_group('cord_data')
chromosomes_found = set()
num_snps_common_before_filtering =0
num_snps_common_after_filtering =0
tot_num_non_matching_nts = 0
tot_num_non_supported_nts = 0
tot_num_ambig_nts = 0
tot_num_freq_discrep_filtered_snps = 0
tot_num_maf_filtered_snps = 0
tot_g_ss_nt_concord_count = 0
tot_num_flipped_nts = 0
if validation_genotype_file is not None:
tot_g_vg_nt_concord_count = 0
tot_vg_ss_nt_concord_count = 0
# Now iterate over chromosomes
chrom_i = 0
for chrom in chromosomes:
chrom_i +=1
if not debug:
sys.stdout.write('\r%0.2f%%' % (100.0 * (float(chrom_i) / (len(chromosomes)+1))))
sys.stdout.flush()
try:
chr_str = 'chrom_%d' % chrom
ssg = ssf[chr_str]
except Exception as err_str:
if debug:
print(err_str)
print('Did not find chromosome %d in SS dataset.'%chrom)
continue
if debug:
print('Coordinating data for chromosome %s' % chr_str)
chromosomes_found.add(chrom)
#Get summary statistics chromosome group
ssg = ssf['chrom_%d' % chrom]
ss_sids = (ssg['sids'][...]).astype(util.sids_u_dtype)
if validation_genotype_file is not None:
chrom_d_val = chr_dict_val[chr_str]
vg_sids = chrom_d_val['sids']
common_sids = sp.intersect1d(ss_sids, vg_sids)
# A map from sid to index for validation data
vg_sid_dict = {}
for i, sid in enumerate(vg_sids):
vg_sid_dict[sid] = i
else:
common_sids = ss_sids
# A map from sid to index for summary stats
ss_sid_dict = {}
for i, sid in enumerate(ss_sids):
ss_sid_dict[sid] = i
#The indices to retain for the LD reference genotypes
chrom_d = chr_dict[chr_str]
g_sids = chrom_d['sids']
common_sids = sp.intersect1d(common_sids, g_sids)
# A map from sid to index for LD reference data
g_sid_dict = {}
for i, sid in enumerate(g_sids):
g_sid_dict[sid] = i
if debug:
print('Found %d SNPs on chrom %d that were common across all datasets' % (len(common_sids), chrom))
print('Ordering SNPs by genomic positions (based on LD reference genotypes).')
g_snp_map = []
for sid in common_sids:
g_snp_map.append(g_sid_dict[sid])
# order by positions (based on LD reference file)
g_positions = sp.array(chrom_d['positions'])[g_snp_map]
order = sp.argsort(g_positions)
g_snp_map = sp.array(g_snp_map)[order]
g_snp_map = g_snp_map.tolist()
common_sids = sp.array(common_sids)[order]
# Get the ordered sum stats SNPs indices.
ss_snp_map = []
for sid in common_sids:
ss_snp_map.append(ss_sid_dict[sid])
# Get the ordered validation SNPs indices
if validation_genotype_file is not None:
vg_snp_map = []
for sid in common_sids:
vg_snp_map.append(vg_sid_dict[sid])
vg_nts = sp.array(chrom_d_val['nts'])
vg_nts_ok = sp.array(vg_nts)[vg_snp_map]
g_nts = sp.array(chrom_d['nts'])
ss_nts = (ssg['nts'][...]).astype(util.nts_u_dtype)
betas = ssg['betas'][...]
log_odds = ssg['log_odds'][...]
if 'freqs' in ssg:
ss_freqs = ssg['freqs'][...]
g_ss_nt_concord_count = sp.sum(
g_nts[g_snp_map] == ss_nts[ss_snp_map]) / 2.0
if validation_genotype_file is not None:
vg_ss_nt_concord_count = sp.sum(vg_nts_ok == ss_nts[ss_snp_map]) / 2.0
g_vg_nt_concord_count = sp.sum(g_nts[g_snp_map] == vg_nts_ok) / 2.0
if debug:
print('Nucleotide concordance counts out of %d genotypes, vg-rg: %d ; vg-ss: %d' % (len(g_snp_map), g_vg_nt_concord_count, vg_ss_nt_concord_count))
tot_vg_ss_nt_concord_count += vg_ss_nt_concord_count
tot_g_vg_nt_concord_count += g_vg_nt_concord_count
tot_g_ss_nt_concord_count += g_ss_nt_concord_count
if debug:
print('Nucleotide concordance counts out of %d genotypes, rg-ss: %d' % (len(g_snp_map), g_ss_nt_concord_count))
        num_freq_discrep_filtered_snps = 0
        num_maf_filtered_snps = 0
        num_non_matching_nts = 0
        num_non_supported_nts = 0
        num_ambig_nts = 0
# Identifying which SNPs have nucleotides that are ok..
ok_nts = []
ok_indices = {'g': [], 'ss': []}
if validation_genotype_file is not None:
ok_indices['vg']=[]
#Now loop over SNPs to coordinate nucleotides.
if validation_genotype_file is not None:
for g_i, vg_i, ss_i in zip(g_snp_map, vg_snp_map, ss_snp_map):
# To make sure, is the SNP id the same?
assert g_sids[g_i] == vg_sids[vg_i] == ss_sids[ss_i], 'Some issues with coordinating the genotypes.'
g_nt = g_nts[g_i]
if not skip_coordination:
vg_nt = vg_nts[vg_i]
ss_nt = ss_nts[ss_i]
# Is the nucleotide ambiguous.
g_nt = [g_nts[g_i][0], g_nts[g_i][1]]
if tuple(g_nt) in util.ambig_nts:
num_ambig_nts += 1
continue
                    # Sanity-check that both alleles are valid nucleotides.
if (not g_nt[0] in util.valid_nts) or (not g_nt[1] in util.valid_nts):
num_non_supported_nts += 1
continue
os_g_nt = sp.array(
[util.opp_strand_dict[g_nt[0]], util.opp_strand_dict[g_nt[1]]])
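                    # os_g_nt holds the opposite-strand (complemented) alleles; if the summary-stats alleles only match the reference after an A1/A2 swap, the effect sign is flipped below.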
flip_nts = False
                    # Coordination is a bit more complicated when validation genotypes are provided.
if not ((sp.all(g_nt == ss_nt) or sp.all(os_g_nt == ss_nt)) and (sp.all(g_nt == vg_nt) or sp.all(os_g_nt == vg_nt))):
if sp.all(g_nt == vg_nt) or sp.all(os_g_nt == vg_nt):
flip_nts = (g_nt[1] == ss_nt[0] and g_nt[0] == ss_nt[1]) or (
os_g_nt[1] == ss_nt[0] and os_g_nt[0] == ss_nt[1])
# Try flipping the SS nt
if flip_nts:
tot_num_flipped_nts +=1
betas[ss_i] = -betas[ss_i]
log_odds[ss_i] = -log_odds[ss_i]
if 'freqs' in ssg:
ss_freqs[ss_i] = 1 - ss_freqs[ss_i]
else:
if debug:
print("Nucleotides don't match after all?: g_sid=%s, ss_sid=%s, g_i=%d, ss_i=%d, g_nt=%s, ss_nt=%s" % \
(g_sids[g_i], ss_sids[ss_i], g_i,
ss_i, str(g_nt), str(ss_nt)))
num_non_matching_nts += 1
continue
else:
num_non_matching_nts += 1
continue
                # Everything checks out; record this SNP's index in each dataset.
ok_indices['g'].append(g_i)
ok_indices['vg'].append(vg_i)
ok_indices['ss'].append(ss_i)
ok_nts.append(g_nt)
else:
for g_i, ss_i in zip(g_snp_map, ss_snp_map):
# To make sure, is the SNP id the same?
assert g_sids[g_i] == ss_sids[ss_i], 'Some issues with coordinating the genotypes.'
g_nt = g_nts[g_i]
if not skip_coordination:
ss_nt = ss_nts[ss_i]
# Is the nucleotide ambiguous.
g_nt = [g_nts[g_i][0], g_nts[g_i][1]]
if tuple(g_nt) in util.ambig_nts:
num_ambig_nts += 1
continue
                    # Sanity-check that both alleles are valid nucleotides.
if (not g_nt[0] in util.valid_nts) or (not g_nt[1] in util.valid_nts):
num_non_matching_nts += 1
continue
os_g_nt = sp.array(
[util.opp_strand_dict[g_nt[0]], util.opp_strand_dict[g_nt[1]]])
flip_nts = False
                    # Without validation genotypes, only the LD reference and summary-statistics alleles need to agree.
if not(sp.all(g_nt == ss_nt) or sp.all(os_g_nt == ss_nt)):
flip_nts = (g_nt[1] == ss_nt[0] and g_nt[0] == ss_nt[1]) or (
os_g_nt[1] == ss_nt[0] and os_g_nt[0] == ss_nt[1])
# Try flipping the SS nt
if flip_nts:
tot_num_flipped_nts +=1
betas[ss_i] = -betas[ss_i]
log_odds[ss_i] = -log_odds[ss_i]
if 'freqs' in ssg and ss_freqs[ss_i]>0:
ss_freqs[ss_i] = 1.0 - ss_freqs[ss_i]
else:
if debug:
print("Nucleotides don't match after all?: g_sid=%s, ss_sid=%s, g_i=%d, ss_i=%d, g_nt=%s, ss_nt=%s" % \
(g_sids[g_i], ss_sids[ss_i], g_i,
ss_i, str(g_nt), str(ss_nt)))
num_non_matching_nts += 1
continue
# everything seems ok.
ok_indices['g'].append(g_i)
ok_indices['ss'].append(ss_i)
ok_nts.append(g_nt)
if debug:
print('%d SNPs had ambiguous nucleotides.' % num_ambig_nts)
print('%d SNPs were excluded due to nucleotide issues.' % num_non_matching_nts)
# Resorting by position
positions = sp.array(chrom_d['positions'])[ok_indices['g']]
# Now parse SNPs ..
snp_indices = sp.array(chrom_d['snp_indices'])
# Pinpoint where the SNPs are in the file.
snp_indices = snp_indices[ok_indices['g']]
raw_snps, freqs = plinkfiles.parse_plink_snps(
reference_genotype_file, snp_indices)
snp_stds = get_snp_stds(raw_snps)
snp_means = sp.mean(raw_snps, axis=1, dtype='float32')
betas = betas[ok_indices['ss']]
log_odds = log_odds[ok_indices['ss']]
ns = ssg['ns'][...][ok_indices['ss']]
ps = ssg['ps'][...][ok_indices['ss']]
nts = sp.array(ok_nts)
sids = (ssg['sids'][...]).astype(util.sids_u_dtype)
sids = sids[ok_indices['ss']]
#Storing everything in a dictionary
coord_data_dict = {'chrom': 'chrom_%d' % chrom,
'raw_snps_ref': raw_snps,
'snp_stds_ref': snp_stds,
'snp_means_ref': snp_means,
'freqs_ref': freqs,
'ps': ps,
'ns': ns,
'positions': positions,
'nts': nts,
'sids': sids,
'betas': betas,
'log_odds': log_odds}
#Parse validation genotypes, if available
if validation_genotype_file is not None:
snp_indices_val = sp.array(chrom_d_val['snp_indices'])
# Pinpoint where the SNPs are in the file.
snp_indices_val = snp_indices_val[ok_indices['vg']]
raw_snps_val, freqs_val = plinkfiles.parse_plink_snps(
validation_genotype_file, snp_indices_val)
snp_stds_val = get_snp_stds(raw_snps_val)
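            # With additive 0/1/2 genotype coding, the mean genotype is twice the allele frequency.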
snp_means_val = freqs_val * 2
coord_data_dict['raw_snps_val']=raw_snps_val
coord_data_dict['snp_stds_val']=snp_stds_val
coord_data_dict['snp_means_val']=snp_means_val
coord_data_dict['freqs_val']=freqs_val
# Check SNP frequencies, screen for possible problems..
if max_freq_discrep<1 and 'freqs' in ssg:
ss_freqs = ss_freqs[ok_indices['ss']]
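            # Also accept SNPs whose reported frequency matches the reference frequency of the other allele (1 - freq), i.e. the frequency was given for the opposite allele.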
ok_freq_snps = sp.logical_or(sp.absolute(ss_freqs - freqs) < max_freq_discrep,sp.absolute(ss_freqs + freqs-1) < max_freq_discrep) #Array of np.bool values
ok_freq_snps = sp.logical_or(ok_freq_snps,ss_freqs<=0) #Only consider SNPs that actually have frequencies
num_freq_discrep_filtered_snps = len(ok_freq_snps)- sp.sum(ok_freq_snps)
assert num_freq_discrep_filtered_snps>=0, "Problems when filtering SNPs with frequency discrepencies"
if num_freq_discrep_filtered_snps>0:
# Filter freq_discrepancy_snps
filter_coord_data(coord_data_dict, ok_freq_snps)
if debug:
print('Filtered %d SNPs due to frequency discrepancies'%num_freq_discrep_filtered_snps)
# Filter minor allele frequency SNPs.
if min_maf>0:
maf_filter = (freqs > min_maf) * (freqs < (1 - min_maf))
num_maf_filtered_snps = len(maf_filter)-sp.sum(maf_filter)
assert num_maf_filtered_snps>=0, "Problems when filtering SNPs with low minor allele frequencies"
if num_maf_filtered_snps>0:
filter_coord_data(coord_data_dict, maf_filter)
if debug:
print('Filtered %d SNPs due to low MAF'%num_maf_filtered_snps)
# Filter any monomorphic SNPs
monomorphic_filter = coord_data_dict['snp_stds_ref'] > 0
num_monomorphic_filtered_snps = len(monomorphic_filter)-sp.sum(monomorphic_filter)
assert num_monomorphic_filtered_snps>=0, "Problems when filtering monomorphic SNPs"
if num_monomorphic_filtered_snps>0:
filter_coord_data(coord_data_dict, monomorphic_filter)
if debug:
print('Filtered %d SNPs due to being monomorphic in LD reference'%num_monomorphic_filtered_snps)
if validation_genotype_file is not None:
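            # Raw per-individual polygenic score for this chromosome: validation genotypes weighted by the coordinated log-odds effects.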
maf_adj_prs = sp.dot(log_odds, raw_snps_val)
if debug and plinkf_dict_val['has_phenotype']:
maf_adj_corr = sp.corrcoef(plinkf_dict_val['phenotypes'], maf_adj_prs)[0, 1]
print('Log odds, per genotype PRS correlation w phenotypes for chromosome %d was %0.4f' % (chrom, maf_adj_corr))
maf_adj_risk_scores += maf_adj_prs
coord_data_dict['log_odds_prs']=maf_adj_prs
genetic_map = []
if genetic_map_dir is not None:
with gzip.open(genetic_map_dir + 'chr%d.interpolated_genetic_map.gz' % chrom) as f:
for line in f:
l = line.split()
# if l[0] in sid_set:
# genetic_map.append(l[0])
else:
genetic_map = None
coord_data_dict['genetic_map'] = genetic_map
write_coord_data(cord_data_g, coord_data_dict, debug=debug)
if debug:
print('%d SNPs were retained on chromosome %d.' % (len(sids), chrom))
#Update counters
num_snps_common_before_filtering += len(common_sids)
num_snps_common_after_filtering += len(sids)
tot_num_ambig_nts += num_ambig_nts
tot_num_non_supported_nts += num_non_supported_nts
tot_num_non_matching_nts += num_non_matching_nts
tot_num_freq_discrep_filtered_snps += num_freq_discrep_filtered_snps
tot_num_maf_filtered_snps += num_maf_filtered_snps
if not debug:
sys.stdout.write('\r%0.2f%%\n' % (100.0))
sys.stdout.flush()
# Now calculate the prediction r^2
if validation_genotype_file:
if debug and plinkf_dict_val['has_phenotype']:
maf_adj_corr = sp.corrcoef(
plinkf_dict_val['phenotypes'], maf_adj_risk_scores)[0, 1]
print('Log odds, per PRS correlation for the whole genome was %0.4f (r^2=%0.4f)' % (maf_adj_corr, maf_adj_corr ** 2))
print('Overall nucleotide concordance counts: rg_vg: %d, rg_ss: %d, vg_ss: %d' % (tot_g_vg_nt_concord_count, tot_g_ss_nt_concord_count, tot_vg_ss_nt_concord_count))
else:
if debug:
print('Overall nucleotide concordance counts, rg_ss: %d' % (tot_g_ss_nt_concord_count))
summary_dict[7]={'name':'Num chromosomes used:','value':len(chromosomes_found)}
summary_dict[8]={'name':'SNPs common across datasets:','value':num_snps_common_before_filtering}
if tot_num_flipped_nts>0 or debug:
summary_dict[8.1]={'name':'SNPs w flipped alleles','value':tot_num_flipped_nts}
summary_dict[9]={'name':'SNPs retained after filtering:','value':num_snps_common_after_filtering}
if tot_num_ambig_nts>0 or debug:
summary_dict[10]={'name':'SNPs w ambiguous nucleotides filtered:','value':tot_num_ambig_nts}
if tot_num_non_supported_nts>0 or debug:
summary_dict[10.1]={'name':'SNPs w unknown/unsupported nucleotides filtered:','value':tot_num_non_supported_nts}
if tot_num_non_matching_nts>0 or debug:
summary_dict[11]={'name':'SNPs w other nucleotide discrepancies filtered:','value':tot_num_non_matching_nts}
if min_maf>0 or debug:
summary_dict[12]={'name':'SNPs w MAF<%0.3f filtered:'%min_maf,'value':tot_num_maf_filtered_snps}
if max_freq_discrep<0.5 or debug:
summary_dict[13]={'name':'SNPs w allele freq discrepancy > %0.3f filtered:'%max_freq_discrep,'value':tot_num_freq_discrep_filtered_snps}
t1 = time.time()
t = (t1 - t0)
summary_dict[13.9]={'name':'dash', 'value':'Running times'}
summary_dict[15]={'name':'Run time for coordinating datasets:','value': '%d min and %0.2f sec'%(t / 60, t % 60)}
def main(p_dict):
bimfile = None
if p_dict['vbim'] is not None:
bimfile = p_dict['vbim']
elif p_dict['vgf'] is not None:
bimfile = p_dict['vgf'] + '.bim'
elif p_dict['gf'] is not None:
bimfile = p_dict['gf'] + '.bim'
else:
print('Set of validation SNPs is missing! Please specify either a validation PLINK genotype file, ' \
'or a PLINK BIM file with the SNPs of interest.')
if os.path.isfile(p_dict['out']):
print('Output file (%s) already exists! Delete, rename it, or use a different output file.'\
% (p_dict['out']))
raise Exception('Output file already exists!')
h5f = h5py.File(p_dict['out'], 'w')
summary_dict = {}
summary_dict[0]={'name':'Summary statistics filename:','value':p_dict['ssf']}
summary_dict[1]={'name':'LD reference genotypes filename:','value':p_dict['gf']}
summary_dict[3]={'name':'Coordinated data output filename:','value':p_dict['out']}
if p_dict['vgf'] is not None:
summary_dict[2]={'name':'Validation genotypes filename:','value':p_dict['vgf']}
sum_stats_parsers.parse_sum_stats(h5f, p_dict, bimfile, summary_dict)
coordinate_datasets(p_dict['gf'], h5f,summary_dict,
validation_genotype_file=p_dict['vgf'],
max_freq_discrep=p_dict['max_freq_discrep'],
min_maf=p_dict['maf'],
skip_coordination=p_dict['skip_coordination'],
debug=p_dict['debug'])
write_parameter_data(p_dict, h5f, debug=False)
h5f.close()
reporting.print_summary(summary_dict, 'Summary of coordination step')
return summary_dict
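# Illustrative invocation sketch (not part of the original file). The keys below mirror the ones
# accessed in main() above; the file paths are hypothetical, and parse_sum_stats() may require
# additional keys that are not shown in this excerpt:
#
#     p_dict = {
#         'ssf': 'sumstats.txt',          # summary statistics file
#         'gf': 'ld_ref',                 # LD reference PLINK prefix
#         'vgf': 'validation',            # validation PLINK prefix (optional)
#         'vbim': None,
#         'out': 'coord.hdf5',
#         'maf': 0.01,
#         'max_freq_discrep': 0.1,
#         'skip_coordination': False,
#         'debug': False,
#     }
#     main(p_dict)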
|
|
#!/usr/bin/env python
"""
objcgo: Cgo (Go) wrapper interface generator for Objective-C
"""
import re
from clang.cindex import CursorKind, TypeKind
def get_node_by_kind(kind, node):
cs = filter_kind(kind, node)
assert(len(cs) < 2)
return cs[0] if len(cs) > 0 else None
def filter_kind(kind, node):
return filter_node(lambda c:c.kind == kind, node)
def filter_node(fn, node):
return filter(fn, node.get_children())
def get_info(node, depth=0):
children = [get_info(c, depth+1) for c in node.get_children()]
return { #'id' : get_cursor_id(node),
'enc': node.objc_type_encoding,
'type' : node.type.is_pod(),
'kind' : node.kind,
'usr' : node.get_usr(),
'spelling' : node.spelling,
'disp' : node.displayname,
'is_definition' : node.is_definition(),
'children' : children }
class Typename:
cgo_unacceptable = None
def __init__(self, raw):
self._raw = str(raw)
def __repr__(self):
return self._raw
@staticmethod
def new(node):
cref = get_node_by_kind(CursorKind.OBJC_CLASS_REF, node)
tref = get_node_by_kind(CursorKind.TYPE_REF, node)
if cref: return ObjcClassType(cref.displayname)
if tref: return CType(tref.displayname, False) # FIXME: check const
# Some PARM_DECLs and OBJC_INSTANCE_METHOD_DECLs have no children to detect typename.
# In this case we get a typename information from Objective-C's type encoding.
# see more details in https://developer.apple.com/library/ios/documentation/Cocoa/Conceptual/ObjCRuntimeGuide/Articles/ocrtTypeEncodings.html
enc = node.objc_type_encoding
# When the node kind is OBJC_INSTANCE_METHOD_DECL, keep only the characters that encode the return type.
if node.kind == CursorKind.OBJC_INSTANCE_METHOD_DECL:
# We assume that the encoding contains '@0:8' for instance methods and does not contain bit fields.
m = re.match('^([^0-9]+)\d+@0:8', enc)
assert(m)
enc = m.group(1)
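# e.g. an instance method returning 'long long' typically has an encoding such as 'q16@0:8';
# the regex above keeps only the leading 'q', which is resolved via encode_map below
# (illustrative example).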
# Strip any additional Objective-C method encoding qualifiers such as 'r' or 'n'.
is_const = False
m = re.match('^([rnNoORV]+)(.+)', enc)
if m:
enc = m.group(2)
is_const = 'r' in m.group(1)
encode_map = {
'c': 'char',
'i': 'int',
's': 'short',
'l': 'long',
'q': 'long long',
'C': 'unsigned char',
'I': 'unsigned int',
'S': 'unsigned short',
'L': 'unsigned long',
'Q': 'unsigned long long',
'f': 'float',
'd': 'double',
'B': 'bool',
'*': 'char*',
#'#': 'Class',
#':': 'SEL',
'^v': 'void*' # FIXME
}
if enc == 'v':
return VoidType()
elif enc in encode_map:
return CType(encode_map[enc], is_const)
elif enc == '@':
return ObjcClassType('NSObject')
#print enc, node.displayname
return InvalidType()
@property
def is_cgo_acceptable(self):
return not Typename.is_reject(self._raw)
@staticmethod
def is_reject(raw):
if not Typename.cgo_unacceptable:
Typename.cgo_unacceptable = set([
'va_list',
'unichar',
'SEL',
'IMP',
'Class',
'CGFloat',
'AEDesc',
'AppleEvent',
'AEEventID',
'AEEventClass',
'NSAppleEventManagerSuspensionID',
'NSMethodSignature',
'NSInvocation',
'NSRange',
'NSInteger',
#'NSUInteger',
#'BOOL',
'NSComparisonResult',
'NSLocale',
'NSZone',
'NSStringEncoding',
'NSURLBookmarkCreationOptions',
'NSStringCompareOptions',
'NSTimeInterval',
'NSDecimal',
# NSProxy
'NSProxy',
'NSProtocolChecker',
'NSDistantObject',
# deprecated classes
'NSURLHandle',
'NSURLHandleStatus'
])
return raw in Typename.cgo_unacceptable
@property
def raw(self):
return self._raw
@property
def is_void(self):
return False
@property
def objc_class(self):
return False
def box_value_go(self, value):
#return ret_type + '_' + '(Id(C.' + clazz.raw + '_' + self.name.to_c() + '(' + args_str + ')))'
pass
def to_return_c(self): #FIXME
return self.to_param_c()
def to_param_c(self): #FIXME
return self._raw
def to_go(self, with_package=False):
r = self._raw
#if r == 'id': r = 'NSObject'
if not with_package: return r
return r # FIXME
class InvalidType(Typename):
def __init__(self):
Typename.__init__(self, '*INVALID TYPE*')
@property
def is_cgo_acceptable(self):
return False
class VoidType(Typename):
def __init__(self):
Typename.__init__(self, '*VOID*')
@property
def raw(self): # FIXME
raise AssertionError('Should not be called.')
@property
def is_void(self):
return True
@property
def is_cgo_acceptable(self):
return True
def to_param_c(self):
return 'void'
def to_go(self):
return ''
class CType(Typename):
go_type_map = {
'id': 'Id',
'void*': 'unsafe.Pointer',
'bool': 'C.bool',
'BOOL': 'C.bool',
'float': 'C.float',
'double': 'C.double',
'char': 'C.char',
'short': 'C.short',
'int': 'C.int',
'long': 'C.long',
'long long': 'C.longlong',
'unsigned char': 'C.uchar',
'unsigned short': 'C.ushort',
'unsigned int': 'C.uint',
'unsigned long': 'C.ulong',
'unsigned long long': 'C.ulonglong',
'char*': 'string',
'NSRect': 'NSRect',
'NSPoint': 'NSPoint',
'NSUInteger': 'C.uint',
'NSBackingStoreType': 'C.NSBackingStoreType'
}
def __init__(self, raw, is_const):
Typename.__init__(self, raw)
self.is_const = is_const
@property
def is_cgo_acceptable(self):
return (not Typename.is_reject(self.raw)) and (self.raw in set(CType.go_type_map.keys()) or self.raw in Enum.declared_enumtypes or Typedef.get(self.raw))
def box_value_go(self, value):
#return ret_type + '_' + '(Id(C.' + clazz.raw + '_' + self.name.to_c() + '(' + args_str + ')))'
pass
def to_param_c(self):
type_map = {
'id': 'void*',
'BOOL': 'bool',
'NSUInteger': 'uint',
}
r = self._raw
if r in type_map:
r = type_map[r]
return ('const ' if self.is_const else '') + r
def to_go(self):
r = self._raw
if r in CType.go_type_map:
return CType.go_type_map[r]
return 'C.' + r
class ObjcClassType(Typename):
used_classes = set()
def __init__(self, raw):
if len(raw) == 0:
raise AssertionError('empty string')
Typename.__init__(self, raw)
ObjcClassType.used_classes.add(self.raw)
@property
def objc_class(self):
return True
def to_return_c(self):
return 'void*'
def to_param_c(self):
return 'void*'
class Identifier:
def __init__(self, raw):
self._raw = str(raw)
def __repr__(self):
return self._raw
@property
def raw(self):
return self._raw
class MethodName(Identifier):
def __init__(self, raw):
Identifier.__init__(self, raw)
# getRed:green:blue:alpha: -> getRedGreenBlueAlpha
@property
def _to_camel(self):
return reduce(lambda a,x:a + x[0].upper() + x[1:], filter(lambda x:x!="", self._raw.split(":")))
def to_c(self):
return self._to_camel
def to_go(self):
r = self._to_camel
return r[0].upper() + r[1:]
class ParamName(Identifier):
def __init__(self, raw):
assert(not ':' in raw)
Identifier.__init__(self, raw)
def to_c(self):
return self._raw
def to_go(self):
r = self._raw
if r in set(['type', 'range', 'map', 'make']): return r + '_' # convert to un-reserved word for Go
return r
class PropName(Identifier):
def __init__(self, raw):
assert(not ':' in raw)
Identifier.__init__(self, raw)
# value -> setValue:
def to_setter_selector(self):
return 'set' + self._raw[0].upper() + self._raw[1:] + ':'
class Base:
def __init__(self, node):
self.node = node
class Interface(Base):
declared_classes = set()
def __init__(self, node):
def self_typename(self):
# If the current node is an OBJC_CATEGORY_DECL, the displayname of the node is a category name.
# So we fix the interface name by using 'get_usr()', which returns a string containing an interface name.
if node.kind == CursorKind.OBJC_CATEGORY_DECL:
# ignore deprecated categories
if 'Deprecated' in node.displayname: return None
m = re.match("c:objc\((cy|ext)\)([^@]+).+", node.get_usr())
assert(m)
return ObjcClassType(m.group(2))
return ObjcClassType(node.displayname)
def super_typename(self):
c = get_node_by_kind(CursorKind.OBJC_SUPER_CLASS_REF, self.node)
return ObjcClassType(c.displayname) if c else None
def bind(func, val):
return lambda a: func(a, val)
Base.__init__(self, node)
self.typename = self_typename(self)
self.super_typename = super_typename(self)
# return if deprecated class
if not self.typename: return
self.props = map(Property , filter_kind(CursorKind.OBJC_PROPERTY_DECL, node))
self.methods = map(bind(InstanceMethod, self.typename), filter_kind(CursorKind.OBJC_INSTANCE_METHOD_DECL, node))
self.class_methods = map(bind(ClassMethod , self.typename), filter_kind(CursorKind.OBJC_CLASS_METHOD_DECL, node))
map(lambda x:self.link_accessors(x), self.props)
Interface.declared_classes.add(self.typename.raw)
#def __repr__(self):
# return self.name + (' ' + self.super_typename if self.super_typename else '')
def link_accessors(self, prop):
def get_setter_selector(name):
return 'set' + name[0].upper() + name[1:] + ':'
getters = filter(lambda x: prop.name.raw == x.name.raw, self.methods)
setters = filter(lambda x: prop.name.to_setter_selector() == x.name.raw, self.methods)
assert(len(getters) <= 1)
assert(len(setters) <= 1)
if getters: getters[0].set_as_getter(prop)
if setters: setters[0].set_as_setter(prop)
def compile_c(self):
if not self.typename.is_cgo_acceptable: return '\n// ' + self.typename.raw + '\n'
s = ['', '////' + self.typename.raw]
# force remove 'init' from NSObject
if self.typename.raw == 'NSObject':
self.methods = filter(lambda x: x.name.raw != 'init', self.methods)
# output init (default ctor)
init = filter(lambda x: x.name.raw == 'init', self.methods)
assert(len(init) <= 1)
if len(init) == 0:
s.append('void* ' + self.typename.raw + '_init() {')
s.append(' return [[' + self.typename.raw + ' alloc] init];')
s.append('}')
# output other methods
s.append('\n'.join(map(lambda x:x.compile_c(), self.methods)))
s.append('\n'.join(map(lambda x:x.compile_c(), self.class_methods)))
return '\n'.join(s)
def compile_go(self):
if not self.typename.is_cgo_acceptable: return '\n'
s = []
# output struct
s.append('type ' + self.typename.raw + ' struct {')
if self.super_typename:
s.append(' ' + self.super_typename.raw)
else:
s.append(' self Id')
s.append('}')
# output boxing method
s.append('func ' + self.typename.raw + '_(i Id) ' + self.typename.raw + ' {')
if self.super_typename:
s.append(' return ' + self.typename.raw + '{ ' + self.super_typename.to_go() + '_(i) }')
elif self.typename.raw == 'NSObject':
s.append(' return NSObject{ i }')
else:
s.append(' return null') # FIXME
s.append('}')
# output init (default ctor)
init = filter(lambda x: x.name.raw == 'init', self.methods)
assert(len(init) <= 1)
if len(init) == 0:
s.append('func ' + self.typename.raw + '_init() ' + self.typename.raw + ' {')
s.append(' p := ' + 'Id(C.' + self.typename.raw + '_init())')
s.append(' return ' + self.typename.raw + '_(p)')
s.append('}')
# output other methods
s.append('\n'.join(map(lambda x:x.compile_go(), self.methods)))
s.append('\n'.join(map(lambda x:x.compile_go(), self.class_methods)))
return '\n'.join(s)
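# Illustrative example (not part of the original source) of the Go emitted by compile_go() for a
# hypothetical class 'NSFoo' deriving from NSObject, assuming it declares no explicit 'init':
#
#     type NSFoo struct {
#         NSObject
#     }
#     func NSFoo_(i Id) NSFoo {
#         return NSFoo{ NSObject_(i) }
#     }
#     func NSFoo_init() NSFoo {
#         p := Id(C.NSFoo_init())
#         return NSFoo_(p)
#     }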
class Property(Base): # FIXME
def __init__(self, node):
Base.__init__(self, node)
self.typename = Typename.new(self.node)
self.name = PropName(self.node.displayname)
assert(self.typename)
def __repr__(self):
p = '*' if self.node.type.kind == TypeKind.OBJCOBJECTPOINTER else ''
return self.typename + p + ' ' + self.name
class Method(Base):
unacceptalble_methods = set([
# unavailable
'NSNetService_setIncludesPeerToPeer',
'NSNetServiceBrowser_includesPeerToPeer',
'NSNetServiceBrowser_setIncludesPeerToPeer',
'NSURLSessionConfiguration_setDiscretionary',
'NSNetService_includesPeerToPeer',
'NSURLSessionConfiguration_sessionSendsLaunchEvents',
'NSURLSessionConfiguration_setSessionSendsLaunchEvents',
# deprecated
'NSObject_allowsWeakReference',
'NSObject_retainWeakReference',
'NSObject_URLResourceDataDidBecomeAvailable',
'NSObject_URLResourceDidFinishLoading',
'NSObject_URLResourceDidCancelLoading',
'NSObject_URLResourceDidFailLoadingWithReason',
# un-compilable
'NSURLSessionConfiguration_isDiscretionary',
# black
'NSEvent__addLocalMonitorForEventsMatchingMaskHandler',
'NSExpression_expressionBlock',
'NSArray_indexOfObjectPassingTest',
'NSArray_indexesOfObjectsPassingTest',
'NSDictionary_keysOfEntriesPassingTest',
'NSFileManager_enumeratorAtURLIncludingPropertiesForKeysOptionsErrorHandler',
'NSIndexSet_indexPassingTest',
'NSIndexSet_indexesPassingTest',
'NSOrderedSet_indexOfObjectPassingTest',
'NSOrderedSet_indexesOfObjectsPassingTest',
'NSSet_objectsPassingTest',
'NSPredicate__predicateWithBlock',
])
def __init__(self, node, class_typename, is_static):
Base.__init__(self, node)
self.name = MethodName(self.node.displayname)
self.return_typename = Typename.new(self.node)
self.class_typename = class_typename
self.is_static = is_static
# if return_typename is InvalidType, we change it to VoidType
if isinstance(self.return_typename, InvalidType):
#print 'W:', '+' if is_static else '-', class_typename, self.name
self.return_typename = VoidType()
m = self.name.raw
self.is_ctor = m == 'init' or len(m) > 8 and m[:8] == 'initWith'
self.is_getter = False
self.is_setter = False
self.params = map(Parametor, filter_kind(CursorKind.PARM_DECL, node))
# overwrite return_typename if ctor, because the one is id or instancetype.
if self.is_ctor: self.return_typename = self.class_typename
def __repr__(self):
return str(self.return_typename) + ' ' + str(self.name)
@property
def is_cgo_acceptable(self):
#if self.class_typename.raw + '/' + self.name.raw in Method.unacceptalble_methods: return False
if self._funcname_c() in Method.unacceptalble_methods: return False
if any(map(lambda x:not x.typename.is_cgo_acceptable, self.params)): return False
if self.is_ctor: return True # FIXME: force True
return self.return_typename.is_cgo_acceptable
def get_cgo_rejected_reason(self):
if self._funcname_c() in Method.unacceptalble_methods: return 'unacceptalble-method'
rejected = [] if self.return_typename.is_cgo_acceptable else [self.return_typename]
rejected.extend(map(lambda x:x.name, filter(lambda x:not x.typename.is_cgo_acceptable, self.params)))
return 'REJECT: ' + ' '.join(map(lambda x:str(x), rejected))
def set_as_getter(self, prop):
assert(len(self.params) == 0)
self.is_getter = True
self.prop = prop
self.return_typename = prop.typename
if not self.return_typename:
self.return_typename = self.prop.typename
def set_as_setter(self, prop):
assert(len(self.params) == 1)
self.is_setter = True
self.prop = prop
self.params[0].typename = prop.typename
def _funcname_c(self):
return self.class_typename.raw + ('__' if self.is_static else '_') + self.name.to_c()
def compile_c(self):
is_static = self.is_ctor or self.is_static
s = []
params = map(lambda x:x.to_param_c(), self.params)
if not is_static: params.insert(0, 'void* goobj')
params_str = ', '.join(params)
# This program currently cannot handle methods that return block objects,
# so the argument substitution is skipped in those cases.
if self.name.raw.count(':') == len(self.params):
args_str = (self.name.raw.replace(':', ':%s ') % tuple(map(lambda x:x.to_arg_c(), self.params))).strip()
else:
args_str = self.name.raw
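# e.g. for the selector 'setObject:forKey:' with parameters (obj, key), the substitution above
# yields 'setObject:obj forKey:key' (illustrative example).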
s.append(self.return_typename.to_return_c() + ' ' + self._funcname_c() + '(' + params_str + ') {')
if self.is_static:
if self.is_ctor:
s.append(' return [' + self.class_typename.raw + ' ' + args_str + '];')
raise AssertionError('unexpected static constructor')
elif not self.return_typename.is_void:
s.append(' return [' + self.class_typename.raw + ' ' + args_str + '];')
else:
s.append(' [' + self.class_typename.raw + ' ' + args_str + '];')
else:
if self.is_ctor:
s.append(' return [[' + self.class_typename.raw + ' alloc] ' + args_str + '];')
elif not self.return_typename.is_void:
s.append(' return [(' + self.class_typename.raw + '*)goobj ' + args_str + '];')
else:
s.append(' [(' + self.class_typename.raw + '*)goobj ' + args_str + '];')
s.append('}')
if self.is_cgo_acceptable:
return '\n'.join(s)
else:
return ('//' + self.get_cgo_rejected_reason() + '\n') + '//' + '\n//'.join(s)
def compile_go(self):
is_static = self.is_ctor or self.is_static
ret_type = self.return_typename.to_go()
params_str = ', '.join(map(lambda x:x.to_param_go(), self.params))
args = map(lambda x:x.to_arg_go(), self.params)
if not is_static: args.insert(0, 'goobj.Self()')
args_str = ', '.join(args)
instance = '' if is_static else '(goobj ' + self.class_typename.to_go() + ') '
funcname = self.name.to_go()
if is_static:
funcname = self.class_typename.raw + '_' + funcname[0].lower() + funcname[1:]
s = ['func ' + instance + funcname + '(' + params_str + ') ' + ret_type + ' {']
if self.is_static:
if self.return_typename.is_void:
s.append(' C.' + self._funcname_c() + '(' + args_str + ')')
elif self.return_typename.objc_class:
s.append(' return ' + ret_type + '_(Id(C.' + self._funcname_c() + '(' + args_str + ')))')
elif ret_type == 'NSRect' or ret_type == 'NSPoint' or ret_type == 'Id':
s.append(' return ' + ret_type + '_(C.' + self._funcname_c() + '(' + args_str + '))')
else:
s.append(' return ' + '(C.' + self._funcname_c() + '(' + args_str + '))')
else:
if self.is_ctor or not self.return_typename.is_void or self.is_getter:
if self.return_typename.objc_class:
s.append(' return ' + ret_type + '_' + '(Id(C.' + self._funcname_c() + '(' + args_str + ')))')
elif self.return_typename.raw == 'char*':
s.append(' return C.GoString(C.' + self._funcname_c() + '(' + args_str + '))')
elif ret_type == 'NSRect' or ret_type == 'NSPoint' or ret_type == 'Id':
s.append(' return ' + ret_type + '_(C.' + self._funcname_c() + '(' + args_str + '))')
else:
s.append(' return ' + '(C.' + self._funcname_c() + '(' + args_str + '))')
else:
s.append(' C.' + self._funcname_c() + '(' + args_str + ')')
s.append('}')
if self.is_cgo_acceptable:
return '\n'.join(s)
else:
return ('//' + self.get_cgo_rejected_reason() + '\n') + '//' + '\n//'.join(s)
class InstanceMethod(Method):
def __init__(self, node, class_typename = None):
Method.__init__(self, node, class_typename, False)
class ClassMethod(Method):
def __init__(self, node, class_typename = None):
Method.__init__(self, node, class_typename, True)
class Parametor(Base):
def __init__(self, node):
Base.__init__(self, node)
self.typename = Typename.new(self.node)
self.name = ParamName(self.node.displayname)
def __repr__(self):
return str(self.typename) + '/' + str(self.name)
def to_arg_c(self):
name = self.typename.raw
if not self.typename: return 'FIXME' # FIXME
if self.typename and self.typename.objc_class:
if self.typename.raw == 'NSError':
return '(' + self.typename.raw + '**)&' + self.name.to_c()
else:
return '(' + self.typename.raw + '*)' + self.name.to_c()
if self.typename: return self.name.to_c()
return 'FIXME' # FIXME
def to_param_c(self):
if not self.typename: return 'FIXMEx' # FIXME
return self.typename.to_param_c() + ' ' + self.name.to_c()
def to_arg_go(self):
name = self.name.to_go()
if not self.typename: return 'FIXMEz' # FIXME
if self.typename and self.typename.objc_class: return name + '.Self()'
if self.typename.raw == 'id': return 'unsafe.Pointer(' + name + ')'
if self.typename.raw == 'char*': return 'C.CString(' + name + ')'
if self.typename.raw == 'NSRect': return 'C.CGRectMake(C.CGFloat(%s.X), C.CGFloat(%s.Y), C.CGFloat(%s.Width), C.CGFloat(%s.Height))' % (name, name, name, name)
if self.typename.raw == 'NSPoint': return 'C.CGPointMake(C.CGFloat(%s.X), C.CGFloat(%s.Y))' % (name, name)
if self.typename: return name
return 'FIXME' # FIXME
def to_param_go(self):
name = self.name.to_go()
if not self.typename: return 'FIXMEy' # FIXME
if self.typename.raw == 'id': return name + ' Id'
if self.typename.raw == 'char*': return name + ' string'
if self.typename.raw == 'void*': return name + ' unsafe.Pointer'
return name + ' ' + self.typename.to_go()
class Enum(Base):
declared_enumtypes = set()
deprecated = set([
'NSDataWritingFileProtectionNone',
'NSDataWritingFileProtectionComplete',
'NSDataWritingFileProtectionCompleteUnlessOpen',
'NSDataWritingFileProtectionCompleteUntilFirstUserAuthentication',
'NSDataWritingFileProtectionMask',
'NSURLErrorCancelledReasonUserForceQuitApplication',
'NSURLErrorCancelledReasonBackgroundUpdatesDisabled',
])
def __init__(self, node):
self.name = node.displayname # FIXME: Typename?
self.constants = filter(lambda x:not x in Enum.deprecated, map(lambda x:x.displayname, filter_kind(CursorKind.ENUM_CONSTANT_DECL, node)))
if len(self.name) > 0:
Enum.declared_enumtypes.add(self.name)
class Typedef(Base):
declared_typedefs = {}
deprecated = set([
])
def __init__(self, node):
self.typename = node.displayname # FIXME: Typename?
self.desttype = Typename.new(node)
@staticmethod
def add(node):
td = Typedef(node)
if isinstance(td.desttype, InvalidType):
Typedef.declared_typedefs[td.typename] = td
return td
@staticmethod
def get(ident):
if not ident in Typedef.declared_typedefs: return None
return Typedef.declared_typedefs[ident]
def parse_root(node):
if node.kind == CursorKind.TRANSLATION_UNIT:
(interfaces, enums) = parse_translation_unit(node)
print '''package sample
/*
#cgo CFLAGS: -x objective-c -I../../objc -I../../out
#cgo LDFLAGS: -framework Foundation -framework AppKit
#import <Cocoa/Cocoa.h>
#import <objc/message.h>
#import <objc/runtime.h>
// runtime
const char* CCG_object_getClassName(void* px) {
return object_getClassName(px);
}
// NSObject
const char* NSObject_descripton(void* p) {
return [[(id)p description] UTF8String];
}
'''
print ''.join(map(lambda x:x.compile_c()+'\n', interfaces))
print '\n\n'
print '''*/
import "C"
import "unsafe"
'''
# output enum constants
# print 'const ('
# for e in enums:
# for i in e.constants:
# print ' ' + i + ' = C.' + i
# print ')'
#
print '''
type Id unsafe.Pointer
func Id_(r unsafe.Pointer) Id {
return Id(r)
}
///// struct for Go
type NSRect struct {
X float64
Y float64
Width float64
Height float64
}
func NSRect_(r C.NSRect) NSRect {
return NSRect{float64(r.origin.x), float64(r.origin.y), float64(r.size.width), float64(r.size.height)}
}
type NSPoint struct {
X float64
Y float64
}
func NSPoint_(r C.NSPoint) NSPoint {
return NSPoint{float64(r.x), float64(r.y)}
}
///// additional for Go
func (obj NSObject) Self() unsafe.Pointer {
return unsafe.Pointer(obj.self)
}
func (obj NSObject) String() string {
return C.GoString(C.NSObject_descripton(obj.Self()))
}
func (obj NSObject) GetClassName() string {
p := C.CCG_object_getClassName(obj.Self())
return C.GoString(p)
}
///// END
'''
#
print ''.join(map(lambda x:x.compile_go()+'\n', interfaces))
print '\n'
# Create skeleton implementations for classes that are referenced but have no interface declaration.
for i in ObjcClassType.used_classes.difference(Interface.declared_classes):
print '''type %s struct {
NSObject
}
func %s_(i Id) %s {
return %s{ NSObject_(i) }
}
''' % (i,i,i,i)
def parse_translation_unit(node):
map(Typedef.add, filter_kind(CursorKind.TYPEDEF_DECL, node))
enums = map(Enum, filter_kind(CursorKind.ENUM_DECL, node))
interfaces = filter(lambda x:x.typename, map(Interface, filter_kind(CursorKind.OBJC_INTERFACE_DECL, node)))
# merge category's methods into classes
categories = filter(lambda x:x.typename, map(Interface, filter_kind(CursorKind.OBJC_CATEGORY_DECL, node)))
for c in categories: # FIXME: create Category class?
for i in interfaces:
if i.typename.raw == c.typename.raw:
i.methods.extend(c.methods)
# FIXME: add class methods, props
return (interfaces, enums)
def create_go_source(node):
parse_root(node)
def main():
from clang.cindex import Index
from pprint import pprint
from optparse import OptionParser, OptionGroup
# TODO: global opts
parser = OptionParser("usage: %prog [options] {filename} [clang-args*]")
parser.disable_interspersed_args()
(opts, args) = parser.parse_args()
if len(args) > 0:
args.append('-c')
args.append('-ObjC')
args.append('-m64')
args.append('-fobjc-arc')
tu = Index.create().parse(None, args)
if tu:
create_go_source(tu.cursor)
else:
parser.error("unable to load input")
else:
parser.error('invalid number of arguments')
if __name__ == '__main__':
main()
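# Illustrative invocation sketch (the header path and extra clang arguments are assumptions, not
# from the original source); main() itself appends '-c -ObjC -m64 -fobjc-arc' before parsing:
#
#     python objcgo.py /path/to/Foundation.h -isysroot "$(xcrun --show-sdk-path)" > foundation.go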
|
|
"""
Python-Markdown Extension Regression Tests
==========================================
A collection of regression tests to confirm that the included extensions
continue to work as advertised. This used to be accomplished by doctests.
"""
from __future__ import unicode_literals
import unittest
import zmarkdown
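# These tests are plain unittest test cases, so they can typically be run with the standard
# unittest runner, e.g. `python -m unittest <this module>` (the module name is an assumption).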
class TestExtensionClass(unittest.TestCase):
""" Test markdown.extensions.Extension. """
def setUp(self):
class TestExtension(zmarkdown.extensions.Extension):
config = {
'foo': ['bar', 'Description of foo'],
'bar': ['baz', 'Description of bar']
}
self.ext = TestExtension()
self.ExtKlass = TestExtension
def testGetConfig(self):
self.assertEqual(self.ext.getConfig('foo'), 'bar')
def testGetConfigDefault(self):
self.assertEqual(self.ext.getConfig('baz'), '')
self.assertEqual(self.ext.getConfig('baz', default='missing'), 'missing')
def testGetConfigs(self):
self.assertEqual(self.ext.getConfigs(), {'foo': 'bar', 'bar': 'baz'})
def testGetConfigInfo(self):
self.assertEqual(
dict(self.ext.getConfigInfo()),
dict([
('foo', 'Description of foo'),
('bar', 'Description of bar')
])
)
def testSetConfig(self):
self.ext.setConfig('foo', 'baz')
self.assertEqual(self.ext.getConfigs(), {'foo': 'baz', 'bar': 'baz'})
def testSetConfigWithBadKey(self):
# self.ext.setConfig('bad', 'baz) ==> KeyError
self.assertRaises(KeyError, self.ext.setConfig, 'bad', 'baz')
def testConfigAsKwargsOnInit(self):
ext = self.ExtKlass(foo='baz', bar='blah')
self.assertEqual(ext.getConfigs(), {'foo': 'baz', 'bar': 'blah'})
class TestAbbr(unittest.TestCase):
""" Test abbr extension. """
def setUp(self):
self.md = zmarkdown.ZMarkdown(extensions=['zmarkdown.extensions.abbr'])
def testSimpleAbbr(self):
""" Test Abbreviations. """
text = 'Some text with an ABBR and a REF. Ignore REFERENCE and ref.' + \
'\n\n*[ABBR]: Abbreviation\n' + \
'*[REF]: Abbreviation Reference'
self.assertEqual(
self.md.convert(text),
'<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> '
'and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore '
'REFERENCE and ref.</p>'
)
def testNestedAbbr(self):
""" Test Nested Abbreviations. """
text = '[ABBR](/foo) and _ABBR_\n\n' + \
'*[ABBR]: Abreviation'
self.assertEqual(
self.md.convert(text),
'<p><a href="/foo"><abbr title="Abreviation">ABBR</abbr></a> '
'and <em><abbr title="Abreviation">ABBR</abbr></em></p>'
)
class TestCodeHilite(unittest.TestCase):
""" Test codehilite extension. """
def setUp(self):
self.has_pygments = True
try:
import pygments # noqa
except ImportError:
self.has_pygments = False
def testBasicCodeHilite(self):
text = '\t# A Code Comment'
md = zmarkdown.ZMarkdown(extensions=['zmarkdown.extensions.codehilite'])
if self.has_pygments:
# Pygments may pick an arbitrary lexer here as we did not specify the language
self.assertTrue(md.convert(text).startswith('<div class="codehilite"><pre>'))
else:
self.assertEqual(
md.convert(text),
'<pre class="codehilite"><code># A Code Comment'
'</code></pre>'
)
def testLinenumsTrue(self):
text = '\t# A Code Comment'
md = zmarkdown.ZMarkdown(
extensions=[zmarkdown.extensions.codehilite.CodeHiliteExtension(linenums=True)])
if self.has_pygments:
# Different versions of pygments output slightly different markup.
# So we use 'startswith' and test just enough to confirm that
# pygments received and processed linenums.
self.assertTrue(
md.convert(text).startswith(
'<table class="codehilitetable"><tr><td class="linenos">'
)
)
else:
self.assertEqual(
md.convert(text),
'<pre class="codehilite"><code class="linenums"># A Code Comment'
'</code></pre>'
)
def testLinenumsFalse(self):
text = '\t#!Python\n\t# A Code Comment'
md = zmarkdown.ZMarkdown(
extensions=[zmarkdown.extensions.codehilite.CodeHiliteExtension(linenums=False)])
if self.has_pygments:
self.assertEqual(
md.convert(text),
'<div class="codehilite"><pre>'
'<span></span>'
'<span class="c1"># A Code Comment</span>\n'
'</pre></div>'
)
else:
self.assertEqual(
md.convert(text),
'<pre class="codehilite"><code class="language-python"># A Code Comment'
'</code></pre>'
)
def testLinenumsNone(self):
text = '\t# A Code Comment'
md = zmarkdown.ZMarkdown(
extensions=[zmarkdown.extensions.codehilite.CodeHiliteExtension(linenums=None)])
if self.has_pygments:
# Pygments may pick an arbitrary lexer here as we did not specify the language
self.assertTrue(md.convert(text).startswith('<div class="codehilite"><pre>'))
else:
self.assertEqual(
md.convert(text),
'<pre class="codehilite"><code># A Code Comment'
'</code></pre>'
)
def testLinenumsNoneWithShebang(self):
text = '\t#!Python\n\t# A Code Comment'
md = zmarkdown.ZMarkdown(
extensions=[zmarkdown.extensions.codehilite.CodeHiliteExtension(linenums=None)])
if self.has_pygments:
# Different versions of pygments output slightly different markup.
# So we use 'startswith' and test just enough to confirm that
# pygments received and processed linenums.
self.assertTrue(
md.convert(text).startswith(
'<table class="codehilitetable"><tr><td class="linenos">'
)
)
else:
self.assertEqual(
md.convert(text),
'<pre class="codehilite"><code class="language-python linenums"># A Code Comment'
'</code></pre>'
)
def testLinenumsNoneWithColon(self):
text = '\t:::Python\n\t# A Code Comment'
md = zmarkdown.ZMarkdown(
extensions=[zmarkdown.extensions.codehilite.CodeHiliteExtension(linenums=None)]
)
if self.has_pygments:
self.assertEqual(
md.convert(text),
'<div class="codehilite"><pre>'
'<span></span>'
'<span class="c1"># A Code Comment</span>\n'
'</pre></div>'
)
else:
self.assertEqual(
md.convert(text),
'<pre class="codehilite"><code class="language-python"># A Code Comment'
'</code></pre>'
)
def testHighlightLinesWithColon(self):
# Test with hl_lines delimited by single or double quotes.
text0 = '\t:::Python hl_lines="2"\n\t#line 1\n\t#line 2\n\t#line 3'
text1 = "\t:::Python hl_lines='2'\n\t#line 1\n\t#line 2\n\t#line 3"
for text in (text0, text1):
md = zmarkdown.ZMarkdown(extensions=['zmarkdown.extensions.codehilite'])
if self.has_pygments:
self.assertEqual(
md.convert(text),
'<div class="codehilite"><pre>'
'<span></span>'
'<span class="c1">#line 1</span>\n'
'<span class="hll"><span class="c1">#line 2</span>\n</span>'
'<span class="c1">#line 3</span>\n'
'</pre></div>'
)
else:
self.assertEqual(
md.convert(text),
'<pre class="codehilite">'
'<code class="language-python">#line 1\n'
'#line 2\n'
'#line 3</code></pre>'
)
def testUsePygmentsFalse(self):
text = '\t:::Python\n\t# A Code Comment'
md = zmarkdown.ZMarkdown(
extensions=[zmarkdown.extensions.codehilite.CodeHiliteExtension(use_pygments=False)]
)
self.assertEqual(
md.convert(text),
'<pre class="codehilite"><code class="language-python"># A Code Comment'
'</code></pre>'
)
class TestFencedCode(unittest.TestCase):
""" Test fenced_code extension. """
def setUp(self):
self.md = zmarkdown.ZMarkdown(extensions=['zmarkdown.extensions.fenced_code'])
self.has_pygments = True
try:
import pygments # noqa
except ImportError:
self.has_pygments = False
def testBasicFence(self):
""" Test Fenced Code Blocks. """
text = '''
A paragraph before a fenced code block:
~~~
Fenced code block
~~~'''
print("-"*20)
print(self.md.convert(text))
print("-"*20)
self.assertEqual(
self.md.convert(text),
'<p>A paragraph before a fenced code block:</p>\n'
'<div><pre><code>Fenced code block\n'
'</code></pre></div>'
)
def testNestedFence(self):
""" Test nested fence. """
text = '''
~~~~~~~~
~~~~
~~~~~~~~'''
self.assertEqual(
self.md.convert(text),
'<div><pre><code>\n\n'
'~~~~\n'
'</code></pre></div>'
)
def testFencedLanguage(self):
""" Test Language Tags. """
text = '''
~~~~{.python}
# Some python code
~~~~'''
self.assertEqual(
self.md.convert(text),
'<div><pre><code class="python"># Some python code\n'
'</code></pre></div>'
)
def testFencedBackticks(self):
""" Test Code Fenced with Backticks. """
text = '''
`````
# Arbitrary code
~~~~~ # these tildes will not close the block
`````'''
self.assertEqual(
self.md.convert(text),
'<div><pre><code># Arbitrary code\n'
'~~~~~ # these tildes will not close the block\n'
'</code></pre></div>'
)
def testFencedCodeWithHighlightLines(self):
""" Test Fenced Code with Highlighted Lines. """
text = '''
```hl_lines="1 3"
line 1
line 2
line 3
```'''
md = zmarkdown.ZMarkdown(
extensions=[
zmarkdown.extensions.codehilite.CodeHiliteExtension(linenums=None, guess_lang=False),
'zmarkdown.extensions.fenced_code'
]
)
if self.has_pygments:
self.assertEqual(
md.convert(text),
'<div><div class="codehilite"><pre>'
'<span></span>'
'<span class="hll">line 1\n</span>'
'line 2\n'
'<span class="hll">line 3\n</span>'
'</pre></div>\n</div>'
)
else:
self.assertEqual(
md.convert(text),
'<div><pre class="codehilite"><code>line 1\n'
'line 2\n'
'line 3</code></pre></div>'
)
def testFencedLanguageAndHighlightLines(self):
""" Test Fenced Code with Highlighted Lines. """
text0 = '''
```.python hl_lines="1 3"
#line 1
#line 2
#line 3
```'''
text1 = '''
~~~{.python hl_lines='1 3'}
#line 1
#line 2
#line 3
~~~'''
for text in (text0, text1):
md = zmarkdown.ZMarkdown(
extensions=[
zmarkdown.extensions.codehilite.CodeHiliteExtension(linenums=None, guess_lang=False),
'zmarkdown.extensions.fenced_code'
]
)
if self.has_pygments:
self.assertEqual(
md.convert(text),
'<div><div class="codehilite"><pre>'
'<span></span>'
'<span class="hll"><span class="c1">#line 1</span>\n</span>'
'<span class="c1">#line 2</span>\n'
'<span class="hll"><span class="c1">#line 3</span>\n</span>'
'</pre></div>\n</div>'
)
else:
self.assertEqual(
md.convert(text),
'<div><pre class="codehilite"><code class="language-python">#line 1\n'
'#line 2\n'
'#line 3</code></pre>\n</div>'
)
|
|
"""
Provides virtual field to access to translation from multilingual model instance.
"""
from django.db.models import ForeignObject
from django.db.models.deletion import DO_NOTHING
from django.db.models.fields.related import OneToOneRel, ReverseSingleRelatedObjectDescriptor
from django.db.models.related import PathInfo
from django.db.models.sql.where import Constraint
from multilingual.languages import get_active, get_fallbacks, FALLBACK_FIELD_SUFFIX
from multilingual.utils import sanitize_language_code
TRANSLATION_FIELD_NAME = 'translation'
class TranslationRel(OneToOneRel):
# Relation is always one-to-one
def __init__(self, field, to, **kwargs):
# Relation is always bound to 'master'
kwargs['field_name'] = 'master'
kwargs['related_name'] = 'master'
# Semi-virtual relation, do nothing on delete
kwargs['on_delete'] = DO_NOTHING
super(TranslationRel, self).__init__(field, to, **kwargs)
def is_hidden(self):
# The related object is always hidden.
return True
class TranslationDescriptor(ReverseSingleRelatedObjectDescriptor):
"""
Descriptor for the `MultilingualModel.translation` fields.
Behaves almost the same as the descriptor for a nullable one-to-one field.
"""
# Do not cache the field's cache name.
def __init__(self, field_with_rel):
self.field = field_with_rel
@property
def cache_name(self):
return self.field.get_cache_name()
def __get__(self, instance, instance_type=None):
try:
return super(TranslationDescriptor, self).__get__(instance, instance_type)
except self.field.rel.to.DoesNotExist:
# Gotcha: Unlike the one-to-one relation, this relation is bound to the primary key of the multilingual
# object, which is usually not None.
# Because of that we have to make a query to find out if the translation exists or not, whereas
# one-to-one relation finds this out from the value of the relation field.
# Handling the exception, which is unexpectedly raised by the query in `ReverseSingleRelatedObjectDescriptor`,
# seems to be a better option than completely overriding this method.
return None
# Based on 'django.contrib.contenttypes.generic.GenericRelation' and
# 'django.tests.foreign_object.models.ActiveTranslationField'
class TranslationRelation(ForeignObject):
"""
Provides an accessor to related translation.
"""
requires_unique_target = False # This avoids django validation
def __init__(self, to, from_fields=None, to_fields=None, base_name=None, language_code=None,
related_name=None, on_delete=None, **kwargs):
self._base_name = base_name
self._language_code = language_code
# Create the field name
if language_code:
name = '%s_%s' % (base_name, sanitize_language_code(language_code))
else:
name = base_name
kwargs['name'] = name
# Disable any modifications of this field
kwargs['editable'] = False
kwargs['serialize'] = False
# Define 'rel' object
kwargs['rel'] = TranslationRel(self, to)
# Let queries fill the master object into the translation cache, e.g. in select_related.
kwargs['unique'] = True
kwargs['null'] = True
if from_fields is None:
from_fields = []
if to_fields is None:
to_fields = ['master']
super(TranslationRelation, self).__init__(to,
from_fields=from_fields,
to_fields=to_fields, **kwargs)
def contribute_to_class(self, cls, name, virtual_only=False):
super(TranslationRelation, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, TranslationDescriptor(self))
@property
def language_code(self):
"""
If _language_code is None we are the _current field, so we use the
currently used language for lookups.
"""
if self._language_code is not None:
return self._language_code
return get_active()
def get_cache_name(self):
# The field for active language needs to use the cache for that language
return '_%s_%s_cache' % (self._base_name, sanitize_language_code(self.language_code))
def resolve_related_fields(self):
self.from_fields = [self.model._meta.pk.name]
return super(TranslationRelation, self).resolve_related_fields()
def get_extra_descriptor_filter(self, instance):
return {'language_code': self.language_code}
def get_extra_restriction(self, where_class, alias, related_alias):
# alias - Alias of the joined table (translations)
# related_alias - Alias of the master table
field = self.rel.to._meta.get_field('language_code')
cond = where_class()
cond.add((Constraint(alias, field.column, field), 'exact', self.language_code), 'AND')
return cond
def get_path_info(self):
"""
Get path from this field to the related model.
"""
opts = self.rel.to._meta
from_opts = self.model._meta
#XXX: Changed to indirect
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, False)]
# It would be better if the field could be derived just from object, but property is currently the
# easiest way to enable instantiation of multilingual models with translations.
# Problem reported to Django: #16508
#class TranslationProxyField(object):
class TranslationProxyField(property):
"""
Provides an easy access to field translations.
Based on code for 'GenericForeignKey' field
"""
def __init__(self, field_name, language_code=None, fallback=False):
self._field_name = field_name
self._language_code = language_code
self._fallback = fallback
name = field_name
if language_code is not None:
name = '%s_%s' % (name, sanitize_language_code(language_code))
if fallback:
name = '%s_%s' % (name, FALLBACK_FIELD_SUFFIX)
self.name = name
super(TranslationProxyField, self).__init__()
def contribute_to_class(self, cls, name):
self.name = name
self.model = cls
cls._meta.add_virtual_field(self)
# Connect myself as the descriptor for this field
setattr(cls, name, self)
@property
def field_name(self):
"""
Returns base field name.
"""
return self._field_name
@property
def language_code(self):
"""
Returns effective language code.
"""
if self._language_code is not None:
return self._language_code
return get_active()
def __get__(self, instance, instance_type=None):
"""
Returns field translation or None
"""
if instance is None:
return self
translation_model = self.model._meta.translation_model
# Find language codes to be tried
lang_codes = [self.language_code]
if self._fallback:
lang_codes += get_fallbacks(self.language_code)
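# e.g. with an active language of 'cs' and fallback enabled, lang_codes might end up as
# ['cs', 'en'] (illustrative; the actual list depends on get_fallbacks()).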
for lang_code in lang_codes:
# Find translation
translation_name = '%s_%s' % (TRANSLATION_FIELD_NAME, sanitize_language_code(lang_code))
try:
# Translation is nullable, so it may return None
translation = getattr(instance, translation_name)
except translation_model.DoesNotExist:
translation = None
if translation is None:
# Translation does not exist, try another language
continue
# Once we have the translation object we return what's there
return getattr(translation, self._field_name)
return None
def __set__(self, instance, value):
"""
Sets field translation
"""
translation_model = self.model._meta.translation_model
# Find translation
translation_name = '%s_%s' % (TRANSLATION_FIELD_NAME, sanitize_language_code(self.language_code))
try:
# Translation is nullable, so it may return None
translation = getattr(instance, translation_name)
except translation_model.DoesNotExist:
translation = None
if translation is None:
# Translation does not exist, create one
translation = translation_model(master=instance, language_code=self.language_code)
setattr(instance, translation_name, translation)
# Set the field translation
setattr(translation, self._field_name, value)
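# Illustrative sketch (not part of the original module): on a hypothetical multilingual model whose
# translation model has a 'master' foreign key and a 'language_code' field, per-language accessors
# could be declared roughly as follows:
#
#     class Article(MultilingualModel):                                    # hypothetical model
#         title = TranslationProxyField('title')                           # active language
#         title_en = TranslationProxyField('title', language_code='en')    # fixed language
#         title_any = TranslationProxyField('title', fallback=True)        # with fallbacks
#
# Reading article.title then resolves the translation for the active language (or a fallback,
# when requested) through __get__ above; assigning to it creates the translation if needed.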
|
|
import agents as ag
import envgui as gui
import random
# ______________________________________________________________________________
loc_A, loc_B = (1, 1), (2, 1) # The two locations for the Vacuum world
def RandomVacuumAgent():
"Randomly choose one of the actions from the vacuum environment."
p = ag.RandomAgentProgram(['Right', 'Left', 'Up', 'Down', 'Suck', 'NoOp'])
return ag.Agent(p)
def TableDrivenVacuumAgent():
"[Figure 2.3]"
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
}
p = ag.TableDrivenAgentProgram(table)
return ag.Agent(p)
def ReflexVacuumAgent():
"A reflex agent for the two-state vacuum environment. [Figure 2.8]"
def program(percept):
location, status = percept
if status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
def ModelBasedVacuumAgent() -> object:
"An agent that keeps track of what locations are clean or dirty."
model = {loc_A: None, loc_B: None}
def program(percept):
"Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
location, status = percept
model[location] = status # Update the model here
if model[loc_A] == model[loc_B] == 'Clean':
return 'NoOp'
elif status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
# class Floor(ag.Thing):
# pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
class TrivialVacuumEnvironment(VacuumEnvironment):
"""This environment has two locations, A and B. Each can be Dirty
or Clean. The agent perceives its location and the location's
status. This serves as an example of how to implement a simple
Environment."""
def __init__(self):
super(TrivialVacuumEnvironment, self).__init__()
choice = random.randint(0, 3)
if choice % 2: # 1 or 3
self.add_thing(Dirt(), loc_A)
if choice > 1: # 2 or 3
self.add_thing(Dirt(), loc_B)
def percept(self, agent):
"Returns the agent's location, and the location status (Dirty/Clean)."
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
return (agent.location, status)
#
# def execute_action(self, agent, action):
# """Change agent's location and/or location's status; track performance.
# Score 10 for each dirt cleaned; -1 for each move."""
# if action == 'Right':
# agent.location = loc_B
# agent.performance -= 1
# elif action == 'Left':
# agent.location = loc_A
# agent.performance -= 1
# elif action == 'Suck':
# if self.status[agent.location] == 'Dirty':
# agent.performance += 10
# self.status[agent.location] = 'Clean'
#
def add_agent(self, a):
"Agents start in either location at random."
super().add_thing(a, random.choice([loc_A, loc_B]))
# _________________________________________________________________________
# >>> a = ReflexVacuumAgent()
# >>> a.program((loc_A, 'Clean'))
# 'Right'
# >>> a.program((loc_B, 'Clean'))
# 'Left'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
#
# >>> e = TrivialVacuumEnvironment()
# >>> e.add_thing(ModelBasedVacuumAgent())
# >>> e.run(5)
# Produces text-based status output
# v = TrivialVacuumEnvironment()
# a = ModelBasedVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# v.run(10)
# Launch GUI of Trivial Environment
# v = TrivialVacuumEnvironment()
# a = RandomVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# g = gui.EnvGUI(v, 'Vacuum')
# c = g.getCanvas()
# c.mapImageNames({
# Dirt: 'images/dirt.png',
# ag.Wall: 'images/wall.jpg',
# # Floor: 'images/floor.png',
# ag.Agent: 'images/vacuum.png',
# })
# c.update()
# g.mainloop()
# Launch GUI of more complex environment
v = VacuumEnvironment(5, 4)
#a = ModelBasedVacuumAgent()
a = RandomVacuumAgent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vacuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'submissions/Carei/Bold.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""xla is an experimental library that provides XLA support APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.jit.ops import xla_ops
from tensorflow.compiler.jit.ops import xla_ops_grad # pylint: disable=unused-import
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.distribute import summary_op_util
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_XLA_COMPILE_ATTR = '_xla_compile_id'
_MAX_WARNING_LINES = 5
# Operations that indicate some error in the users graph. For example, XLA
# computation should not have any Placeholder op.
_BLACKLISTED_OPS = set([
'Placeholder',
])
# XLA doesn't currently support reading of intermediate tensors, thus some ops
# are not supported.
_UNSUPPORTED_OPS = set([
'AudioSummary',
'AudioSummaryV2',
'HistogramSummary',
'ImageSummary',
'MergeSummary',
'Print',
'ScalarSummary',
'TensorSummary',
'TensorSummaryV2',
])
@tf_export('xla.experimental.compile')
def compile(computation, inputs=None): # pylint: disable=redefined-builtin
"""Builds an operator that compiles and runs `computation` with XLA.
Args:
computation: A Python function that builds a computation to apply to the
input. If the function takes n inputs, 'inputs' should be a list of n
tensors.
`computation` may return a list of operations and tensors. Tensors must
come before operations in the returned list. The return value of
`compile` is a list of tensors corresponding to the tensors from the
output of `computation`.
All `Operation`s returned from `computation` will be executed when
evaluating any of the returned output tensors.
inputs: A list of inputs or `None` (equivalent to an empty list). Each input
can be a nested structure containing values that are convertible to
tensors. Note that passing an N-dimensional list of compatible values will
result in an N-dimensional list of scalar tensors rather than a single rank-N
tensor. If you need different behavior, convert part of inputs to tensors
with `tf.convert_to_tensor`.
Returns:
Same data structure as if computation(*inputs) is called directly with some
exceptions for correctness. Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
Raises:
RuntimeError: if called when eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('xla.experimental.compile is not supported when eager '
'execution is enabled. Try using it inside tf.function.')
# pylint: disable=protected-access
return _compile_internal(computation, inputs)
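# Illustrative usage sketch (not from the original source); builds and runs a trivial computation
# under XLA in graph mode. `tf` refers to the TensorFlow 1.x public API, and the exact public name
# of this function (`tf.xla.experimental.compile`) follows the tf_export decorator above:
#
#     def computation(x):
#       return x * x + 1.0
#
#     with tf.Graph().as_default():
#       (y,) = tf.xla.experimental.compile(computation, inputs=[tf.constant(2.0)])
#       with tf.Session() as sess:
#         print(sess.run(y))  # -> 5.0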
class XLACompileContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside an XLA computation cluster.
THIS IS ONLY FOR TENSORFLOW INTERNAL IMPLEMENTATION, DO NOT USE DIRECTLY.
The primary role of `XLACompileContext` is to mark operators inside a
xla.compile() computation with attribute "_xla_compile_id=XYZ", where XYZ is
a unique name.
`ControlFlowContext` is used to perform the annotation since it integrates
with Tensorflow constructs like ResourceVariables. For example, if a
`ResourceVariable` is constructed inside a xla.compile() block, the
`ResourceVariable` implementation can use
`with ops.control_dependencies(None)` to build the variable's definition
outside the compiled computation.
"""
def __init__(self, name, pivot):
"""Builds a new XLACompileContext.
Args:
name: a unique name for the context, used to populate the
`_xla_compile_id` attribute.
pivot: a pivot node. Nodes in the XLACompileContext that do not have any
inputs will have a control dependency on the pivot node. This ensures
that nodes are correctly included in any enclosing control flow
contexts.
"""
super(XLACompileContext, self).__init__()
self._name = name
self._name_as_bytes = compat.as_bytes(name)
self._unsupported_ops = []
self._pivot = pivot
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = '\n'.join([
' %s (%s)' % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]
])
logging.warning('%d unsupported operations found: \n%s',
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning('... and %d more',
len(self._unsupported_ops) - _MAX_WARNING_LINES)
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
internal_control_inputs = []
external_control_inputs = []
for x in op.control_inputs:
# pylint: disable=protected-access
is_internal_op = False
ctxt = x._get_control_flow_context()
while ctxt is not None:
if ctxt == self:
is_internal_op = True
break
ctxt = ctxt._outer_context
if is_internal_op:
internal_control_inputs.append(x)
else:
external_control_inputs.append(x)
# pylint: enable=protected-access
# pylint: disable=protected-access
op._remove_all_control_inputs()
op._add_control_inputs(internal_control_inputs)
# pylint: enable=protected-access
return internal_control_inputs, external_control_inputs
def AddOp(self, op):
"""Create op in XLACompileContext and notifies outer context recursively."""
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
logging.error(
'Operation of type %s (%s) is not supported in XLA. Execution will '
'fail if this op is used in the graph. ', op.type, op.name)
# TODO(ycao): Automatically disable summaries instead of reporting them.
if op.type in _UNSUPPORTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
'Non-resource Variables are not supported inside XLA computations '
'(operator name: %s)' % op.name)
if _XLA_COMPILE_ATTR in op.node_def.attr:
raise ValueError('XLA compiled computations cannot be nested, (operator '
'name: %s)' % op.name)
op._set_attr(
_XLA_COMPILE_ATTR, attr_value_pb2.AttrValue(s=self._name_as_bytes))
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
# Remove any control edges from outer control flow contexts. These may cause
# mismatched frame errors. An example is when one of op's inputs is
# generated in a different While control flow context.
(internal_control_inputs,
external_control_inputs) = self._RemoveExternalControlEdges(op)
if not op.inputs:
# Add a control edge from the control pivot to this op.
if not internal_control_inputs:
# pylint: disable=protected-access
op._add_control_input(self._pivot)
# pylint: enable=protected-access
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x) # pylint: disable=protected-access
if external_control_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(phawkins): fix that.
with ops.control_dependencies(None):
self.Enter()
external_control_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_control_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_control_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
# pylint: disable=protected-access
context._values.update(output_names)
context = context._outer_context
# pylint: enable=protected-access
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context.
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op):
self.AddOp(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the XLACompileContext to
# be None, as the XLACompileContext does not get nested, nor does the
# grad_state outside the XLACompileContext affect the graph inside, so the
# grad_state should be as if this is the top-level gradient state.
return None
@property
def back_prop(self):
"""Forwards to the enclosing while context, if any."""
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def _compile_internal(computation, inputs=None):
"""Builds graph operators that compiles and symbolically executes computation.
Args:
computation: A Python function that builds the computation to compile and
execute.
inputs: A list of inputs or `None` (equivalent to an empty list). Each input
can be a nested structure containing values that are convertible to
tensors. Note that passing an N-dimension list of compatible values will
result in an N-dimension list of scalar tensors rather than a single rank-N
tensor. If you need different behavior, convert part of inputs to tensors
with `tf.convert_to_tensor`.
Returns:
Same data structure as if computation(*inputs) is called directly, with some
exceptions for correctness. Exceptions include: 1) None output, 2) single
value output, 3) operation-only outputs.
Raises:
ValueError: If any element in computation outputs is neither an Operation
nor a value that can be converted to a tensor.
ValueError: If computation outputs is non-flat and contains any Operations.
TypeError: If `inputs` is not a list or tuple.
"""
if inputs is None:
inputs = []
if not isinstance(inputs, collections.Sequence):
raise TypeError('inputs must be a list')
# Flatten inputs.
flat_inputs = nest.flatten(inputs)
# Converts inputs to Tensors.
flat_inputs = [ops.convert_to_tensor(x) for x in flat_inputs]
cluster_name = ops.get_default_graph().unique_name('cluster')
pivot = control_flow_ops.no_op(name=cluster_name + '/pivot')
context = XLACompileContext(name=cluster_name, pivot=pivot)
try:
context.Enter()
# Add identity ops so even unused inputs are 'consumed' by the
# computation.
flat_inputs = [
array_ops.identity(x, name='input_{}'.format(i))
for i, x in enumerate(flat_inputs)
]
# Re-pack flat_inputs in same structure as 'inputs'.
computation_inputs = nest.pack_sequence_as(
structure=inputs, flat_sequence=flat_inputs)
# Only resource variables work inside an XLA computation, so turn on
# resource variables for the computation.
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
vscope.set_use_resource(True)
with _disable_summary_context():
outputs = computation(*computation_inputs)
# Restore variable scope after computation.
vscope.set_use_resource(saved_use_resource)
outputs_is_flat = is_flat(outputs)
if outputs_is_flat:
output_tensors, control_deps = _postprocess_flat_outputs(outputs)
else:
output_tensors, control_deps = _postprocess_non_flat_outputs(outputs)
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
# When XLA computation returns only operations and no tensors, a NoOp
# dependent on the operations in outputs is returned. Otherwise final
# outputs would be empty and there is no way to trigger returned
# operations.
if not output_tensors:
return control_flow_ops.group(control_deps, name='output_0')
output_tensors = [
xla_ops.xla_cluster_output(o, name='output{}'.format(i))
for i, o in enumerate(output_tensors)
]
with ops.control_dependencies(control_deps):
# Wraps the outputs in identity operators that carries control
# dependencies.
output_tensors = [
array_ops.identity(o, name='output_%d' % i)
for i, o in enumerate(output_tensors)
]
# If `computation` returned non-flat output structure, pack output tensors
# back into same structure.
if not outputs_is_flat:
output_tensors = nest.pack_sequence_as(
structure=outputs, flat_sequence=output_tensors)
return output_tensors
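# Illustrative sketch (not part of the original module): how a computation
# function and its inputs map onto _compile_internal. The names `add_fn`,
# `a` and `b` are hypothetical and exist only for this example.
#
#   def add_fn(x, y):
#     return x + y
#
#   a = ops.convert_to_tensor(1.0)
#   b = ops.convert_to_tensor(2.0)
#   result = _compile_internal(add_fn, inputs=[a, b])
#   # result is a list containing one tensor; single-value outputs are wrapped
#   # per the legacy behavior described in _postprocess_flat_outputs.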
def is_flat(outputs):
"""Checks if outputs is a flat structure.
Following structures and values are considered flat:
1) None
2) A single object
3) A list or tuple of Tensors/Operations
The only structures that this function understands are sequences and
dictionaries. E.g. this means that if outputs contains a single
user-defined Object, it is considered to be flat. Errors are raised later on
if that Object cannot be converted to a Tensor.
Args:
outputs: Output from `computation` inside `xla.compile`.
Returns:
A boolean indicating whether outputs is flat.
"""
# If outputs is a list or tuple, check if it has any nested structure. If
# there is, then outputs is non-flat.
if isinstance(outputs, collections.Sequence):
for o in outputs:
if isinstance(o, collections.Sequence) or isinstance(o, dict):
return False
# If outputs is a dict, it is non-flat.
if isinstance(outputs, dict):
return False
# Getting here means either outputs itself is a single non-structured value
# or it is a flat list of single non-structured values.
return True
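# Illustrative sketch (not part of the original module): what is_flat treats
# as flat vs. non-flat; t1 and t2 stand for hypothetical tensors.
#
#   is_flat(None)            # True  - no output
#   is_flat(t1)              # True  - a single value
#   is_flat([t1, t2])        # True  - a flat list of values
#   is_flat([t1, [t2]])      # False - nested sequence
#   is_flat({'a': t1})       # False - dictionaries are non-flat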
def _postprocess_flat_outputs(outputs):
"""Validates flat outputs and adds back device assignments.
Args:
outputs: Output from `computation` inside `xla.compile`.
Returns:
Tensors and Operations extracted from outputs.
"""
# Following code segment is to preserve legacy behavior. Previously we only
# supported flat outputs and thus for consistency it was nice to convert even
# a single element into a tuple. But now that we support arbitrary output
# structure, this is no longer necessary.
# TODO(b/121383831): Migrate all legacy use cases and delete this special
# case.
# If the computation returns `None`, make it an empty tuple.
if outputs is None:
outputs = tuple()
# If the computation only returned one value, make it a tuple.
if not isinstance(outputs, collections.Sequence):
outputs = (outputs,)
# Append `no_op` here so that return value of this function always contains
# at least one op that can trigger XlaLaunch node.
outputs += (control_flow_ops.no_op(),)
try:
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
'XLA computation function return values must all either be Operations'
' or convertible to Tensors. Got error: "%s"' % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
'XLA computation function must return zero or more Tensor values '
'followed by zero or more Operations.')
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else ''):
new_output_tensors.append(array_ops.identity(t))
return new_output_tensors, output_operations
def _postprocess_non_flat_outputs(outputs):
"""Validates non-flat outputs and adds back device assignments.
Args:
outputs: Output from `computation` inside `xla.compile`.
Returns:
Tensors extracted from outputs and an empty list, because Operations are not
allowed in non-flat outputs.
"""
# Convert all non-Operation outputs to Tensors.
new_output_tensors = []
for o in nest.flatten(outputs):
if isinstance(o, ops.Operation):
raise ValueError(
'xla.compile does not support Operation as return value in non-flat '
'output structure. You can set returned Operations as control '
'dependencies of returned Tensors so Operations are triggered when '
'Tensors are evaluated. Operation found: "%s"' % o.name)
try:
o = ops.convert_to_tensor(o)
except Exception as e:
raise ValueError(
'XLA computation function return values must all either be '
'Operations or convertible to Tensors. Got error: "%s"' % str(e))
# Makes sure even pass-through inputs/outputs are touched in compile
# context by creating an Identity node inside compile context.
with ops.device(o.device if o.device else ''):
new_output_tensors.append(array_ops.identity(o))
return new_output_tensors, []
@contextlib.contextmanager
def _disable_summary_context():
"""Enters a context where all summary ops are skipped.
Summaries are not yet supported in xla.compile(), so this context manager
skips creating summary ops. This is a temporary workaround until XLA supports
summary ops.
Yields:
None.
"""
original_skip_summary_func = summary_op_util.skip_summary
summary_op_util.skip_summary = lambda: True
try:
yield
finally:
summary_op_util.skip_summary = original_skip_summary_func
class _CapturedObject(object):
"""A placeholder to capture an object."""
def __init__(self):
self._object = None
def capture(self, o):
if self._object:
raise RuntimeError(
'InternalError: _CapturedObject can capture only once. Please file a '
'bug.')
self._object = o
def get(self):
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
scaffold_fn = captured_scaffold_fn.get()
if not scaffold_fn:
return None
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
return scaffold
def check_function_argument_count(func, input_arity, infeed_queue):
"""Validate the number of input arguments to an XLA function.
Args:
func: the Python function that will be called to generate the body of an XLA
computation graph.
input_arity: the number of explicit arguments supplied by the caller.
infeed_queue: if not None, the infeed queue that will supply
additional arguments to the function.
Returns:
None if function can be called with the supplied number of
arguments, or an error string if it cannot.
"""
def format_error(complaint, quantity):
return '%s %d argument%s' % (complaint, quantity, ''
if quantity == 1 else 's')
num_args_supplied = input_arity
if infeed_queue is not None:
num_args_supplied += infeed_queue.number_of_tuple_elements
arg_spec = tf_inspect.getargspec(func)
num_func_args = len(arg_spec.args)
if arg_spec.defaults is None:
num_func_defaults = 0
else:
num_func_defaults = len(arg_spec.defaults)
min_func_args = num_func_args - num_func_defaults
if num_args_supplied < min_func_args:
# The required number of arguments is not enough to call the function.
if num_func_defaults == 0 and arg_spec.varargs is None:
return format_error('exactly', num_func_args)
else:
return format_error('at least', min_func_args)
if arg_spec.varargs is None and num_args_supplied > num_func_args:
# The required number of arguments is too many to call the function.
if num_func_defaults == 0:
return format_error('exactly', num_func_args)
else:
return format_error('at most', num_func_args)
# Reaching here means either
# 1) There are varargs, func can accept any number of arguments greater than
# the minimum.
# 2) Number of supplied arguments falls in range of acceptable argument count
# of func.
return None
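# Illustrative sketch (not part of the original module): how a caller might
# validate arity before building an XLA computation. `body_fn` is a
# hypothetical computation body used only for this example.
#
#   def body_fn(x, y, z=None):
#     return x + y
#
#   # Two positional arguments and no infeed queue satisfy the two required
#   # parameters, so no error string is returned.
#   assert check_function_argument_count(body_fn, 2, infeed_queue=None) is None
#   # One argument is not enough; an error string is returned instead.
#   assert check_function_argument_count(body_fn, 1, None) == 'at least 2 arguments'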
|
|
# -*- coding: utf-8 -*-
import logging
import hmac
import hashlib
import uuid
import json
import re
import time
import random
from datetime import datetime
import gzip
from io import BytesIO
import warnings
from .compat import (
compat_urllib_parse, compat_urllib_error,
compat_urllib_request, compat_urllib_parse_urlparse)
from .errors import (
ClientErrorCodes, ClientError, ClientLoginError, ClientLoginRequiredError,
ClientCookieExpiredError, ClientThrottledError)
from .constants import Constants
from .http import ClientCookieJar
from .endpoints import (
AccountsEndpointsMixin, DiscoverEndpointsMixin, FeedEndpointsMixin,
FriendshipsEndpointsMixin, LiveEndpointsMixin, MediaEndpointsMixin,
MiscEndpointsMixin, LocationsEndpointsMixin, TagsEndpointsMixin,
UsersEndpointsMixin, UploadEndpointsMixin, UsertagsEndpointsMixin,
CollectionsEndpointsMixin
)
logger = logging.getLogger(__name__)
class Client(AccountsEndpointsMixin, DiscoverEndpointsMixin, FeedEndpointsMixin,
FriendshipsEndpointsMixin, LiveEndpointsMixin, MediaEndpointsMixin,
MiscEndpointsMixin, LocationsEndpointsMixin, TagsEndpointsMixin,
UsersEndpointsMixin, UploadEndpointsMixin, UsertagsEndpointsMixin,
CollectionsEndpointsMixin, object):
"""Main API client class for the private app api."""
API_URL = 'https://i.instagram.com/api/{version!s}/'
USER_AGENT = Constants.USER_AGENT
IG_SIG_KEY = Constants.IG_SIG_KEY
IG_CAPABILITIES = Constants.IG_CAPABILITIES
SIG_KEY_VERSION = Constants.SIG_KEY_VERSION
def __init__(self, username, password, **kwargs):
"""
:param username: Login username
:param password: Login password
:param kwargs: See below
:Keyword Arguments:
- **auto_patch**: Patch the api objects to match the public API. Default: False
- **drop_incompat_keys**: Remove api object keys that are not in the public API. Default: False
- **timeout**: Timeout interval in seconds. Default: 15
- **api_url**: Override the default api url base
- **cookie**: Saved cookie string from a previous session
- **settings**: A dict of settings from a previous session
- **on_login**: Callback after successful login
- **proxy**: Specify a proxy ex: 'http://127.0.0.1:8888' (ALPHA)
:return:
"""
self.username = username
self.password = password
self.auto_patch = kwargs.pop('auto_patch', False)
self.drop_incompat_keys = kwargs.pop('drop_incompat_keys', False)
self.api_url = kwargs.pop('api_url', None) or self.API_URL
self.timeout = kwargs.pop('timeout', 15)
self.on_login = kwargs.pop('on_login', None)
self.logger = logger
user_settings = kwargs.pop('settings', None) or {}
self.uuid = (
kwargs.pop('guid', None) or kwargs.pop('uuid', None) or
user_settings.get('uuid') or self.generate_uuid(False))
self.device_id = (
kwargs.pop('device_id', None) or user_settings.get('device_id') or
self.generate_deviceid())
self.signature_key = (
kwargs.pop('signature_key', None) or user_settings.get('signature_key') or
self.IG_SIG_KEY)
self.key_version = (
kwargs.pop('key_version', None) or user_settings.get('key_version') or
self.SIG_KEY_VERSION)
self.ig_capabilities = (
kwargs.pop('ig_capabilities', None) or user_settings.get('ig_capabilities') or
self.IG_CAPABILITIES)
# to maintain backward compat for user_agent kwarg
custom_ua = kwargs.pop('user_agent', '') or user_settings.get('user_agent')
if custom_ua:
self.user_agent = custom_ua
else:
self.app_version = (
kwargs.pop('app_version', None) or user_settings.get('app_version') or
Constants.APP_VERSION)
self.android_release = (
kwargs.pop('android_release', None) or user_settings.get('android_release') or
Constants.ANDROID_RELEASE)
self.android_version = int(
kwargs.pop('android_version', None) or user_settings.get('android_version') or
Constants.ANDROID_VERSION)
self.phone_manufacturer = (
kwargs.pop('phone_manufacturer', None) or user_settings.get('phone_manufacturer') or
Constants.PHONE_MANUFACTURER)
self.phone_device = (
kwargs.pop('phone_device', None) or user_settings.get('phone_device') or
Constants.PHONE_DEVICE)
self.phone_model = (
kwargs.pop('phone_model', None) or user_settings.get('phone_model') or
Constants.PHONE_MODEL)
self.phone_dpi = (
kwargs.pop('phone_dpi', None) or user_settings.get('phone_dpi') or
Constants.PHONE_DPI)
self.phone_resolution = (
kwargs.pop('phone_resolution', None) or user_settings.get('phone_resolution') or
Constants.PHONE_RESOLUTION)
self.phone_chipset = (
kwargs.pop('phone_chipset', None) or user_settings.get('phone_chipset') or
Constants.PHONE_CHIPSET)
cookie_string = kwargs.pop('cookie', None) or user_settings.get('cookie')
cookie_jar = ClientCookieJar(cookie_string=cookie_string)
if cookie_string and cookie_jar.expires_earliest and int(time.time()) >= cookie_jar.expires_earliest:
raise ClientCookieExpiredError('Oldest cookie expired at {0!s}'.format(cookie_jar.expires_earliest))
cookie_handler = compat_urllib_request.HTTPCookieProcessor(cookie_jar)
proxy_handler = None
proxy = kwargs.pop('proxy', None)
if proxy:
warnings.warn('Proxy support is alpha.', UserWarning)
parsed_url = compat_urllib_parse_urlparse(proxy)
if parsed_url.netloc and parsed_url.scheme:
proxy_address = '{0!s}://{1!s}'.format(parsed_url.scheme, parsed_url.netloc)
proxy_handler = compat_urllib_request.ProxyHandler({'https': proxy_address})
else:
raise ValueError('Invalid proxy argument: {0!s}'.format(proxy))
handlers = []
if proxy_handler:
handlers.append(proxy_handler)
# Allow user to override custom ssl context where possible
custom_ssl_context = kwargs.pop('custom_ssl_context', None)
try:
httpshandler = compat_urllib_request.HTTPSHandler(context=custom_ssl_context)
except TypeError:
# py version < 2.7.9
httpshandler = compat_urllib_request.HTTPSHandler()
handlers.extend([
compat_urllib_request.HTTPHandler(),
httpshandler,
cookie_handler])
opener = compat_urllib_request.build_opener(*handlers)
opener.cookie_jar = cookie_jar
self.opener = opener
# ad_id must be initialised after cookie_jar/opener because
# it relies on self.authenticated_user_name
self.ad_id = (
kwargs.pop('ad_id', None) or user_settings.get('ad_id') or
self.generate_adid())
if not cookie_string: # [TODO] There's probably a better way than to depend on cookie_string
if not self.username or not self.password:
raise ClientLoginRequiredError('login_required', code=400)
self.login()
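# Illustrative sketch (not part of the original class): constructing a client
# and reusing cached settings on a later run. The username/password values and
# the persisted dict are hypothetical.
#
#   api = Client('my_username', 'my_password')
#   cached = api.settings        # persist this dict (it includes the cookie)
#
#   # Later, pass the cached settings back in to reuse the device ids and
#   # cookie and avoid a fresh login:
#   api = Client('my_username', 'my_password', settings=cached)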
@property
def settings(self):
"""Helper property that extracts the settings that you should cache
in addition to username and password."""
return {
'uuid': self.uuid,
'device_id': self.device_id,
'ad_id': self.ad_id,
'signature_key': self.signature_key,
'key_version': self.key_version,
'ig_capabilities': self.ig_capabilities,
'app_version': self.app_version,
'android_release': self.android_release,
'android_version': self.android_version,
'phone_manufacturer': self.phone_manufacturer,
'phone_device': self.phone_device,
'phone_model': self.phone_model,
'phone_dpi': self.phone_dpi,
'phone_resolution': self.phone_resolution,
'phone_chipset': self.phone_chipset,
'cookie': self.opener.cookie_jar.dump(),
'created_ts': int(time.time())
}
@property
def user_agent(self):
"""Returns the useragent string that the client is currently using."""
return Constants.USER_AGENT_FORMAT % {
'app_version': self.app_version,
'android_version': self.android_version,
'android_release': self.android_release,
'brand': self.phone_manufacturer,
'device': self.phone_device,
'model': self.phone_model,
'dpi': self.phone_dpi,
'resolution': self.phone_resolution,
'chipset': self.phone_chipset}
@user_agent.setter
def user_agent(self, value):
"""Override the useragent string with your own"""
mobj = re.search(Constants.USER_AGENT_EXPRESSION, value)
if not mobj:
raise ValueError('User-agent specified does not fit format required: {0!s}'.format(
Constants.USER_AGENT_EXPRESSION))
self.app_version = mobj.group('app_version')
self.android_release = mobj.group('android_release')
self.android_version = int(mobj.group('android_version'))
self.phone_manufacturer = mobj.group('manufacturer')
self.phone_device = mobj.group('device')
self.phone_model = mobj.group('model')
self.phone_dpi = mobj.group('dpi')
self.phone_resolution = mobj.group('resolution')
self.phone_chipset = mobj.group('chipset')
@staticmethod
def generate_useragent(**kwargs):
"""
Helper method to generate a useragent string based on device parameters
:param kwargs:
- **app_version**
- **android_version**
- **android_release**
- **brand**
- **device**
- **model**
- **dpi**
- **resolution**
- **chipset**
:return: A compatible user agent string
"""
return Constants.USER_AGENT_FORMAT % {
'app_version': kwargs.pop('app_version', None) or Constants.APP_VERSION,
'android_version': int(kwargs.pop('android_version', None) or Constants.ANDROID_VERSION),
'android_release': kwargs.pop('android_release', None) or Constants.ANDROID_RELEASE,
'brand': kwargs.pop('phone_manufacturer', None) or Constants.PHONE_MANUFACTURER,
'device': kwargs.pop('phone_device', None) or Constants.PHONE_DEVICE,
'model': kwargs.pop('phone_model', None) or Constants.PHONE_MODEL,
'dpi': kwargs.pop('phone_dpi', None) or Constants.PHONE_DPI,
'resolution': kwargs.pop('phone_resolution', None) or Constants.PHONE_RESOLUTION,
'chipset': kwargs.pop('phone_chipset', None) or Constants.PHONE_CHIPSET}
@staticmethod
def validate_useragent(value):
"""
Helper method to validate a useragent string for format correctness
:param value:
:return:
"""
mobj = re.search(Constants.USER_AGENT_EXPRESSION, value)
if not mobj:
raise ValueError('User-agent specified does not fit format required: {0!s}'.format(
Constants.USER_AGENT_EXPRESSION))
parse_params = {
'app_version': mobj.group('app_version'),
'android_version': int(mobj.group('android_version')),
'android_release': mobj.group('android_release'),
'brand': mobj.group('manufacturer'),
'device': mobj.group('device'),
'model': mobj.group('model'),
'dpi': mobj.group('dpi'),
'resolution': mobj.group('resolution'),
'chipset': mobj.group('chipset')
}
return {
'user_agent': Constants.USER_AGENT_FORMAT % parse_params,
'parsed_params': parse_params
}
def get_cookie_value(self, key):
for cookie in self.cookie_jar:
if cookie.name.lower() == key.lower():
return cookie.value
return None
@property
def csrftoken(self):
"""The client's current csrf token"""
return self.get_cookie_value('csrftoken')
@property
def token(self):
"""For compatibility. Equivalent to :meth:`csrftoken`"""
return self.csrftoken
@property
def authenticated_user_id(self):
"""The current authenticated user id"""
return self.get_cookie_value('ds_user_id')
@property
def authenticated_user_name(self):
"""The current authenticated user name"""
return self.get_cookie_value('ds_user')
@property
def phone_id(self):
"""Current phone ID. For use in certain functions."""
return self.generate_uuid(return_hex=False, seed=self.device_id)
@property
def timezone_offset(self):
"""Timezone offset in seconds. For use in certain functions."""
return int(round((datetime.now() - datetime.utcnow()).total_seconds()))
@property
def rank_token(self):
if not self.authenticated_user_id:
return None
return '{0!s}_{1!s}'.format(self.authenticated_user_id, self.uuid)
@property
def authenticated_params(self):
return {
'_csrftoken': self.csrftoken,
'_uuid': self.uuid,
'_uid': self.authenticated_user_id
}
@property
def cookie_jar(self):
"""The client's cookiejar instance."""
return self.opener.cookie_jar
@property
def default_headers(self):
return {
'User-Agent': self.user_agent,
'Connection': 'close',
'Accept': '*/*',
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip, deflate',
'X-IG-Capabilities': self.ig_capabilities,
'X-IG-Connection-Type': 'WIFI',
'X-IG-Connection-Speed': '{0:d}kbps'.format(random.randint(1000, 5000)),
}
@property
def radio_type(self):
"""For use in certain endpoints"""
return 'wifi-none'
def _generate_signature(self, data):
"""
Generates the signature for a data string
:param data: content to be signed
:return:
"""
return hmac.new(
self.signature_key.encode('ascii'), data.encode('ascii'),
digestmod=hashlib.sha256).hexdigest()
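# Illustrative sketch (not part of the original class): how the signature is
# combined with the JSON body in _call_api. The params dict is hypothetical.
#
#   json_params = json.dumps({'_uuid': self.uuid}, separators=(',', ':'))
#   signed_body = self._generate_signature(json_params) + '.' + json_params
#   # signed_body is then posted as the 'signed_body' form field together
#   # with 'ig_sig_key_version'.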
@classmethod
def generate_uuid(cls, return_hex=False, seed=None):
"""
Generate uuid
:param return_hex: Return in hex format
:param seed: Seed value to generate a consistent uuid
:return:
"""
if seed:
m = hashlib.md5()
m.update(seed.encode('utf-8'))
new_uuid = uuid.UUID(m.hexdigest())
else:
new_uuid = uuid.uuid1()
if return_hex:
return new_uuid.hex
return str(new_uuid)
@classmethod
def generate_deviceid(cls, seed=None):
"""
Generate an android device ID
:param seed: Seed value to generate a consistent device ID
:return:
"""
return 'android-{0!s}'.format(cls.generate_uuid(True, seed)[:16])
def generate_adid(self, seed=None):
"""
Generate an Advertising ID based on the login username since
the Google Ad ID is a personally identifying but resettable ID.
:return:
"""
modified_seed = seed or self.authenticated_user_name or self.username
if modified_seed:
# Do some trivial mangling of original seed
sha2 = hashlib.sha256()
sha2.update(modified_seed.encode('utf-8'))
modified_seed = sha2.hexdigest()
return self.generate_uuid(False, modified_seed)
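# Illustrative sketch (not part of the original class): seeded IDs are
# deterministic, so the same seed always yields the same identifier.
#
#   Client.generate_uuid(seed='example-seed')      # same uuid on every call
#   Client.generate_deviceid(seed='example-seed')  # 'android-' + 16 hex chars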
def _read_response(self, response):
"""
Extract the response body from an http response.
:param response:
:return:
"""
if response.info().get('Content-Encoding') == 'gzip':
buf = BytesIO(response.read())
res = gzip.GzipFile(fileobj=buf).read().decode('utf8')
else:
res = response.read().decode('utf8')
return res
def _call_api(self, endpoint, params=None, query=None, return_response=False, unsigned=False, version='v1'):
"""
Calls the private api.
:param endpoint: endpoint path that should end with '/', example 'discover/explore/'
:param params: POST parameters
:param query: GET url query parameters
:param return_response: return the response instead of the parsed json object
:param unsigned: use post params as-is without signing
:param version: for the versioned api base url. Default 'v1'.
:return:
"""
url = '{0}{1}'.format(self.api_url.format(version=version), endpoint)
if query:
url += ('?' if '?' not in endpoint else '&') + compat_urllib_parse.urlencode(query)
headers = self.default_headers
data = None
if params or params == '':
headers['Content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
if params == '': # force post if empty string
data = ''.encode('ascii')
else:
if not unsigned:
json_params = json.dumps(params, separators=(',', ':'))
hash_sig = self._generate_signature(json_params)
post_params = {
'ig_sig_key_version': self.key_version,
'signed_body': hash_sig + '.' + json_params
}
else:
# direct form post
post_params = params
data = compat_urllib_parse.urlencode(post_params).encode('ascii')
req = compat_urllib_request.Request(url, data, headers=headers)
try:
self.logger.debug('REQUEST: {0!s} {1!s}'.format(url, req.get_method()))
self.logger.debug('DATA: {0!s}'.format(data))
response = self.opener.open(req, timeout=self.timeout)
except compat_urllib_error.HTTPError as e:
error_msg = e.reason
error_response = self._read_response(e)
self.logger.debug('RESPONSE: {0:d} {1!s}'.format(e.code, error_response))
try:
error_obj = json.loads(error_response)
if error_obj.get('message') == 'login_required':
raise ClientLoginRequiredError(
error_obj.get('message'), code=e.code,
error_response=json.dumps(error_obj))
elif e.code == ClientErrorCodes.TOO_MANY_REQUESTS:
raise ClientThrottledError(
error_obj.get('message'), code=e.code,
error_response=json.dumps(error_obj))
elif error_obj.get('message'):
error_msg = '{0!s}: {1!s}'.format(e.reason, error_obj['message'])
except (ClientLoginError, ClientLoginRequiredError, ClientThrottledError):
raise
except Exception as e:
# do nothing else, prob can't parse json
self.logger.warning('Error parsing error response: {}'.format(str(e)))
raise ClientError(error_msg, e.code, error_response)
if return_response:
return response
response_content = self._read_response(response)
self.logger.debug('RESPONSE: {0:d} {1!s}'.format(response.code, response_content))
json_response = json.loads(response_content)
if json_response.get('message', '') == 'login_required':
raise ClientLoginRequiredError(
json_response.get('message'), code=response.code,
error_response=json.dumps(json_response))
# neither an oembed response nor an ok response
if not json_response.get('provider_url') and json_response.get('status', '') != 'ok':
raise ClientError(
json_response.get('message', 'Unknown error'), code=response.code,
error_response=json.dumps(json_response))
return json_response
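# Illustrative sketch (not part of the original class): calling the private
# api through _call_api. The first call uses the endpoint example from the
# docstring; the second endpoint path and params are hypothetical.
#
#   explore = api._call_api('discover/explore/')      # no params -> GET
#   result = api._call_api('some/endpoint/', params=api.authenticated_params)
#   # Both return the parsed JSON response; ClientError and its subclasses
#   # are raised on error responses.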
|
|
import music21
import torch
import numpy as np
from music21 import interval, stream
from torch.utils.data import TensorDataset
from tqdm import tqdm
from DatasetManager.helpers import standard_name, SLUR_SYMBOL, START_SYMBOL, END_SYMBOL, \
standard_note, OUT_OF_RANGE, REST_SYMBOL
from DatasetManager.metadata import FermataMetadata
from DatasetManager.music_dataset import MusicDataset
class ChoraleDataset(MusicDataset):
"""
Class for all chorale-like datasets
"""
def __init__(self,
corpus_it_gen,
name,
voice_ids,
metadatas=None,
sequences_size=8,
subdivision=4,
cache_dir=None):
"""
:param corpus_it_gen: calling this function returns an iterator
over chorales (as music21 scores)
:param name: name of the dataset
:param voice_ids: list of voice_indexes to be used
:param metadatas: list[Metadata], the list of used metadatas
:param sequences_size: in beats
:param subdivision: number of sixteenth notes per beat
:param cache_dir: directory where tensor_dataset is stored
"""
super(ChoraleDataset, self).__init__(cache_dir=cache_dir)
self.voice_ids = voice_ids
# TODO WARNING voice_ids is never used!
self.num_voices = len(voice_ids)
self.name = name
self.sequences_size = sequences_size
self.index2note_dicts = None
self.note2index_dicts = None
self.corpus_it_gen = corpus_it_gen
self.voice_ranges = None # in midi pitch
self.metadatas = metadatas
self.subdivision = subdivision
def __repr__(self):
return f'ChoraleDataset(' \
f'{self.voice_ids},' \
f'{self.name},' \
f'{[metadata.name for metadata in self.metadatas]},' \
f'{self.sequences_size},' \
f'{self.subdivision})'
def iterator_gen(self):
return (chorale
for chorale in self.corpus_it_gen()
if self.is_valid(chorale)
)
def make_tensor_dataset(self):
"""
Implementation of the make_tensor_dataset abstract base class
"""
# todo check on chorale with Chord
print('Making tensor dataset')
self.compute_index_dicts()
self.compute_voice_ranges()
one_tick = 1 / self.subdivision
chorale_tensor_dataset = []
metadata_tensor_dataset = []
for chorale_id, chorale in tqdm(enumerate(self.iterator_gen())):
# precompute all possible transpositions and corresponding metadatas
chorale_transpositions = {}
metadatas_transpositions = {}
# main loop
for offsetStart in np.arange(
chorale.flat.lowestOffset -
(self.sequences_size - one_tick),
chorale.flat.highestOffset,
one_tick):
offsetEnd = offsetStart + self.sequences_size
current_subseq_ranges = self.voice_range_in_subsequence(
chorale,
offsetStart=offsetStart,
offsetEnd=offsetEnd)
transposition = self.min_max_transposition(current_subseq_ranges)
min_transposition_subsequence, max_transposition_subsequence = transposition
for semi_tone in range(min_transposition_subsequence,
max_transposition_subsequence + 1):
start_tick = int(offsetStart * self.subdivision)
end_tick = int(offsetEnd * self.subdivision)
try:
# compute transpositions lazily
if semi_tone not in chorale_transpositions:
(chorale_tensor,
metadata_tensor) = self.transposed_score_and_metadata_tensors(
chorale,
semi_tone=semi_tone)
chorale_transpositions.update(
{semi_tone:
chorale_tensor})
metadatas_transpositions.update(
{semi_tone:
metadata_tensor})
else:
chorale_tensor = chorale_transpositions[semi_tone]
metadata_tensor = metadatas_transpositions[semi_tone]
local_chorale_tensor = self.extract_score_tensor_with_padding(
chorale_tensor,
start_tick, end_tick)
local_metadata_tensor = self.extract_metadata_with_padding(
metadata_tensor,
start_tick, end_tick)
# append and add batch dimension
# cast to int
chorale_tensor_dataset.append(
local_chorale_tensor[None, :, :].int())
metadata_tensor_dataset.append(
local_metadata_tensor[None, :, :, :].int())
except KeyError:
# some problems may occur with the key analyzer
print(f'KeyError with chorale {chorale_id}')
chorale_tensor_dataset = torch.cat(chorale_tensor_dataset, 0)
metadata_tensor_dataset = torch.cat(metadata_tensor_dataset, 0)
dataset = TensorDataset(chorale_tensor_dataset,
metadata_tensor_dataset)
print(f'Sizes: {chorale_tensor_dataset.size()}, {metadata_tensor_dataset.size()}')
return dataset
def transposed_score_and_metadata_tensors(self, score, semi_tone):
"""
Convert a score into a couple (chorale_tensor, metadata_tensor);
the original score is transposed by semi_tone semitones.
:param score: music21 score object
:param semi_tone: number of semitones to transpose by
:return: couple of tensors
"""
# transpose
# compute the most "natural" interval given a number of semi-tones
interval_type, interval_nature = interval.convertSemitoneToSpecifierGeneric(
semi_tone)
transposition_interval = interval.Interval(
str(interval_nature) + interval_type)
chorale_transposed = score.transpose(transposition_interval)
chorale_tensor = self.get_score_tensor(
chorale_transposed,
offsetStart=0.,
offsetEnd=chorale_transposed.flat.highestTime)
metadatas_transposed = self.get_metadata_tensor(chorale_transposed)
return chorale_tensor, metadatas_transposed
def get_metadata_tensor(self, score):
"""
Also adds the index of the voices
:param score: music21 stream
:return:tensor (num_voices, chorale_length, len(self.metadatas) + 1)
"""
md = []
if self.metadatas:
for metadata in self.metadatas:
sequence_metadata = torch.from_numpy(
metadata.evaluate(score, self.subdivision)).long().clone()
square_metadata = sequence_metadata.repeat(self.num_voices, 1)
md.append(
square_metadata[:, :, None]
)
chorale_length = int(score.duration.quarterLength * self.subdivision)
# add voice indexes
voice_id_metadata = torch.from_numpy(np.arange(self.num_voices)).long().clone()
square_metadata = torch.transpose(voice_id_metadata.repeat(chorale_length, 1),
0, 1)
md.append(square_metadata[:, :, None])
all_metadata = torch.cat(md, 2)
return all_metadata
def set_fermatas(self, metadata_tensor, fermata_tensor):
"""
Impose fermatas for all chorales in a batch
:param metadata_tensor: a (batch_size, sequences_size, num_metadatas)
tensor
:param fermata_tensor: a (sequences_size) binary tensor
"""
if self.metadatas:
for metadata_index, metadata in enumerate(self.metadatas):
if isinstance(metadata, FermataMetadata):
# uses broadcasting
metadata_tensor[:, :, metadata_index] = fermata_tensor
break
return metadata_tensor
def add_fermata(self, metadata_tensor, time_index_start, time_index_stop):
"""
Shorthand function to impose a fermata between two time indexes
"""
fermata_tensor = torch.zeros(self.sequences_size)
fermata_tensor[time_index_start:time_index_stop] = 1
metadata_tensor = self.set_fermatas(metadata_tensor, fermata_tensor)
return metadata_tensor
def min_max_transposition(self, current_subseq_ranges):
if current_subseq_ranges is None:
# todo might be too restrictive
# there is no note in one part
transposition = (0, 0) # min and max transpositions
else:
transpositions = [
(min_pitch_corpus - min_pitch_current,
max_pitch_corpus - max_pitch_current)
for ((min_pitch_corpus, max_pitch_corpus),
(min_pitch_current, max_pitch_current))
in zip(self.voice_ranges, current_subseq_ranges)
]
transpositions = [min_or_max_transposition
for min_or_max_transposition in zip(*transpositions)]
transposition = [max(transpositions[0]),
min(transpositions[1])]
return transposition
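# Illustrative sketch (not part of the original class): a worked example of
# the transposition bounds for a single voice. Suppose the corpus-wide range
# of that voice is (60, 72) and the current subsequence spans (62, 70); the
# subsequence can then be shifted down or up by at most 2 semitones:
#
#   self.voice_ranges = [(60, 72)]
#   self.min_max_transposition([(62, 70)])   # -> [-2, 2]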
def get_score_tensor(self, score, offsetStart, offsetEnd):
chorale_tensor = []
for part_id, part in enumerate(score.parts[:self.num_voices]):
part_tensor = self.part_to_tensor(part, part_id,
offsetStart=offsetStart,
offsetEnd=offsetEnd)
chorale_tensor.append(part_tensor)
return torch.cat(chorale_tensor, 0)
def part_to_tensor(self, part, part_id, offsetStart, offsetEnd):
"""
:param part:
:param part_id:
:param offsetStart:
:param offsetEnd:
:return: torch IntTensor (1, length)
"""
list_notes_and_rests = list(part.flat.getElementsByOffset(
offsetStart=offsetStart,
offsetEnd=offsetEnd,
classList=[music21.note.Note,
music21.note.Rest]))
list_note_strings_and_pitches = [(n.nameWithOctave, n.pitch.midi)
for n in list_notes_and_rests
if n.isNote]
length = int((offsetEnd - offsetStart) * self.subdivision) # in ticks
# add entries to dictionaries if not present
# should only be called by make_dataset when transposing
note2index = self.note2index_dicts[part_id]
index2note = self.index2note_dicts[part_id]
voice_range = self.voice_ranges[part_id]
min_pitch, max_pitch = voice_range
for note_name, pitch in list_note_strings_and_pitches:
# if out of range
if pitch < min_pitch or pitch > max_pitch:
note_name = OUT_OF_RANGE
if note_name not in note2index:
new_index = len(note2index)
index2note.update({new_index: note_name})
note2index.update({note_name: new_index})
print('Warning: Entry ' + str(
{new_index: note_name}) + ' added to dictionaries')
# construct sequence
j = 0
i = 0
t = np.zeros((length, 2))
is_articulated = True
num_notes = len(list_notes_and_rests)
while i < length:
if j < num_notes - 1:
if (list_notes_and_rests[j + 1].offset > i
/ self.subdivision + offsetStart):
t[i, :] = [note2index[standard_name(list_notes_and_rests[j],
voice_range=voice_range)],
is_articulated]
i += 1
is_articulated = False
else:
j += 1
is_articulated = True
else:
t[i, :] = [note2index[standard_name(list_notes_and_rests[j],
voice_range=voice_range)],
is_articulated]
i += 1
is_articulated = False
seq = t[:, 0] * t[:, 1] + (1 - t[:, 1]) * note2index[SLUR_SYMBOL]
tensor = torch.from_numpy(seq).long()[None, :]
return tensor
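# Illustrative sketch (not part of the original class): the per-tick encoding
# produced by part_to_tensor. Each tick stores (note index, is_articulated),
# and held ticks are mapped to the SLUR_SYMBOL index by
#   seq = t[:, 0] * t[:, 1] + (1 - t[:, 1]) * note2index[SLUR_SYMBOL]
# so a quarter-note C4 at subdivision=4 becomes, with idx = note2index lookup,
#   [idx('C4'), idx(SLUR_SYMBOL), idx(SLUR_SYMBOL), idx(SLUR_SYMBOL)]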
def voice_range_in_subsequence(self, chorale, offsetStart, offsetEnd):
"""
Returns None if no note is present in one of the voices -> no transposition
:param chorale:
:param offsetStart:
:param offsetEnd:
:return:
"""
voice_ranges = []
for part in chorale.parts[:self.num_voices]:
voice_range_part = self.voice_range_in_part(part,
offsetStart=offsetStart,
offsetEnd=offsetEnd)
if voice_range_part is None:
return None
else:
voice_ranges.append(voice_range_part)
return voice_ranges
def voice_range_in_part(self, part, offsetStart, offsetEnd):
notes_in_subsequence = part.flat.getElementsByOffset(
offsetStart,
offsetEnd,
includeEndBoundary=False,
mustBeginInSpan=True,
mustFinishInSpan=False,
classList=[music21.note.Note,
music21.note.Rest])
midi_pitches_part = [
n.pitch.midi
for n in notes_in_subsequence
if n.isNote
]
if len(midi_pitches_part) > 0:
return min(midi_pitches_part), max(midi_pitches_part)
else:
return None
def compute_index_dicts(self):
print('Computing index dicts')
self.index2note_dicts = [
{} for _ in range(self.num_voices)
]
self.note2index_dicts = [
{} for _ in range(self.num_voices)
]
# create and add additional symbols
note_sets = [set() for _ in range(self.num_voices)]
for note_set in note_sets:
note_set.add(SLUR_SYMBOL)
note_set.add(START_SYMBOL)
note_set.add(END_SYMBOL)
note_set.add(REST_SYMBOL)
# get all notes: used for computing pitch ranges
for chorale in tqdm(self.iterator_gen()):
for part_id, part in enumerate(chorale.parts[:self.num_voices]):
for n in part.flat.notesAndRests:
note_sets[part_id].add(standard_name(n))
# create tables
for note_set, index2note, note2index in zip(note_sets,
self.index2note_dicts,
self.note2index_dicts):
for note_index, note in enumerate(note_set):
index2note.update({note_index: note})
note2index.update({note: note_index})
def is_valid(self, chorale):
# We only consider 4-part chorales
if not len(chorale.parts) == 4:
return False
# todo contains chord
return True
def compute_voice_ranges(self):
assert self.index2note_dicts is not None
assert self.note2index_dicts is not None
self.voice_ranges = []
print('Computing voice ranges')
for voice_index, note2index in tqdm(enumerate(self.note2index_dicts)):
notes = [
standard_note(note_string)
for note_string in note2index
]
midi_pitches = [
n.pitch.midi
for n in notes
if n.isNote
]
min_midi, max_midi = min(midi_pitches), max(midi_pitches)
self.voice_ranges.append((min_midi, max_midi))
def extract_score_tensor_with_padding(self, tensor_score, start_tick, end_tick):
"""
:param tensor_score: (num_voices, length in ticks)
:param start_tick:
:param end_tick:
:return: tensor_score[:, start_tick: end_tick]
with padding if necessary,
i.e. if start_tick < 0 or end_tick > tensor_score length
"""
assert start_tick < end_tick
assert end_tick > 0
length = tensor_score.size()[1]
padded_chorale = []
# todo add PAD_SYMBOL
if start_tick < 0:
start_symbols = np.array([note2index[START_SYMBOL]
for note2index in self.note2index_dicts])
start_symbols = torch.from_numpy(start_symbols).long().clone()
start_symbols = start_symbols.repeat(-start_tick, 1).transpose(0, 1)
padded_chorale.append(start_symbols)
slice_start = start_tick if start_tick > 0 else 0
slice_end = end_tick if end_tick < length else length
padded_chorale.append(tensor_score[:, slice_start: slice_end])
if end_tick > length:
end_symbols = np.array([note2index[END_SYMBOL]
for note2index in self.note2index_dicts])
end_symbols = torch.from_numpy(end_symbols).long().clone()
end_symbols = end_symbols.repeat(end_tick - length, 1).transpose(0, 1)
padded_chorale.append(end_symbols)
padded_chorale = torch.cat(padded_chorale, 1)
return padded_chorale
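# Illustrative sketch (not part of the original class): padding behaviour for
# a hypothetical score tensor t of shape (num_voices, 16).
#
#   self.extract_score_tensor_with_padding(t, -4, 12)
#   # -> 4 columns of START_SYMBOL indices followed by t[:, 0:12]
#   self.extract_score_tensor_with_padding(t, 8, 20)
#   # -> t[:, 8:16] followed by 4 columns of END_SYMBOL indices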
def extract_metadata_with_padding(self, tensor_metadata,
start_tick, end_tick):
"""
:param tensor_metadata: (num_voices, length, num_metadatas)
last metadata is the voice_index
:param start_tick:
:param end_tick:
:return:
"""
assert start_tick < end_tick
assert end_tick > 0
num_voices, length, num_metadatas = tensor_metadata.size()
padded_tensor_metadata = []
if start_tick < 0:
# TODO more subtle padding
start_symbols = np.zeros((self.num_voices, -start_tick, num_metadatas))
start_symbols = torch.from_numpy(start_symbols).long().clone()
padded_tensor_metadata.append(start_symbols)
slice_start = start_tick if start_tick > 0 else 0
slice_end = end_tick if end_tick < length else length
padded_tensor_metadata.append(tensor_metadata[:, slice_start: slice_end, :])
if end_tick > length:
end_symbols = np.zeros((self.num_voices, end_tick - length, num_metadatas))
end_symbols = torch.from_numpy(end_symbols).long().clone()
padded_tensor_metadata.append(end_symbols)
padded_tensor_metadata = torch.cat(padded_tensor_metadata, 1)
return padded_tensor_metadata
def empty_score_tensor(self, score_length):
start_symbols = np.array([note2index[START_SYMBOL]
for note2index in self.note2index_dicts])
start_symbols = torch.from_numpy(start_symbols).long().clone()
start_symbols = start_symbols.repeat(score_length, 1).transpose(0, 1)
return start_symbols
def random_score_tensor(self, score_length):
chorale_tensor = np.array(
[np.random.randint(len(note2index),
size=score_length)
for note2index in self.note2index_dicts])
chorale_tensor = torch.from_numpy(chorale_tensor).long().clone()
return chorale_tensor
def tensor_to_score(self, tensor_score,
fermata_tensor=None):
"""
:param tensor_score: (num_voices, length)
:return: music21 score object
"""
slur_indexes = [note2index[SLUR_SYMBOL]
for note2index in self.note2index_dicts]
score = music21.stream.Score()
num_voices = tensor_score.size(0)
name_parts = (num_voices == 4)
part_names = ['Soprano', 'Alto', 'Tenor', 'Bass']
for voice_index, (voice, index2note, slur_index) in enumerate(
zip(tensor_score,
self.index2note_dicts,
slur_indexes)):
add_fermata = False
if name_parts:
part = stream.Part(id=part_names[voice_index],
partName=part_names[voice_index],
partAbbreviation=part_names[voice_index],
instrumentName=part_names[voice_index])
else:
part = stream.Part(id='part' + str(voice_index))
dur = 0
total_duration = 0
f = music21.note.Rest()
for note_index in [n.item() for n in voice]:
# if it is a played note
if not note_index == slur_indexes[voice_index]:
# add previous note
if dur > 0:
f.duration = music21.duration.Duration(dur / self.subdivision)
if add_fermata:
f.expressions.append(music21.expressions.Fermata())
add_fermata = False
part.append(f)
dur = 1
f = standard_note(index2note[note_index])
if fermata_tensor is not None and voice_index == 0:
if fermata_tensor[0, total_duration] == 1:
add_fermata = True
else:
add_fermata = False
total_duration += 1
else:
dur += 1
total_duration += 1
# add last note
f.duration = music21.duration.Duration(dur / self.subdivision)
if add_fermata:
f.expressions.append(music21.expressions.Fermata())
add_fermata = False
part.append(f)
score.insert(part)
return score
# TODO should go in ChoraleDataset
# TODO all subsequences start on a beat
class ChoraleBeatsDataset(ChoraleDataset):
def __repr__(self):
return f'ChoraleBeatsDataset(' \
f'{self.voice_ids},' \
f'{self.name},' \
f'{[metadata.name for metadata in self.metadatas]},' \
f'{self.sequences_size},' \
f'{self.subdivision})'
def make_tensor_dataset(self):
"""
Implementation of the make_tensor_dataset abstract base class
"""
# todo check on chorale with Chord
print('Making tensor dataset')
self.compute_index_dicts()
self.compute_voice_ranges()
one_beat = 1.
chorale_tensor_dataset = []
metadata_tensor_dataset = []
for chorale_id, chorale in tqdm(enumerate(self.iterator_gen())):
# precompute all possible transpositions and corresponding metadatas
chorale_transpositions = {}
metadatas_transpositions = {}
# main loop
for offsetStart in np.arange(
chorale.flat.lowestOffset -
(self.sequences_size - one_beat),
chorale.flat.highestOffset,
one_beat):
offsetEnd = offsetStart + self.sequences_size
current_subseq_ranges = self.voice_range_in_subsequence(
chorale,
offsetStart=offsetStart,
offsetEnd=offsetEnd)
transposition = self.min_max_transposition(current_subseq_ranges)
min_transposition_subsequence, max_transposition_subsequence = transposition
for semi_tone in range(min_transposition_subsequence,
max_transposition_subsequence + 1):
start_tick = int(offsetStart * self.subdivision)
end_tick = int(offsetEnd * self.subdivision)
try:
# compute transpositions lazily
if semi_tone not in chorale_transpositions:
(chorale_tensor,
metadata_tensor) = self.transposed_score_and_metadata_tensors(
chorale,
semi_tone=semi_tone)
chorale_transpositions.update(
{semi_tone:
chorale_tensor})
metadatas_transpositions.update(
{semi_tone:
metadata_tensor})
else:
chorale_tensor = chorale_transpositions[semi_tone]
metadata_tensor = metadatas_transpositions[semi_tone]
local_chorale_tensor = self.extract_score_tensor_with_padding(
chorale_tensor,
start_tick, end_tick)
local_metadata_tensor = self.extract_metadata_with_padding(
metadata_tensor,
start_tick, end_tick)
# append and add batch dimension
# cast to int
chorale_tensor_dataset.append(
local_chorale_tensor[None, :, :].int())
metadata_tensor_dataset.append(
local_metadata_tensor[None, :, :, :].int())
except KeyError:
# some problems may occur with the key analyzer
print(f'KeyError with chorale {chorale_id}')
chorale_tensor_dataset = torch.cat(chorale_tensor_dataset, 0)
metadata_tensor_dataset = torch.cat(metadata_tensor_dataset, 0)
dataset = TensorDataset(chorale_tensor_dataset,
metadata_tensor_dataset)
print(f'Sizes: {chorale_tensor_dataset.size()}, {metadata_tensor_dataset.size()}')
return dataset
|
|
#!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import warnings
from flaky import flaky
import os
import pytest
from random import choice
import time
import unittest
import lastfm_source.pylast.pylast as pylast
def load_secrets():
secrets_file = "test_pylast.yaml"
if os.path.isfile(secrets_file):
import yaml # pip install pyyaml
with open(secrets_file, "r") as f: # see example_test_pylast.yaml
doc = yaml.safe_load(f)
else:
doc = {}
try:
doc["username"] = os.environ['PYLAST_USERNAME'].strip()
doc["password_hash"] = os.environ['PYLAST_PASSWORD_HASH'].strip()
doc["api_key"] = os.environ['PYLAST_API_KEY'].strip()
doc["api_secret"] = os.environ['PYLAST_API_SECRET'].strip()
except KeyError:
pytest.skip("Missing environment variables: PYLAST_USERNAME etc.")
return doc
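# Illustrative sketch (not part of the original module): the keys expected in
# test_pylast.yaml (see example_test_pylast.yaml), or alternatively in the
# PYLAST_* environment variables. The values below are placeholders only.
#
#   username: my_lastfm_user
#   password_hash: 5f4dcc3b5aa765d61d8327deb882cf99
#   api_key: 0123456789abcdef0123456789abcdef
#   api_secret: fedcba9876543210fedcba9876543210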
def handle_lastfm_exceptions(f):
"""Skip exceptions caused by Last.fm's broken API"""
def wrapper(*args, **kw):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DeprecationWarning)
try:
result = f(*args, **kw)
if not len(w):
return result
else:
print(w[-1])
pytest.skip('No longer deprecated!')
except pylast.WSError as e:
if not len(w):
if (str(e) == "Invalid Method - "
"No method with that name in this package"):
msg = "Ignore broken Last.fm API: " + str(e)
print(msg)
pytest.skip(msg)
else:
raise(e)
else:
print(w[-1])
print(e)
except Exception as e:
if not len(w):
print(e)
raise(e)
else:
print(w[-1])
print(e)
return wrapper
@flaky(max_runs=5, min_passes=1)
class TestPyLast(unittest.TestCase):
secrets = None
def unix_timestamp(self):
return int(time.time())
def setUp(self):
if self.__class__.secrets is None:
self.__class__.secrets = load_secrets()
self.username = self.__class__.secrets["username"]
password_hash = self.__class__.secrets["password_hash"]
API_KEY = self.__class__.secrets["api_key"]
API_SECRET = self.__class__.secrets["api_secret"]
self.network = pylast.LastFMNetwork(
api_key=API_KEY, api_secret=API_SECRET,
username=self.username, password_hash=password_hash)
def skip_if_lastfm_api_broken(self, value):
"""Skip things not yet restored in Last.fm's broken API"""
if value is None or len(value) == 0:
raise Exception("Last.fm API is broken.")
@handle_lastfm_exceptions
def test_scrobble(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
timestamp = self.unix_timestamp()
lastfm_user = self.network.get_user(self.username)
# Act
self.network.scrobble(artist=artist, title=title, timestamp=timestamp)
# Assert
# limit=2 to ignore now-playing:
last_scrobble = lastfm_user.get_recent_tracks(limit=2)[0]
self.assertEqual(str(last_scrobble.track.artist), str(artist))
self.assertEqual(str(last_scrobble.track.title), str(title))
self.assertEqual(str(last_scrobble.timestamp), str(timestamp))
@handle_lastfm_exceptions
def test_unscrobble(self):
# Arrange
artist = "Test Artist 2"
title = "Test Title 2"
timestamp = self.unix_timestamp()
library = pylast.Library(user=self.username, network=self.network)
self.network.scrobble(artist=artist, title=title, timestamp=timestamp)
lastfm_user = self.network.get_user(self.username)
# Act
library.remove_scrobble(
artist=artist, title=title, timestamp=timestamp)
# Assert
# limit=2 to ignore now-playing:
last_scrobble = lastfm_user.get_recent_tracks(limit=2)[0]
self.assertNotEqual(str(last_scrobble.timestamp), str(timestamp))
@handle_lastfm_exceptions
def test_add_album(self):
# Arrange
library = pylast.Library(user=self.username, network=self.network)
album = self.network.get_album("Test Artist", "Test Album")
# Act
library.add_album(album)
# Assert
my_albums = library.get_albums()
for my_album in my_albums:
value = (album == my_album[0])
if value:
break
self.assertTrue(value)
@handle_lastfm_exceptions
def test_remove_album(self):
# Arrange
library = pylast.Library(user=self.username, network=self.network)
# Pick an artist with plenty of albums
artist = self.network.get_top_artists(limit=1)[0].item
albums = artist.get_top_albums()
# Pick a random one to avoid problems running concurrent tests
album = choice(albums)[0]
library.add_album(album)
# Act
library.remove_album(album)
# Assert
my_albums = library.get_albums()
for my_album in my_albums:
value = (album == my_album[0])
if value:
break
self.assertFalse(value)
@handle_lastfm_exceptions
def test_add_artist(self):
# Arrange
artist = "Test Artist 2"
library = pylast.Library(user=self.username, network=self.network)
# Act
library.add_artist(artist)
# Assert
artists = library.get_artists()
for artist in artists:
value = (str(artist[0]) == "Test Artist 2")
if value:
break
self.assertTrue(value)
@handle_lastfm_exceptions
def test_remove_artist(self):
# Arrange
# Get plenty of artists
artists = self.network.get_top_artists()
# Pick a random one to avoid problems running concurrent tests
my_artist = choice(artists).item
library = pylast.Library(user=self.username, network=self.network)
library.add_artist(my_artist)
# Act
library.remove_artist(my_artist)
# Assert
artists = library.get_artists()
for artist in artists:
value = (artist[0] == my_artist)
if value:
break
self.assertFalse(value)
@handle_lastfm_exceptions
def test_get_venue(self):
# Arrange
venue_name = "Last.fm Office"
country_name = "United Kingdom"
# Act
venue_search = self.network.search_for_venue(venue_name, country_name)
venue = venue_search.get_next_page()[0]
# Assert
self.assertEqual(str(venue.id), "8778225")
@handle_lastfm_exceptions
def test_get_user_registration(self):
# Arrange
username = "RJ"
user = self.network.get_user(username)
# Act
registered = user.get_registered()
# Assert
# Last.fm API broken? Should be yyyy-mm-dd not Unix timestamp
if int(registered):
pytest.skip(f"Should be yyyy-mm-dd: {registered}")
# Just check date because of timezones
self.assertIn(u"2002-11-20 ", registered)
@handle_lastfm_exceptions
def test_get_user_unixtime_registration(self):
# Arrange
username = "RJ"
user = self.network.get_user(username)
# Act
unixtime_registered = user.get_unixtime_registered()
# Assert
# Just check date because of timezones
self.assertEqual(unixtime_registered, u"1037793040")
# @handle_lastfm_exceptions
# def test_get_genderless_user(self):
# # Arrange
# # Currently test_user has no gender set:
# lastfm_user = self.network.get_user("test_user")
#
# # Act
# gender = lastfm_user.get_gender()
#
# # Assert
# self.assertIsNone(gender)
@handle_lastfm_exceptions
def test_get_gender(self):
# Arrange
lastfm_user = self.network.get_user('micemusculus')
# Act
gender = lastfm_user.get_gender()
# Assert
self.assertIn(gender, {pylast.MALE, pylast.FEMALE})
@handle_lastfm_exceptions
def test_get_age(self):
# Arrange
lastfm_user = self.network.get_user('micemusculus')
# Act
age = lastfm_user.get_age()
# Assert
self.assertGreater(age, 0)
@handle_lastfm_exceptions
def test_get_countryless_user(self):
# Arrange
# Currently test_user has no country set:
lastfm_user = self.network.get_user("test_user")
# Act
country = lastfm_user.get_country()
# Assert
self.assertIsNone(country)
@handle_lastfm_exceptions
def test_love(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
track = self.network.get_track(artist, title)
lastfm_user = self.network.get_user(self.username)
# Act
track.love()
# Assert
loved = lastfm_user.get_loved_tracks(limit=1)
self.assertEqual(str(loved[0].track.artist), "Test Artist")
self.assertEqual(str(loved[0].track.title), "Test Title")
@handle_lastfm_exceptions
def test_unlove(self):
# Arrange
artist = pylast.Artist("Test Artist", self.network)
title = "Test Title"
track = pylast.Track(artist, title, self.network)
lastfm_user = self.network.get_user(self.username)
track.love()
# Act
track.unlove()
# Assert
loved = lastfm_user.get_loved_tracks(limit=1)
if len(loved): # OK to be empty but if not:
self.assertNotEqual(str(loved[0].track.artist), "Test Artist")
self.assertNotEqual(str(loved[0].track.title), "Test Title")
@handle_lastfm_exceptions
def test_get_100_albums(self):
# Arrange
library = pylast.Library(user=self.username, network=self.network)
# Act
albums = library.get_albums(limit=100)
# Assert
self.assertGreaterEqual(len(albums), 0)
@handle_lastfm_exceptions
def test_get_limitless_albums(self):
# Arrange
library = pylast.Library(user=self.username, network=self.network)
# Act
albums = library.get_albums(limit=None)
# Assert
self.assertGreaterEqual(len(albums), 0)
@handle_lastfm_exceptions
def test_user_equals_none(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
# Act
value = (lastfm_user is None)
# Assert
self.assertFalse(value)
@handle_lastfm_exceptions
def test_user_not_equal_to_none(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
# Act
value = (lastfm_user is not None)
# Assert
self.assertTrue(value)
@handle_lastfm_exceptions
def test_now_playing_user_with_no_scrobbles(self):
# Arrange
# Currently test-account has no scrobbles:
user = self.network.get_user('test-account')
# Act
current_track = user.get_now_playing()
# Assert
self.assertIsNone(current_track)
@handle_lastfm_exceptions
def test_love_limits(self):
# Arrange
# Currently test-account has at least 23 loved tracks:
user = self.network.get_user("test-user")
# Act/Assert
self.assertEqual(len(user.get_loved_tracks(limit=20)), 20)
self.assertLessEqual(len(user.get_loved_tracks(limit=100)), 100)
self.assertGreaterEqual(len(user.get_loved_tracks(limit=None)), 23)
self.assertGreaterEqual(len(user.get_loved_tracks(limit=0)), 23)
@handle_lastfm_exceptions
def test_update_now_playing(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
album = "Test Album"
track_number = 1
lastfm_user = self.network.get_user(self.username)
# Act
self.network.update_now_playing(
artist=artist, title=title, album=album, track_number=track_number)
# Assert
current_track = lastfm_user.get_now_playing()
self.assertIsNotNone(current_track)
self.assertEqual(str(current_track.title), "Test Title")
self.assertEqual(str(current_track.artist), "Test Artist")
@handle_lastfm_exceptions
def test_album_tags_are_topitems(self):
# Arrange
albums = self.network.get_user('RJ').get_top_albums()
# Act
tags = albums[0].item.get_top_tags(limit=1)
# Assert
self.assertGreater(len(tags), 0)
self.assertIsInstance(tags[0], pylast.TopItem)
def helper_is_thing_hashable(self, thing):
# Arrange
things = set()
# Act
things.add(thing)
# Assert
self.assertIsNotNone(thing)
self.assertEqual(len(things), 1)
@handle_lastfm_exceptions
def test_album_is_hashable(self):
# Arrange
album = self.network.get_album("Test Artist", "Test Album")
# Act/Assert
self.helper_is_thing_hashable(album)
@handle_lastfm_exceptions
def test_artist_is_hashable(self):
# Arrange
test_artist = self.network.get_artist("Test Artist")
artist = test_artist.get_similar(limit=2)[0].item
self.assertIsInstance(artist, pylast.Artist)
# Act/Assert
self.helper_is_thing_hashable(artist)
@handle_lastfm_exceptions
def test_country_is_hashable(self):
# Arrange
country = self.network.get_country("Italy")
# Act/Assert
self.helper_is_thing_hashable(country)
@handle_lastfm_exceptions
def test_metro_is_hashable(self):
# Arrange
metro = self.network.get_metro("Helsinki", "Finland")
# Act/Assert
self.helper_is_thing_hashable(metro)
@handle_lastfm_exceptions
def test_event_is_hashable(self):
# Arrange
user = self.network.get_user("RJ")
event = user.get_past_events(limit=1)[0]
# Act/Assert
self.helper_is_thing_hashable(event)
@handle_lastfm_exceptions
def test_group_is_hashable(self):
# Arrange
group = self.network.get_group("Audioscrobbler Beta")
# Act/Assert
self.helper_is_thing_hashable(group)
@handle_lastfm_exceptions
def test_library_is_hashable(self):
# Arrange
library = pylast.Library(user=self.username, network=self.network)
# Act/Assert
self.helper_is_thing_hashable(library)
@handle_lastfm_exceptions
def test_playlist_is_hashable(self):
# Arrange
playlist = pylast.Playlist(
user="RJ", playlist_id="1k1qp_doglist", network=self.network)
# Act/Assert
self.helper_is_thing_hashable(playlist)
@handle_lastfm_exceptions
def test_tag_is_hashable(self):
# Arrange
tag = self.network.get_top_tags(limit=1)[0]
# Act/Assert
self.helper_is_thing_hashable(tag)
@handle_lastfm_exceptions
def test_track_is_hashable(self):
# Arrange
artist = self.network.get_artist("Test Artist")
track = artist.get_top_tracks()[0].item
self.assertIsInstance(track, pylast.Track)
# Act/Assert
self.helper_is_thing_hashable(track)
@handle_lastfm_exceptions
def test_user_is_hashable(self):
# Arrange
# artist = self.network.get_artist("Test Artist")
# user = artist.get_top_fans(limit=1)[0].item
user = self.network.get_user('micemusculus')
self.assertIsInstance(user, pylast.User)
# Act/Assert
self.helper_is_thing_hashable(user)
@handle_lastfm_exceptions
def test_venue_is_hashable(self):
# Arrange
venue_id = "8778225" # Last.fm office
venue = pylast.Venue(venue_id, self.network)
# Act/Assert
self.helper_is_thing_hashable(venue)
@handle_lastfm_exceptions
def test_xspf_is_hashable(self):
# Arrange
xspf = pylast.XSPF(
uri="lastfm://playlist/1k1qp_doglist", network=self.network)
# Act/Assert
self.helper_is_thing_hashable(xspf)
@handle_lastfm_exceptions
def test_invalid_xml(self):
# Arrange
# Currently causes PCDATA invalid Char value 25
artist = "Blind Willie Johnson"
title = "It's nobody's fault but mine"
# Act
search = self.network.search_for_track(artist, title)
total = search.get_total_result_count()
# Assert
self.skip_if_lastfm_api_broken(total)
self.assertGreaterEqual(int(total), 0)
@handle_lastfm_exceptions
def test_user_play_count_in_track_info(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
track = pylast.Track(
artist=artist, title=title,
network=self.network, username=self.username)
# Act
count = track.get_userplaycount()
# Assert
self.assertGreaterEqual(count, 0)
@handle_lastfm_exceptions
def test_user_loved_in_track_info(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
track = pylast.Track(
artist=artist, title=title,
network=self.network, username=self.username)
# Act
loved = track.get_userloved()
# Assert
self.assertIsNotNone(loved)
self.assertIsInstance(loved, bool)
self.assertNotIsInstance(loved, str)
@handle_lastfm_exceptions
def test_album_in_recent_tracks(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
# Act
# limit=2 to ignore now-playing:
track = lastfm_user.get_recent_tracks(limit=2)[0]
# Assert
self.assertTrue(hasattr(track, 'album'))
@handle_lastfm_exceptions
def test_album_in_artist_tracks(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
# Act
track = lastfm_user.get_artist_tracks(artist="Test Artist")[0]
# Assert
self.assertTrue(hasattr(track, 'album'))
@handle_lastfm_exceptions
def test_enable_rate_limiting(self):
# Arrange
self.assertTrue(self.network.is_rate_limited())
# Act
self.network.enable_rate_limit()
then = time.time()
# Make some network call, limit not applied first time
self.network.get_user(self.username)
# Make a second network call, limiting should be applied
self.network.get_top_artists()
now = time.time()
# Assert
self.assertTrue(self.network.is_rate_limited())
self.assertGreaterEqual(now - then, 0.2)
@handle_lastfm_exceptions
def test_disable_rate_limiting(self):
# Arrange
self.network.enable_rate_limit()
self.assertTrue(self.network.is_rate_limited())
# Act
self.network.disable_rate_limit()
# Make some network call, limit not applied first time
self.network.get_user(self.username)
        # Make a second network call; no limiting should be applied (disabled)
self.network.get_top_artists()
# Assert
self.assertFalse(self.network.is_rate_limited())
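    # A back-of-the-envelope sketch (not pylast's implementation) of the
    # throttling behaviour the two tests above exercise: keep a minimum gap
    # between consecutive calls, e.g. 0.2s to stay within 5 calls per second.
    @staticmethod
    def _example_min_gap_between_calls(func, arg_list, min_gap=0.2):
        results = []
        last_call = None
        for args in arg_list:
            if last_call is not None:
                wait = min_gap - (time.time() - last_call)
                if wait > 0:
                    time.sleep(wait)
            last_call = time.time()
            results.append(func(*args))
        return results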
    # Commented out because (a) it'll take a long time and (b) it strangely
    # fails due to Last.fm complaining of hitting the rate limit, even when
    # limited to one call per second. The ToS allows 5 calls per second.
# def test_get_all_scrobbles(self):
# # Arrange
# lastfm_user = self.network.get_user("RJ")
# self.network.enable_rate_limit() # this is going to be slow...
# # Act
# tracks = lastfm_user.get_recent_tracks(limit=None)
# # Assert
# self.assertGreaterEqual(len(tracks), 0)
def helper_past_events_have_valid_ids(self, thing):
# Act
events = thing.get_past_events()
# Assert
self.helper_assert_events_have_valid_ids(events)
def helper_upcoming_events_have_valid_ids(self, thing):
# Act
events = thing.get_upcoming_events()
# Assert
self.helper_assert_events_have_valid_ids(events)
def helper_assert_events_have_valid_ids(self, events):
# Assert
# If fails, add past/future event for user/Test Artist:
self.assertGreaterEqual(len(events), 1)
for event in events[:2]: # checking first two should be enough
self.assertIsInstance(event.get_headliner(), pylast.Artist)
@handle_lastfm_exceptions
def test_artist_upcoming_events_returns_valid_ids(self):
# Arrange
artist = pylast.Artist("Test Artist", self.network)
# Act/Assert
self.helper_upcoming_events_have_valid_ids(artist)
@handle_lastfm_exceptions
def test_user_past_events_returns_valid_ids(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
# Act/Assert
self.helper_past_events_have_valid_ids(lastfm_user)
@handle_lastfm_exceptions
def test_user_recommended_events_returns_valid_ids(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
# Act
events = lastfm_user.get_upcoming_events()
# Assert
self.helper_assert_events_have_valid_ids(events)
@handle_lastfm_exceptions
def test_user_upcoming_events_returns_valid_ids(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
# Act/Assert
self.helper_upcoming_events_have_valid_ids(lastfm_user)
@handle_lastfm_exceptions
def test_venue_past_events_returns_valid_ids(self):
# Arrange
venue_id = "8778225" # Last.fm office
venue = pylast.Venue(venue_id, self.network)
# Act/Assert
self.helper_past_events_have_valid_ids(venue)
@handle_lastfm_exceptions
def test_venue_upcoming_events_returns_valid_ids(self):
# Arrange
venue_id = "8778225" # Last.fm office
venue = pylast.Venue(venue_id, self.network)
# Act/Assert
self.helper_upcoming_events_have_valid_ids(venue)
@handle_lastfm_exceptions
def test_pickle(self):
# Arrange
import pickle
lastfm_user = self.network.get_user(self.username)
filename = str(self.unix_timestamp()) + ".pkl"
# Act
with open(filename, "wb") as f:
pickle.dump(lastfm_user, f)
with open(filename, "rb") as f:
loaded_user = pickle.load(f)
os.remove(filename)
# Assert
self.assertEqual(lastfm_user, loaded_user)
@handle_lastfm_exceptions
def test_bio_published_date(self):
# Arrange
artist = pylast.Artist("Test Artist", self.network)
# Act
bio = artist.get_bio_published_date()
# Assert
self.assertIsNotNone(bio)
self.assertGreaterEqual(len(bio), 1)
@handle_lastfm_exceptions
def test_bio_content(self):
# Arrange
artist = pylast.Artist("Test Artist", self.network)
# Act
bio = artist.get_bio_content(language="en")
# Assert
self.assertIsNotNone(bio)
self.assertGreaterEqual(len(bio), 1)
@handle_lastfm_exceptions
def test_bio_summary(self):
# Arrange
artist = pylast.Artist("Test Artist", self.network)
# Act
bio = artist.get_bio_summary(language="en")
# Assert
self.assertIsNotNone(bio)
self.assertGreaterEqual(len(bio), 1)
@handle_lastfm_exceptions
def test_album_wiki_content(self):
# Arrange
album = pylast.Album("Test Artist", "Test Album", self.network)
# Act
wiki = album.get_wiki_content()
# Assert
self.assertIsNotNone(wiki)
self.assertGreaterEqual(len(wiki), 1)
@handle_lastfm_exceptions
def test_album_wiki_published_date(self):
# Arrange
album = pylast.Album("Test Artist", "Test Album", self.network)
# Act
wiki = album.get_wiki_published_date()
# Assert
self.assertIsNotNone(wiki)
self.assertGreaterEqual(len(wiki), 1)
@handle_lastfm_exceptions
def test_album_wiki_summary(self):
# Arrange
album = pylast.Album("Test Artist", "Test Album", self.network)
# Act
wiki = album.get_wiki_summary()
# Assert
self.assertIsNotNone(wiki)
self.assertGreaterEqual(len(wiki), 1)
@handle_lastfm_exceptions
def test_track_wiki_content(self):
# Arrange
track = pylast.Track("Test Artist", "Test Title", self.network)
# Act
wiki = track.get_wiki_content()
# Assert
self.assertIsNotNone(wiki)
self.assertGreaterEqual(len(wiki), 1)
@handle_lastfm_exceptions
def test_track_wiki_summary(self):
# Arrange
track = pylast.Track("Test Artist", "Test Title", self.network)
# Act
wiki = track.get_wiki_summary()
# Assert
self.assertIsNotNone(wiki)
self.assertGreaterEqual(len(wiki), 1)
@handle_lastfm_exceptions
def test_lastfm_network_name(self):
# Act
name = str(self.network)
# Assert
self.assertEqual(name, "Last.fm Network")
def helper_validate_results(self, a, b, c):
# Assert
self.assertIsNotNone(a)
self.assertIsNotNone(b)
self.assertIsNotNone(c)
self.assertGreaterEqual(len(a), 0)
self.assertGreaterEqual(len(b), 0)
self.assertGreaterEqual(len(c), 0)
self.assertEqual(a, b)
self.assertEqual(b, c)
def helper_validate_cacheable(self, thing, function_name):
# Arrange
# get thing.function_name()
func = getattr(thing, function_name, None)
# Act
result1 = func(limit=1, cacheable=False)
result2 = func(limit=1, cacheable=True)
result3 = func(limit=1)
# Assert
self.helper_validate_results(result1, result2, result3)
@handle_lastfm_exceptions
def test_cacheable_artist_get_shouts(self):
# Arrange
artist = self.network.get_artist("Test Artist")
# Act/Assert
self.helper_validate_cacheable(artist, "get_shouts")
@handle_lastfm_exceptions
def test_cacheable_event_get_shouts(self):
# Arrange
user = self.network.get_user("RJ")
event = user.get_past_events(limit=1)[0]
# Act/Assert
self.helper_validate_cacheable(event, "get_shouts")
@handle_lastfm_exceptions
def test_cacheable_track_get_shouts(self):
# Arrange
track = self.network.get_top_tracks()[0].item
# Act/Assert
self.helper_validate_cacheable(track, "get_shouts")
@handle_lastfm_exceptions
def test_cacheable_group_get_members(self):
# Arrange
group = self.network.get_group("Audioscrobbler Beta")
# Act/Assert
self.helper_validate_cacheable(group, "get_members")
@handle_lastfm_exceptions
def test_cacheable_library(self):
# Arrange
library = pylast.Library(self.username, self.network)
# Act/Assert
self.helper_validate_cacheable(library, "get_albums")
self.helper_validate_cacheable(library, "get_artists")
self.helper_validate_cacheable(library, "get_tracks")
@handle_lastfm_exceptions
def test_cacheable_user_artist_tracks(self):
# Arrange
lastfm_user = self.network.get_authenticated_user()
# Act
result1 = lastfm_user.get_artist_tracks("Test Artist", cacheable=False)
result2 = lastfm_user.get_artist_tracks("Test Artist", cacheable=True)
result3 = lastfm_user.get_artist_tracks("Test Artist")
# Assert
self.helper_validate_results(result1, result2, result3)
@handle_lastfm_exceptions
def test_cacheable_user(self):
# Arrange
lastfm_user = self.network.get_authenticated_user()
# Act/Assert
# Skip the first one because Last.fm API is broken
# self.helper_validate_cacheable(lastfm_user, "get_friends")
self.helper_validate_cacheable(lastfm_user, "get_loved_tracks")
self.helper_validate_cacheable(lastfm_user, "get_neighbours")
self.helper_validate_cacheable(lastfm_user, "get_past_events")
self.helper_validate_cacheable(lastfm_user, "get_recent_tracks")
self.helper_validate_cacheable(lastfm_user, "get_recommended_artists")
self.helper_validate_cacheable(lastfm_user, "get_recommended_events")
self.helper_validate_cacheable(lastfm_user, "get_shouts")
@handle_lastfm_exceptions
def test_geo_get_events_in_location(self):
# Arrange
# Act
events = self.network.get_geo_events(
location="London", tag="blues", limit=1)
# Assert
self.assertEqual(len(events), 1)
event = events[0]
self.assertIsInstance(event, pylast.Event)
self.assertIn(event.get_venue().location['city'],
["London", "Camden"])
@handle_lastfm_exceptions
def test_geo_get_events_in_latlong(self):
# Arrange
# Act
events = self.network.get_geo_events(
latitude=53.466667, longitude=-2.233333, distance=5, limit=1)
# Assert
self.assertEqual(len(events), 1)
event = events[0]
self.assertIsInstance(event, pylast.Event)
self.assertEqual(event.get_venue().location['city'], "Manchester")
@handle_lastfm_exceptions
def test_geo_get_events_festival(self):
# Arrange
# Act
events = self.network.get_geo_events(
location="Reading", festivalsonly=True, limit=1)
# Assert
self.assertEqual(len(events), 1)
event = events[0]
self.assertIsInstance(event, pylast.Event)
self.assertEqual(event.get_venue().location['city'], "Reading")
def helper_dates_valid(self, dates):
# Assert
self.assertGreaterEqual(len(dates), 1)
self.assertIsInstance(dates[0], tuple)
(start, end) = dates[0]
self.assertLess(start, end)
@handle_lastfm_exceptions
def test_get_metro_weekly_chart_dates(self):
# Arrange
# Act
dates = self.network.get_metro_weekly_chart_dates()
# Assert
self.helper_dates_valid(dates)
def helper_geo_chart(self, function_name, expected_type=pylast.Artist):
# Arrange
metro = self.network.get_metro("Madrid", "Spain")
dates = self.network.get_metro_weekly_chart_dates()
(from_date, to_date) = dates[0]
# get metro.function_name()
func = getattr(metro, function_name, None)
# Act
chart = func(from_date=from_date, to_date=to_date, limit=1)
# Assert
self.assertEqual(len(chart), 1)
self.assertIsInstance(chart[0], pylast.TopItem)
self.assertIsInstance(chart[0].item, expected_type)
@handle_lastfm_exceptions
def test_get_metro_artist_chart(self):
# Arrange/Act/Assert
self.helper_geo_chart("get_artist_chart")
@handle_lastfm_exceptions
def test_get_metro_hype_artist_chart(self):
# Arrange/Act/Assert
self.helper_geo_chart("get_hype_artist_chart")
@handle_lastfm_exceptions
def test_get_metro_unique_artist_chart(self):
# Arrange/Act/Assert
self.helper_geo_chart("get_unique_artist_chart")
@handle_lastfm_exceptions
def test_get_metro_track_chart(self):
# Arrange/Act/Assert
self.helper_geo_chart("get_track_chart", expected_type=pylast.Track)
@handle_lastfm_exceptions
def test_get_metro_hype_track_chart(self):
# Arrange/Act/Assert
self.helper_geo_chart(
"get_hype_track_chart", expected_type=pylast.Track)
@handle_lastfm_exceptions
def test_get_metro_unique_track_chart(self):
# Arrange/Act/Assert
self.helper_geo_chart(
"get_unique_track_chart", expected_type=pylast.Track)
@handle_lastfm_exceptions
def test_geo_get_metros(self):
# Arrange
# Act
metros = self.network.get_metros(country="Poland")
# Assert
self.assertGreaterEqual(len(metros), 1)
self.assertIsInstance(metros[0], pylast.Metro)
self.assertEqual(metros[0].get_country(), "Poland")
@handle_lastfm_exceptions
def test_geo_get_top_artists(self):
# Arrange
# Act
artists = self.network.get_geo_top_artists(
country="United Kingdom", limit=1)
# Assert
self.assertEqual(len(artists), 1)
self.assertIsInstance(artists[0], pylast.TopItem)
self.assertIsInstance(artists[0].item, pylast.Artist)
@handle_lastfm_exceptions
def test_geo_get_top_tracks(self):
# Arrange
# Act
tracks = self.network.get_geo_top_tracks(
country="United Kingdom", location="Manchester", limit=1)
# Assert
self.assertEqual(len(tracks), 1)
self.assertIsInstance(tracks[0], pylast.TopItem)
self.assertIsInstance(tracks[0].item, pylast.Track)
@handle_lastfm_exceptions
def test_metro_class(self):
# Arrange
# Act
metro = self.network.get_metro("Bergen", "Norway")
# Assert
self.assertEqual(metro.get_name(), "Bergen")
self.assertEqual(metro.get_country(), "Norway")
self.assertEqual(str(metro), "Bergen, Norway")
self.assertEqual(metro, pylast.Metro("Bergen", "Norway", self.network))
self.assertNotEqual(
metro,
pylast.Metro("Wellington", "New Zealand", self.network))
@handle_lastfm_exceptions
def test_get_album_play_links(self):
# Arrange
album1 = self.network.get_album("Portishead", "Dummy")
album2 = self.network.get_album("Radiohead", "OK Computer")
albums = [album1, album2]
# Act
links = self.network.get_album_play_links(albums)
# Assert
self.assertIsInstance(links, list)
self.assertEqual(len(links), 2)
self.assertIn("spotify:album:", links[0])
self.assertIn("spotify:album:", links[1])
@handle_lastfm_exceptions
def test_get_artist_play_links(self):
# Arrange
artists = ["Portishead", "Radiohead"]
# Act
links = self.network.get_artist_play_links(artists)
# Assert
self.assertIsInstance(links, list)
self.assertEqual(len(links), 2)
self.assertIn("spotify:artist:", links[0])
self.assertIn("spotify:artist:", links[1])
@handle_lastfm_exceptions
def test_get_track_play_links(self):
# Arrange
track1 = self.network.get_track(artist="Portishead", title="Mysterons")
track2 = self.network.get_track(artist="Radiohead", title="Creep")
tracks = [track1, track2]
# Act
links = self.network.get_track_play_links(tracks)
# Assert
self.assertIsInstance(links, list)
self.assertEqual(len(links), 2)
self.assertIn("spotify:track:", links[0])
self.assertIn("spotify:track:", links[1])
def helper_at_least_one_thing_in_top_list(self, things, expected_type):
# Assert
self.assertGreater(len(things), 1)
self.assertIsInstance(things, list)
self.assertIsInstance(things[0], pylast.TopItem)
self.assertIsInstance(things[0].item, expected_type)
def helper_only_one_thing_in_top_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 1)
self.assertIsInstance(things, list)
self.assertIsInstance(things[0], pylast.TopItem)
self.assertIsInstance(things[0].item, expected_type)
def helper_only_one_thing_in_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 1)
self.assertIsInstance(things, list)
self.assertIsInstance(things[0], expected_type)
def helper_two_different_things_in_top_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 2)
thing1 = things[0]
thing2 = things[1]
self.assertIsInstance(thing1, pylast.TopItem)
self.assertIsInstance(thing2, pylast.TopItem)
self.assertIsInstance(thing1.item, expected_type)
self.assertIsInstance(thing2.item, expected_type)
self.assertNotEqual(thing1, thing2)
def helper_two_things_in_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 2)
self.assertIsInstance(things, list)
thing1 = things[0]
thing2 = things[1]
self.assertIsInstance(thing1, expected_type)
self.assertIsInstance(thing2, expected_type)
@handle_lastfm_exceptions
def test_user_get_top_tags_with_limit(self):
# Arrange
user = self.network.get_user("RJ")
# Act
tags = user.get_top_tags(limit=1)
# Assert
self.skip_if_lastfm_api_broken(tags)
self.helper_only_one_thing_in_top_list(tags, pylast.Tag)
@handle_lastfm_exceptions
def test_network_get_top_artists_with_limit(self):
# Arrange
# Act
artists = self.network.get_top_artists(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(artists, pylast.Artist)
@handle_lastfm_exceptions
def test_network_get_top_tags_with_limit(self):
# Arrange
# Act
tags = self.network.get_top_tags(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(tags, pylast.Tag)
@handle_lastfm_exceptions
def test_network_get_top_tags_with_no_limit(self):
# Arrange
# Act
tags = self.network.get_top_tags()
# Assert
self.helper_at_least_one_thing_in_top_list(tags, pylast.Tag)
@handle_lastfm_exceptions
def test_network_get_top_tracks_with_limit(self):
# Arrange
# Act
tracks = self.network.get_top_tracks(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(tracks, pylast.Track)
@handle_lastfm_exceptions
def test_artist_top_tracks(self):
# Arrange
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
# Act
things = artist.get_top_tracks(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
@handle_lastfm_exceptions
def test_artist_top_albums(self):
# Arrange
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
# Act
things = artist.get_top_albums(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Album)
@handle_lastfm_exceptions
def test_artist_top_fans(self):
# Arrange
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
# Act
things = artist.get_top_fans(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.User)
@handle_lastfm_exceptions
def test_country_top_tracks(self):
# Arrange
country = self.network.get_country("Croatia")
# Act
things = country.get_top_tracks(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
@handle_lastfm_exceptions
def test_country_network_top_tracks(self):
# Arrange
# Act
things = self.network.get_geo_top_tracks("Croatia", limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
@handle_lastfm_exceptions
def test_tag_top_tracks(self):
# Arrange
tag = self.network.get_tag("blues")
# Act
things = tag.get_top_tracks(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
@handle_lastfm_exceptions
def test_user_top_tracks(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
# Act
things = lastfm_user.get_top_tracks(limit=2)
# Assert
self.helper_two_different_things_in_top_list(things, pylast.Track)
def helper_assert_chart(self, chart, expected_type):
# Assert
self.assertIsNotNone(chart)
self.assertGreater(len(chart), 0)
self.assertIsInstance(chart[0], pylast.TopItem)
self.assertIsInstance(chart[0].item, expected_type)
def helper_get_assert_charts(self, thing, date):
# Arrange
(from_date, to_date) = date
# Act
artist_chart = thing.get_weekly_artist_charts(from_date, to_date)
if type(thing) is not pylast.Tag:
album_chart = thing.get_weekly_album_charts(from_date, to_date)
track_chart = thing.get_weekly_track_charts(from_date, to_date)
# Assert
self.helper_assert_chart(artist_chart, pylast.Artist)
if type(thing) is not pylast.Tag:
self.helper_assert_chart(album_chart, pylast.Album)
self.helper_assert_chart(track_chart, pylast.Track)
@handle_lastfm_exceptions
def test_group_charts(self):
# Arrange
group = self.network.get_group("mnml")
dates = group.get_weekly_chart_dates()
self.helper_dates_valid(dates)
# Act/Assert
self.helper_get_assert_charts(group, dates[-2])
@handle_lastfm_exceptions
def test_tag_charts(self):
# Arrange
tag = self.network.get_tag("rock")
dates = tag.get_weekly_chart_dates()
self.helper_dates_valid(dates)
# Act/Assert
self.helper_get_assert_charts(tag, dates[-2])
@handle_lastfm_exceptions
def test_user_charts(self):
# Arrange
lastfm_user = self.network.get_user("RJ")
dates = lastfm_user.get_weekly_chart_dates()
self.helper_dates_valid(dates)
# Act/Assert
self.helper_get_assert_charts(lastfm_user, dates[0])
@handle_lastfm_exceptions
def test_track_top_fans(self):
# Arrange
track = self.network.get_track("The Cinematic Orchestra", "Postlude")
# Act
fans = track.get_top_fans()
# Assert
self.helper_at_least_one_thing_in_top_list(fans, pylast.User)
# Commented out to avoid spamming
# def test_share_spam(self):
# # Arrange
# users_to_spam = [TODO_ENTER_SPAMEES_HERE]
# spam_message = "Dig the krazee sound!"
# artist = self.network.get_top_artists(limit=1)[0].item
# track = artist.get_top_tracks(limit=1)[0].item
# event = artist.get_upcoming_events()[0]
# # Act
# artist.share(users_to_spam, spam_message)
# track.share(users_to_spam, spam_message)
# event.share(users_to_spam, spam_message)
# Assert
# Check inbox for spam!
# album/artist/event/track/user
@handle_lastfm_exceptions
def test_album_shouts(self):
# Arrange
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
album = artist.get_top_albums(limit=1)[0].item
# Act
shouts = album.get_shouts(limit=2)
# Assert
self.helper_two_things_in_list(shouts, pylast.Shout)
@handle_lastfm_exceptions
def test_artist_shouts(self):
# Arrange
# Pick an artist with plenty of plays
artist = self.network.get_top_artists(limit=1)[0].item
# Act
shouts = artist.get_shouts(limit=2)
# Assert
self.helper_two_things_in_list(shouts, pylast.Shout)
@handle_lastfm_exceptions
def test_event_shouts(self):
# Arrange
event_id = 3478520 # Glasto 2014
event = pylast.Event(event_id, self.network)
# Act
shouts = event.get_shouts(limit=2)
# Assert
self.helper_two_things_in_list(shouts, pylast.Shout)
@handle_lastfm_exceptions
def test_track_shouts(self):
# Arrange
track = self.network.get_track("The Cinematic Orchestra", "Postlude")
# Act
shouts = track.get_shouts(limit=2)
# Assert
self.helper_two_things_in_list(shouts, pylast.Shout)
@handle_lastfm_exceptions
def test_user_shouts(self):
# Arrange
user = self.network.get_user("RJ")
# Act
shouts = user.get_shouts(limit=2)
# Assert
self.helper_two_things_in_list(shouts, pylast.Shout)
@handle_lastfm_exceptions
def test_album_data(self):
# Arrange
thing = self.network.get_album("Test Artist", "Test Album")
# Act
stringed = str(thing)
repr = thing.__repr__()
title = thing.get_title()
name = thing.get_name()
playcount = thing.get_playcount()
url = thing.get_url()
# Assert
self.assertEqual(stringed, "Test Artist - Test Album")
self.assertIn("pylast.Album('Test Artist', 'Test Album',", repr)
self.assertEqual(title, name)
self.assertIsInstance(playcount, int)
self.assertGreater(playcount, 1)
self.assertEqual(
"http://www.last.fm/music/test%2bartist/test%2balbum", url)
@handle_lastfm_exceptions
def test_track_data(self):
# Arrange
thing = self.network.get_track("Test Artist", "Test Title")
# Act
stringed = str(thing)
repr = thing.__repr__()
title = thing.get_title()
name = thing.get_name()
playcount = thing.get_playcount()
url = thing.get_url(pylast.DOMAIN_FRENCH)
# Assert
self.assertEqual(stringed, "Test Artist - Test Title")
self.assertIn("pylast.Track('Test Artist', 'Test Title',", repr)
self.assertEqual(title, "Test Title")
self.assertEqual(title, name)
self.assertIsInstance(playcount, int)
self.assertGreater(playcount, 1)
self.assertEqual(
"http://www.lastfm.fr/music/test%2bartist/_/test%2btitle", url)
@handle_lastfm_exceptions
def test_tag_top_artists(self):
# Arrange
tag = self.network.get_tag("blues")
# Act
artists = tag.get_top_artists(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(artists, pylast.Artist)
@handle_lastfm_exceptions
def test_country_top_artists(self):
# Arrange
country = self.network.get_country("Ukraine")
# Act
artists = country.get_top_artists(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(artists, pylast.Artist)
@handle_lastfm_exceptions
def test_user_top_artists(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
# Act
artists = lastfm_user.get_top_artists(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(artists, pylast.Artist)
@handle_lastfm_exceptions
def test_tag_top_albums(self):
# Arrange
tag = self.network.get_tag("blues")
# Act
albums = tag.get_top_albums(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(albums, pylast.Album)
@handle_lastfm_exceptions
def test_user_top_albums(self):
# Arrange
user = self.network.get_user("RJ")
# Act
albums = user.get_top_albums(limit=1)
# Assert
self.helper_only_one_thing_in_top_list(albums, pylast.Album)
@handle_lastfm_exceptions
def test_user_tagged_artists(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
tags = ["artisttagola"]
artist = self.network.get_artist("Test Artist")
artist.add_tags(tags)
# Act
artists = lastfm_user.get_tagged_artists('artisttagola', limit=1)
# Assert
self.helper_only_one_thing_in_list(artists, pylast.Artist)
@handle_lastfm_exceptions
def test_user_tagged_albums(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
tags = ["albumtagola"]
album = self.network.get_album("Test Artist", "Test Album")
album.add_tags(tags)
# Act
albums = lastfm_user.get_tagged_albums('albumtagola', limit=1)
# Assert
self.helper_only_one_thing_in_list(albums, pylast.Album)
@handle_lastfm_exceptions
def test_user_tagged_tracks(self):
# Arrange
lastfm_user = self.network.get_user(self.username)
tags = ["tracktagola"]
track = self.network.get_track("Test Artist", "Test Title")
track.add_tags(tags)
# Act
tracks = lastfm_user.get_tagged_tracks('tracktagola', limit=1)
# Assert
self.helper_only_one_thing_in_list(tracks, pylast.Track)
@handle_lastfm_exceptions
def test_caching(self):
# Arrange
user = self.network.get_user("RJ")
# Act
self.network.enable_caching()
shouts1 = user.get_shouts(limit=1, cacheable=True)
shouts2 = user.get_shouts(limit=1, cacheable=True)
# Assert
self.assertTrue(self.network.is_caching_enabled())
self.assertEqual(shouts1, shouts2)
self.network.disable_caching()
self.assertFalse(self.network.is_caching_enabled())
@handle_lastfm_exceptions
def test_create_playlist(self):
# Arrange
title = "Test playlist"
description = "Testing"
lastfm_user = self.network.get_user(self.username)
# Act
playlist = self.network.create_new_playlist(title, description)
# Assert
self.assertIsInstance(playlist, pylast.Playlist)
self.assertEqual(playlist.get_title(), "Test playlist")
self.assertEqual(playlist.get_description(), "Testing")
self.assertEqual(playlist.get_user(), lastfm_user)
@handle_lastfm_exceptions
def test_empty_playlist_unstreamable(self):
# Arrange
title = "Empty playlist"
description = "Unstreamable"
# Act
playlist = self.network.create_new_playlist(title, description)
# Assert
self.assertEqual(playlist.get_size(), 0)
self.assertEqual(playlist.get_duration(), 0)
self.assertFalse(playlist.is_streamable())
@handle_lastfm_exceptions
def test_big_playlist_is_streamable(self):
# Arrange
# Find a big playlist on Last.fm, eg "top 100 classic rock songs"
user = "kaxior"
id = 10417943
playlist = pylast.Playlist(user, id, self.network)
self.assertEqual(
playlist.get_url(),
"http://www.last.fm/user/kaxior/library/"
"playlists/67ajb_top_100_classick_rock_songs")
# Act
# Nothing
# Assert
self.assertIsInstance(playlist, pylast.Playlist)
self.assertGreaterEqual(playlist.get_size(), 45)
self.assertGreater(playlist.get_duration(), 0)
self.assertTrue(playlist.is_streamable())
@handle_lastfm_exceptions
def test_add_track_to_playlist(self):
# Arrange
title = "One track playlist"
description = "Testing"
playlist = self.network.create_new_playlist(title, description)
track = pylast.Track("Test Artist", "Test Title", self.network)
# Act
playlist.add_track(track)
# Assert
self.assertEqual(playlist.get_size(), 1)
self.assertEqual(len(playlist.get_tracks()), 1)
self.assertTrue(playlist.has_track(track))
@handle_lastfm_exceptions
def test_album_mbid(self):
# Arrange
mbid = "a6a265bf-9f81-4055-8224-f7ac0aa6b937"
# Act
album = self.network.get_album_by_mbid(mbid)
album_mbid = album.get_mbid()
# Assert
self.assertIsInstance(album, pylast.Album)
self.assertEqual(album.title.lower(), "test")
self.assertEqual(album_mbid, mbid)
@handle_lastfm_exceptions
def test_artist_mbid(self):
# Arrange
mbid = "7e84f845-ac16-41fe-9ff8-df12eb32af55"
# Act
artist = self.network.get_artist_by_mbid(mbid)
# Assert
self.assertIsInstance(artist, pylast.Artist)
self.assertEqual(artist.name, "MusicBrainz Test Artist")
@handle_lastfm_exceptions
def test_track_mbid(self):
# Arrange
mbid = "ebc037b1-cc9c-44f2-a21f-83c219f0e1e0"
# Act
track = self.network.get_track_by_mbid(mbid)
track_mbid = track.get_mbid()
# Assert
self.assertIsInstance(track, pylast.Track)
self.assertEqual(track.title, "first")
self.assertEqual(track_mbid, mbid)
@handle_lastfm_exceptions
def test_artist_listener_count(self):
# Arrange
artist = self.network.get_artist("Test Artist")
# Act
count = artist.get_listener_count()
# Assert
self.assertIsInstance(count, int)
self.assertGreater(count, 0)
@handle_lastfm_exceptions
def test_event_attendees(self):
# Arrange
user = self.network.get_user("RJ")
event = user.get_past_events(limit=1)[0]
# Act
users = event.get_attendees()
# Assert
self.assertIsInstance(users, list)
self.assertIsInstance(users[0], pylast.User)
@handle_lastfm_exceptions
def test_tag_artist(self):
# Arrange
artist = self.network.get_artist("Test Artist")
# artist.clear_tags()
# Act
artist.add_tag("testing")
# Assert
tags = artist.get_tags()
self.assertGreater(len(tags), 0)
found = False
for tag in tags:
if tag.name == "testing":
found = True
break
self.assertTrue(found)
@handle_lastfm_exceptions
def test_remove_tag_of_type_text(self):
# Arrange
tag = "testing" # text
artist = self.network.get_artist("Test Artist")
artist.add_tag(tag)
# Act
artist.remove_tag(tag)
# Assert
tags = artist.get_tags()
found = False
for tag in tags:
if tag.name == "testing":
found = True
break
self.assertFalse(found)
@handle_lastfm_exceptions
def test_remove_tag_of_type_tag(self):
# Arrange
tag = pylast.Tag("testing", self.network) # Tag
artist = self.network.get_artist("Test Artist")
artist.add_tag(tag)
# Act
artist.remove_tag(tag)
# Assert
tags = artist.get_tags()
found = False
for tag in tags:
if tag.name == "testing":
found = True
break
self.assertFalse(found)
@handle_lastfm_exceptions
def test_remove_tags(self):
# Arrange
tags = ["removetag1", "removetag2"]
artist = self.network.get_artist("Test Artist")
artist.add_tags(tags)
artist.add_tags("1more")
tags_before = artist.get_tags()
# Act
artist.remove_tags(tags)
# Assert
tags_after = artist.get_tags()
self.assertEqual(len(tags_after), len(tags_before) - 2)
found1, found2 = False, False
for tag in tags_after:
if tag.name == "removetag1":
found1 = True
elif tag.name == "removetag2":
found2 = True
self.assertFalse(found1)
self.assertFalse(found2)
@handle_lastfm_exceptions
def test_set_tags(self):
# Arrange
tags = ["sometag1", "sometag2"]
artist = self.network.get_artist("Test Artist")
artist.add_tags(tags)
tags_before = artist.get_tags()
new_tags = ["settag1", "settag2"]
# Act
artist.set_tags(new_tags)
# Assert
tags_after = artist.get_tags()
self.assertNotEqual(tags_before, tags_after)
self.assertEqual(len(tags_after), 2)
found1, found2 = False, False
for tag in tags_after:
if tag.name == "settag1":
found1 = True
elif tag.name == "settag2":
found2 = True
self.assertTrue(found1)
self.assertTrue(found2)
@handle_lastfm_exceptions
def test_tracks_notequal(self):
# Arrange
track1 = pylast.Track("Test Artist", "Test Title", self.network)
track2 = pylast.Track("Test Artist", "Test Track", self.network)
# Act
# Assert
self.assertNotEqual(track1, track2)
@handle_lastfm_exceptions
def test_track_id(self):
# Arrange
track = pylast.Track("Test Artist", "Test Title", self.network)
# Act
id = track.get_id()
# Assert
self.skip_if_lastfm_api_broken(id)
self.assertEqual(id, "14053327")
@handle_lastfm_exceptions
def test_track_title_prop_caps(self):
# Arrange
track = pylast.Track("test artist", "test title", self.network)
# Act
title = track.get_title(properly_capitalized=True)
# Assert
self.assertEqual(title, "Test Title")
@handle_lastfm_exceptions
def test_track_listener_count(self):
# Arrange
track = pylast.Track("test artist", "test title", self.network)
# Act
count = track.get_listener_count()
# Assert
self.assertGreater(count, 21)
@handle_lastfm_exceptions
def test_album_rel_date(self):
# Arrange
album = pylast.Album("Test Artist", "Test Release", self.network)
# Act
date = album.get_release_date()
# Assert
self.skip_if_lastfm_api_broken(date)
self.assertIn("2011", date)
@handle_lastfm_exceptions
def test_album_tracks(self):
# Arrange
album = pylast.Album("Test Artist", "Test Release", self.network)
# Act
tracks = album.get_tracks()
# Assert
self.assertIsInstance(tracks, list)
self.assertIsInstance(tracks[0], pylast.Track)
self.assertEqual(len(tracks), 4)
@handle_lastfm_exceptions
def test_tags(self):
# Arrange
tag1 = self.network.get_tag("blues")
tag2 = self.network.get_tag("rock")
# Act
tag_repr = repr(tag1)
tag_str = str(tag1)
name = tag1.get_name(properly_capitalized=True)
url = tag1.get_url()
# Assert
self.assertEqual("blues", tag_str)
self.assertIn("pylast.Tag", tag_repr)
self.assertIn("blues", tag_repr)
self.assertEqual("blues", name)
self.assertTrue(tag1 == tag1)
self.assertTrue(tag1 != tag2)
self.assertEqual(url, "http://www.last.fm/tag/blues")
@handle_lastfm_exceptions
def test_tags_similar(self):
# Arrange
tag = self.network.get_tag("blues")
# Act
similar = tag.get_similar()
# Assert
self.skip_if_lastfm_api_broken(similar)
found = False
for tag in similar:
if tag.name == "delta blues":
found = True
break
self.assertTrue(found)
@handle_lastfm_exceptions
def test_artists(self):
# Arrange
artist1 = self.network.get_artist("Radiohead")
artist2 = self.network.get_artist("Portishead")
# Act
url = artist1.get_url()
mbid = artist1.get_mbid()
image = artist1.get_cover_image()
playcount = artist1.get_playcount()
streamable = artist1.is_streamable()
name = artist1.get_name(properly_capitalized=False)
name_cap = artist1.get_name(properly_capitalized=True)
# Assert
self.assertIn("http", image)
self.assertGreater(playcount, 1)
self.assertTrue(artist1 != artist2)
self.assertEqual(name.lower(), name_cap.lower())
self.assertEqual(url, "http://www.last.fm/music/radiohead")
self.assertEqual(mbid, "a74b1b7f-71a5-4011-9441-d0b5e4122711")
self.assertIsInstance(streamable, bool)
@handle_lastfm_exceptions
def test_events(self):
# Arrange
event_id_1 = 3162700 # Glasto 2013
event_id_2 = 3478520 # Glasto 2014
event1 = pylast.Event(event_id_1, self.network)
event2 = pylast.Event(event_id_2, self.network)
# Act
text = str(event1)
rep = repr(event1)
title = event1.get_title()
artists = event1.get_artists()
start = event1.get_start_date()
description = event1.get_description()
review_count = event1.get_review_count()
attendance_count = event1.get_attendance_count()
# Assert
self.assertIn("3162700", rep)
self.assertIn("pylast.Event", rep)
self.assertEqual(text, "Event #3162700")
self.assertTrue(event1 != event2)
self.assertIn("Glastonbury", title)
found = False
for artist in artists:
if artist.name == "The Rolling Stones":
found = True
break
self.assertTrue(found)
self.assertIn("Wed, 26 Jun 2013", start)
self.assertIn("astonishing bundle", description)
self.assertGreater(review_count, 0)
self.assertGreater(attendance_count, 100)
@handle_lastfm_exceptions
def test_countries(self):
# Arrange
country1 = pylast.Country("Italy", self.network)
country2 = pylast.Country("Finland", self.network)
# Act
text = str(country1)
rep = repr(country1)
url = country1.get_url()
# Assert
self.assertIn("Italy", rep)
self.assertIn("pylast.Country", rep)
self.assertEqual(text, "Italy")
self.assertTrue(country1 == country1)
self.assertTrue(country1 != country2)
self.assertEqual(url, "http://www.last.fm/place/italy")
@handle_lastfm_exceptions
def test_track_eq_none_is_false(self):
# Arrange
track1 = None
track2 = pylast.Track("Test Artist", "Test Title", self.network)
# Act / Assert
self.assertFalse(track1 == track2)
@handle_lastfm_exceptions
def test_track_ne_none_is_true(self):
# Arrange
track1 = None
track2 = pylast.Track("Test Artist", "Test Title", self.network)
# Act / Assert
self.assertTrue(track1 != track2)
@handle_lastfm_exceptions
def test_artist_eq_none_is_false(self):
# Arrange
artist1 = None
artist2 = pylast.Artist("Test Artist", self.network)
# Act / Assert
self.assertFalse(artist1 == artist2)
@handle_lastfm_exceptions
def test_artist_ne_none_is_true(self):
# Arrange
artist1 = None
artist2 = pylast.Artist("Test Artist", self.network)
# Act / Assert
self.assertTrue(artist1 != artist2)
@handle_lastfm_exceptions
def test_album_eq_none_is_false(self):
# Arrange
album1 = None
album2 = pylast.Album("Test Artist", "Test Album", self.network)
# Act / Assert
self.assertFalse(album1 == album2)
@handle_lastfm_exceptions
def test_album_ne_none_is_true(self):
# Arrange
album1 = None
album2 = pylast.Album("Test Artist", "Test Album", self.network)
# Act / Assert
self.assertTrue(album1 != album2)
@handle_lastfm_exceptions
def test_event_eq_none_is_false(self):
# Arrange
event1 = None
event_id = 3478520 # Glasto 2014
event2 = pylast.Event(event_id, self.network)
# Act / Assert
self.assertFalse(event1 == event2)
@handle_lastfm_exceptions
def test_event_ne_none_is_true(self):
# Arrange
event1 = None
event_id = 3478520 # Glasto 2014
event2 = pylast.Event(event_id, self.network)
# Act / Assert
self.assertTrue(event1 != event2)
@handle_lastfm_exceptions
def test_band_members(self):
# Arrange
artist = pylast.Artist("The Beatles", self.network)
# Act
band_members = artist.get_band_members()
# Assert
self.skip_if_lastfm_api_broken(band_members)
self.assertGreaterEqual(len(band_members), 4)
# @handle_lastfm_exceptions
# def test_no_band_members(self):
# # Arrange
# artist = pylast.Artist("John Lennon", self.network)
#
# # Act
# band_members = artist.get_band_members()
#
# # Assert
# self.assertIsNone(band_members)
@handle_lastfm_exceptions
def test_get_recent_tracks_from_to(self):
# Arrange
lastfm_user = self.network.get_user("RJ")
from datetime import datetime
start = datetime(2011, 7, 21, 15, 10)
end = datetime(2011, 7, 21, 15, 15)
import calendar
utc_start = calendar.timegm(start.utctimetuple())
utc_end = calendar.timegm(end.utctimetuple())
# Act
tracks = lastfm_user.get_recent_tracks(time_from=utc_start,
time_to=utc_end)
# Assert
self.assertEqual(len(tracks), 1)
self.assertEqual(str(tracks[0].track.artist), "Johnny Cash")
self.assertEqual(str(tracks[0].track.title), "Ring of Fire")
@handle_lastfm_exceptions
def test_artist_get_correction(self):
# Arrange
artist = pylast.Artist("guns and roses", self.network)
# Act
corrected_artist_name = artist.get_correction()
# Assert
self.assertEqual(corrected_artist_name, "Guns N' Roses")
@handle_lastfm_exceptions
def test_track_get_correction(self):
# Arrange
track = pylast.Track("Guns N' Roses", "mrbrownstone", self.network)
# Act
corrected_track_name = track.get_correction()
# Assert
self.assertEqual(corrected_track_name, "Mr. Brownstone")
@handle_lastfm_exceptions
def test_track_with_no_mbid(self):
# Arrange
track = pylast.Track("Static-X", "Set It Off", self.network)
# Act
mbid = track.get_mbid()
# Assert
self.assertEqual(mbid, None)
def test_init_with_token(self):
        # Arrange/Act
        msg = None  # ensure the assert below fails cleanly if no WSError is raised
        try:
pylast.LastFMNetwork(
api_key=self.__class__.secrets["api_key"],
api_secret=self.__class__.secrets["api_secret"],
token="invalid",
)
except pylast.WSError as exc:
msg = str(exc)
# Assert
self.assertEqual(msg,
"Unauthorized Token - This token has not been issued")
@flaky(max_runs=5, min_passes=1)
class TestPyLastWithLibreFm(unittest.TestCase):
"""Own class for Libre.fm because we don't need the Last.fm setUp"""
def test_libre_fm(self):
# Arrange
secrets = load_secrets()
username = secrets["username"]
password_hash = secrets["password_hash"]
# Act
network = pylast.LibreFMNetwork(
password_hash=password_hash, username=username)
artist = network.get_artist("Radiohead")
name = artist.get_name()
# Assert
self.assertEqual(name, "Radiohead")
if __name__ == '__main__':
unittest.main(failfast=True)
|
|
"""
Component to interface with an alarm control panel.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel/
"""
import asyncio
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.const import (
ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, SERVICE_ALARM_TRIGGER,
SERVICE_ALARM_DISARM, SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_NIGHT)
from homeassistant.config import load_yaml_config_file
from homeassistant.loader import bind_hass
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
DOMAIN = 'alarm_control_panel'
SCAN_INTERVAL = timedelta(seconds=30)
ATTR_CHANGED_BY = 'changed_by'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SERVICE_TO_METHOD = {
SERVICE_ALARM_DISARM: 'alarm_disarm',
SERVICE_ALARM_ARM_HOME: 'alarm_arm_home',
SERVICE_ALARM_ARM_AWAY: 'alarm_arm_away',
SERVICE_ALARM_ARM_NIGHT: 'alarm_arm_night',
SERVICE_ALARM_TRIGGER: 'alarm_trigger'
}
ATTR_TO_PROPERTY = [
ATTR_CODE,
ATTR_CODE_FORMAT
]
ALARM_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_CODE): cv.string,
})
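# A minimal usage sketch (not part of the component) showing how the schema
# above normalises service data: cv.entity_ids coerces a single entity id
# string into a list and cv.string passes the code through. The entity id and
# code values here are made up.
def _example_alarm_schema_usage():
    data = ALARM_SERVICE_SCHEMA({
        ATTR_ENTITY_ID: 'alarm_control_panel.hall',
        ATTR_CODE: '1234',
    })
    # -> roughly {'entity_id': ['alarm_control_panel.hall'], 'code': '1234'}
    return data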
@bind_hass
def alarm_disarm(hass, code=None, entity_id=None):
"""Send the alarm the command for disarm."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_DISARM, data)
@bind_hass
def alarm_arm_home(hass, code=None, entity_id=None):
"""Send the alarm the command for arm home."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_ARM_HOME, data)
@bind_hass
def alarm_arm_away(hass, code=None, entity_id=None):
"""Send the alarm the command for arm away."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_ARM_AWAY, data)
@bind_hass
def alarm_arm_night(hass, code=None, entity_id=None):
"""Send the alarm the command for arm night."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_ARM_NIGHT, data)
@bind_hass
def alarm_trigger(hass, code=None, entity_id=None):
"""Send the alarm the command for trigger."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_TRIGGER, data)
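# Tiny usage sketch for the thin helpers above (illustrative only; assumes a
# running `hass` instance and a made-up entity id).
def _example_arm_away(hass):
    # Equivalent to calling the alarm_control_panel.alarm_arm_away service
    # with {"code": "1234", "entity_id": "alarm_control_panel.hall"}.
    alarm_arm_away(hass, code='1234', entity_id='alarm_control_panel.hall')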
@asyncio.coroutine
def async_setup(hass, config):
"""Track states and offer events for sensors."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
yield from component.async_setup(config)
@asyncio.coroutine
def async_alarm_service_handler(service):
"""Map services to methods on Alarm."""
target_alarms = component.async_extract_from_service(service)
code = service.data.get(ATTR_CODE)
method = "async_{}".format(SERVICE_TO_METHOD[service.service])
update_tasks = []
for alarm in target_alarms:
yield from getattr(alarm, method)(code)
if not alarm.should_poll:
continue
update_tasks.append(alarm.async_update_ha_state(True))
if update_tasks:
yield from asyncio.wait(update_tasks, loop=hass.loop)
descriptions = yield from hass.async_add_job(
load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
for service in SERVICE_TO_METHOD:
hass.services.async_register(
DOMAIN, service, async_alarm_service_handler,
descriptions.get(service), schema=ALARM_SERVICE_SCHEMA)
return True
# pylint: disable=no-self-use
class AlarmControlPanel(Entity):
"""An abstract class for alarm control devices."""
@property
def code_format(self):
"""Regex for code format or None if no code is required."""
return None
@property
def changed_by(self):
"""Last change triggered by."""
return None
def alarm_disarm(self, code=None):
"""Send disarm command."""
raise NotImplementedError()
def async_alarm_disarm(self, code=None):
"""Send disarm command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.alarm_disarm, code)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
raise NotImplementedError()
def async_alarm_arm_home(self, code=None):
"""Send arm home command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.alarm_arm_home, code)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
raise NotImplementedError()
def async_alarm_arm_away(self, code=None):
"""Send arm away command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.alarm_arm_away, code)
def alarm_arm_night(self, code=None):
"""Send arm night command."""
raise NotImplementedError()
def async_alarm_arm_night(self, code=None):
"""Send arm night command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.alarm_arm_night, code)
def alarm_trigger(self, code=None):
"""Send alarm trigger command."""
raise NotImplementedError()
def async_alarm_trigger(self, code=None):
"""Send alarm trigger command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.alarm_trigger, code)
@property
def state_attributes(self):
"""Return the state attributes."""
state_attr = {
ATTR_CODE_FORMAT: self.code_format,
ATTR_CHANGED_BY: self.changed_by
}
return state_attr
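# A minimal, illustrative sketch (not a real platform) of a concrete panel
# built on the abstract class above. The plain state strings mirror Home
# Assistant's usual alarm states and are an assumption here; a real platform
# would use the constants from homeassistant.const and register its entities
# through a setup function.
class _ExampleAlarmPanel(AlarmControlPanel):
    """Toy in-memory alarm panel, used only to illustrate the base class."""

    def __init__(self):
        """Start out disarmed."""
        self._state = 'disarmed'

    @property
    def state(self):
        """Return the current alarm state."""
        return self._state

    @property
    def code_format(self):
        """Require a four-digit numeric code."""
        return r'^\d{4}$'

    def alarm_disarm(self, code=None):
        """Send disarm command."""
        self._state = 'disarmed'

    def alarm_arm_home(self, code=None):
        """Send arm home command."""
        self._state = 'armed_home'

    def alarm_arm_away(self, code=None):
        """Send arm away command."""
        self._state = 'armed_away'
    # alarm_arm_night/alarm_trigger are left to the base class and would
    # raise NotImplementedError if called on this sketch.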
|
|
import logging
import re
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# Handle "<actions/>"
def handle_actions(top):
# Nothing to do if it's empty.
# Otherwise...
    if list(top):
raise NotImplementedError("Don't know how to handle a "
"non-empty <actions> element.")
# Handle "<description>my cool job</description>"
def handle_description(top):
return [['description', top.text]]
# Handle "<keepDependencies>false</keepDependencies>"
def handle_keepdependencies(top):
    # JJB cannot handle any value other than false here.
# There is no corresponding YAML option.
return None
# Handle "<properties>..."
def handle_properties(top):
properties = []
parameters = []
for child in top:
# GitHub
if child.tag == 'com.coravy.hudson.plugins.github.GithubProjectProperty': # NOQA
github = handle_github_project_property(child)
properties.append(github)
# Parameters
elif child.tag == 'hudson.model.ParametersDefinitionProperty':
parametersdefs = handle_parameters_property(child)
for pd in parametersdefs:
parameters.append(pd)
# Parameters
elif child.tag == 'com.sonyericsson.rebuild.RebuildSettings':
# latest version of JJB (1.3.0 at the moment) doesn't support this.
continue
        # Throttling
elif child.tag == 'hudson.plugins.throttleconcurrents.ThrottleJobProperty':
throttleproperty = handle_throttle_property(child)
properties.append(throttleproperty)
# Slack
elif child.tag == 'jenkins.plugins.slack.SlackNotifier_-SlackJobProperty':
slackproperty = handle_slack_property(child)
properties.append(slackproperty)
elif child.tag == 'jenkins.model.BuildDiscarderProperty':
discarderproperty = handle_build_discarder_property(child)
properties.append(discarderproperty)
# A property we don't know about
else:
            print("cannot handle XML %s" % child.tag)
return [['properties', properties], ['parameters', parameters]]
# Handle "<jenkins.model.BuildDiscarderProperty>"
def handle_build_discarder_property(top):
discarder = {}
mapping = {
'daysToKeep': 'days-to-keep',
'numToKeep': 'num-to-keep',
'artifactDaysToKeep': 'artifact-days-to-keep',
'artifactNumToKeep': 'artifact-num-to-keep',
}
for child in top[0]:
discarder[mapping[child.tag]] = int(child.text)
return {'build-discarder': discarder}
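# Short illustration (not part of the converter) of the XML shape the handler
# above expects and the JJB-style mapping it returns; the sample XML is a
# hand-written approximation of the build-discarder property.
def _example_handle_build_discarder():
    import xml.etree.ElementTree as ET
    sample = """
    <jenkins.model.BuildDiscarderProperty>
      <strategy class="hudson.tasks.LogRotator">
        <daysToKeep>7</daysToKeep>
        <numToKeep>10</numToKeep>
        <artifactDaysToKeep>-1</artifactDaysToKeep>
        <artifactNumToKeep>-1</artifactNumToKeep>
      </strategy>
    </jenkins.model.BuildDiscarderProperty>
    """
    result = handle_build_discarder_property(ET.fromstring(sample))
    # result == {'build-discarder': {'days-to-keep': 7, 'num-to-keep': 10,
    #            'artifact-days-to-keep': -1, 'artifact-num-to-keep': -1}}
    return result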
# Handle "<com.coravy.hudson.plugins.github.GithubProjectProperty>..."
def handle_github_project_property(top):
github = {}
for child in top:
if child.tag == 'projectUrl':
github['url'] = child.text
elif child.tag == 'displayName':
pass
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
return {'github': github}
# Handle "<hudson.model.ParametersDefinitionProperty>..."
def handle_parameters_property(top):
parameters = []
for parameterdefs in top:
if parameterdefs.tag != 'parameterDefinitions':
raise NotImplementedError("cannot handle "
"XML %s" % parameterdefs.tag)
for parameterdef in parameterdefs:
if parameterdef.tag == 'hudson.model.StringParameterDefinition':
parameter_type = 'string'
elif parameterdef.tag == 'hudson.model.BooleanParameterDefinition':
parameter_type = 'bool'
elif parameterdef.tag == 'hudson.model.ChoiceParameterDefinition':
parameter_type = 'choice'
else:
raise NotImplementedError(parameterdef.tag)
parameter_settings = {}
for defsetting in parameterdef:
key = {
'defaultValue': 'default',
}.get(defsetting.tag, defsetting.tag)
# If the XML had a blank string, don't pass None to PyYAML,
# because PyYAML will translate this as "null". Just use a
# blank string to be safe.
if defsetting.text is None:
value = ''
# If the XML has a value of "true" or "false", we shouldn't
# treat the value as a string. Use native Python booleans
# so PyYAML will not quote the values as strings.
elif defsetting.text == 'true':
value = True
elif defsetting.text == 'false':
value = False
# Get all the choices
elif parameter_type == 'choice':
choices = []
for child in defsetting:
                        if child.attrib['class'] == 'string-array':
for element in child:
choices.append(element.text)
else:
raise NotImplementedError(child.attrib['class'])
value = choices
# Assume that PyYAML will handle everything else correctly
else:
value = defsetting.text
parameter_settings[key] = value
parameters.append({parameter_type: parameter_settings})
return parameters
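# Why the handler above substitutes '' for missing text and native booleans
# for "true"/"false": a quick PyYAML sketch (assumes PyYAML is installed,
# which the comments above already rely on).
def _example_pyyaml_serialisation():
    import yaml
    # None would be rendered as "null" in the generated job YAML...
    print(yaml.dump({'default': None}))   # default: null
    # ...whereas a blank string stays an (empty, quoted) string,
    print(yaml.dump({'default': ''}))     # default: ''
    # and native booleans are emitted unquoted.
    print(yaml.dump({'default': True}))   # default: true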
def get_bool(txt):
trues = ['true', 'True', 'Yes', 'yes', '1']
return txt in trues
def handle_throttle_property(top):
throttle_ret = {}
for child in top:
if child.tag == 'maxConcurrentPerNode':
throttle_ret['max-per-node'] = child.text
elif child.tag == 'maxConcurrentTotal':
throttle_ret['max-total'] = child.text
elif child.tag == 'throttleOption':
throttle_ret['option'] = child.text
elif child.tag == 'throttleEnabled':
throttle_ret['enabled'] = get_bool(child.text)
elif child.tag == 'categories':
throttle_ret['categories'] = []
elif child.tag == 'configVersion':
pass # assigned by jjb
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
    return {'throttle': throttle_ret}
def handle_slack_property(top):
slack_ret = {}
    notifications = {
        "notifySuccess": "notify-success",
        "notifyAborted": "notify-aborted",
        "notifyNotBuilt": "notify-not-built",
        "notifyUnstable": "notify-unstable",
        "notifyFailure": "notify-failure",
        "notifyBackToNormal": "notify-back-to-normal",
        "notifyRepeatedFailure": "notify-repeated-failure"
    }
for child in top:
if child.tag == 'teamDomain':
slack_ret['team-domain'] = child.text
elif child.tag == 'token':
slack_ret['token'] = child.text
elif child.tag == 'room':
slack_ret['room'] = child.text
elif child.tag == 'includeTestSummary':
slack_ret['include-test-summary'] = (child.text == 'true')
elif child.tag == 'showCommitList':
slack_ret['show-commit-list'] = (child.text == 'true')
elif child.tag == 'includeCustomMessage':
slack_ret['include-custom-message'] = (child.text == 'true')
elif child.tag == 'customMessage':
slack_ret['custom-message'] = child.text
elif child.tag == 'startNotification':
slack_ret['start-notification'] = (child.text == 'true')
elif child.tag in notifications:
slack_ret[notifications[child.tag]] = (child.text == 'true')
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
    return {'slack': slack_ret}
# Handle "<scm>..."
def handle_scm(top):
scm_ret = {}
if 'class' in top.attrib:
if top.attrib['class'] == 'hudson.scm.NullSCM':
return None
if top.attrib['class'] == 'org.jenkinsci.plugins.multiplescms.MultiSCM':
scms = []
for scm in top[0]:
scms.append(handle_scm(scm)[0])
return scms
if top.tag == 'hudson.plugins.git.GitSCM' or \
('class' in top.attrib and top.attrib['class'] == 'hudson.plugins.git.GitSCM'):
git = handle_scm_git(top)
scm_ret = {'git': git}
elif top.tag == 'hudson.plugins.mercurial.MercurialSCM' or \
('class' in top.attrib and top.attrib['class'] == 'hudson.plugins.mercurial.MercurialSCM'):
hg = handle_scm_hg(top)
scm_ret = {'hg': hg}
else:
raise NotImplementedError("%s scm not supported" % top.attrib['class'])
return [['scm', [scm_ret]]]
def handle_scm_hg(top):
hg = {}
for child in top:
if child.tag == 'source':
hg['url'] = child.text
elif child.tag == 'credentialsId':
hg['credentials-id'] = child.text
elif child.tag == 'revisionType':
hg['revision-type'] = child.text.lower()
elif child.tag == 'revision':
hg['revision'] = child.text
elif child.tag == 'modules':
pass
elif child.tag == 'clean':
hg['clean'] = (child.text == 'true')
elif child.tag == 'subdir':
hg['subdir'] = child.text
elif child.tag == 'disableChangeLog':
hg['disable-changelog'] = (child.text == 'true')
elif child.tag == 'browser' and 'class' in child.attrib:
browser_class = child.attrib['class']
if browser_class == 'hudson.plugins.mercurial.browser.BitBucket':
hg['browser'] = 'bitbucketweb'
elif browser_class == 'hudson.plugins.mercurial.browser.FishEye':
hg['browser'] = 'fisheye'
elif browser_class == 'hudson.plugins.mercurial.browser.GoogleCode':
hg['browser'] = 'googlecode'
elif browser_class == 'hudson.plugins.mercurial.browser.HgWeb':
hg['browser'] = 'hgweb'
elif browser_class == 'hudson.plugins.mercurial.browser.Kallithea':
# Not supported by JJB
raise NotImplementedError("%s is not yet supported by jenkins-job-builder." %
browser_class)
elif browser_class == 'hudson.plugins.mercurial.browser.KilnHG':
hg['browser'] = 'kilnhg'
elif browser_class == 'hudson.plugins.mercurial.browser.RhodeCode':
hg['browser'] = 'rhodecode'
elif browser_class == 'hudson.plugins.mercurial.browser.RhodeCodeLegacy':
hg['browser'] = 'rhodecode-pre-1.2'
if child.find('url') is not None:
hg['browser-url'] = child.find('url').text
return hg
def handle_scm_git(top):
git = {}
for child in top:
if child.tag == 'configVersion':
continue # we don't care
elif child.tag == 'userRemoteConfigs':
if len(list(child)) != 1:
# expected "hudson.plugins.git.UserRemoteConfig" tag
raise NotImplementedError("%s not supported with %i "
"children" % (child.tag,
len(list(child))))
for setting in child[0]:
git[setting.tag] = setting.text
elif child.tag == 'gitTool':
git['git-tool'] = child.text
elif child.tag == 'excludedUsers':
if child.text:
users = child.text.split()
git['excluded-users'] = users
elif child.tag == 'buildChooser':
if child.attrib['class'] == \
'hudson.plugins.git.util.DefaultBuildChooser':
continue
else:
# see JJB's jenkins_jobs/modules/scm.py
# for other build choosers
raise NotImplementedError("%s build "
"chooser" % child.attrib['class'])
elif child.tag == 'disableSubmodules':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
raise NotImplementedError("TODO: %s" % child.tag)
elif child.tag == 'recursiveSubmodules':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
raise NotImplementedError("TODO: %s" % child.tag)
elif child.tag == 'authorOrCommitter':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['use-author'] = True
elif child.tag == 'useShallowClone':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['shallow-clone'] = True
elif child.tag == 'ignoreNotifyCommit':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['ignore-notify'] = True
elif child.tag == 'wipeOutWorkspace':
git['wipe-workspace'] = (child.text == 'true')
elif child.tag == 'skipTag':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['skip-tag'] = True
elif child.tag == 'pruneBranches':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['prune'] = True
elif child.tag == 'remotePoll':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['fastpoll'] = True
elif child.tag == 'relativeTargetDir':
# If it's empty, no explicit 'basedir' YAML needed.
if child.text:
git['basedir'] = child.text
elif child.tag == 'reference':
# If it's empty, we're good
if child.text or len(list(child)) > 0:
raise NotImplementedError(child.tag)
elif child.tag == 'gitConfigName':
# If it's empty, we're good
if child.text or len(list(child)) > 0:
raise NotImplementedError(child.tag)
elif child.tag == 'gitConfigEmail':
# If it's empty, we're good
if child.text or len(list(child)) > 0:
raise NotImplementedError(child.tag)
elif child.tag == 'scmName':
# If it's empty, we're good
if child.text or len(list(child)) > 0:
raise NotImplementedError(child.tag)
elif child.tag == 'branches':
if child[0][0].tag != 'name':
raise NotImplementedError("%s XML not supported"
% child[0][0].tag)
branches = []
for item in child:
for branch in item:
branches.append(branch.text)
git['branches'] = branches
elif child.tag == 'doGenerateSubmoduleConfigurations':
if len(list(child)) != 0:
raise NotImplementedError("%s not supported with %i children"
% (child.tag, len(list(child))))
# JJB doesn't handle this element anyway. Just continue on.
continue
elif child.tag == 'submoduleCfg':
if len(list(child)) > 0:
raise NotImplementedError("%s not supported with %i children"
% (child.tag, len(list(child))))
elif child.tag == 'browser':
# XXX: blunt hammer: just use the "auto" browser for everything.
git['browser'] = 'auto'
elif child.tag == 'extensions':
for extension in child:
# hudson.plugins.git.extensions.impl.RelativeTargetDirectory
if extension.tag == 'hudson.plugins.git.extensions.impl.RelativeTargetDirectory':
if len(list(extension)) != 1:
# expected <relativeTargetDir>
raise NotImplementedError("%s not supported with %i children" % (extension.tag, len(list(extension))))
if extension[0].tag != 'relativeTargetDir':
raise NotImplementedError("%s XML not supported" % extension[0].tag)
git['basedir'] = extension[0].text
elif extension.tag == 'hudson.plugins.git.extensions.impl.CheckoutOption':
if len(list(extension)) != 1:
# expected <timeout>
raise NotImplementedError("%s not supported with %i children" % (extension.tag, len(list(extension))))
if extension[0].tag != 'timeout':
                        raise NotImplementedError("%s XML not supported" % extension[0].tag)
git['timeout'] = extension[0].text
elif extension.tag == 'hudson.plugins.git.extensions.impl.WipeWorkspace':
if len(list(extension)) != 0:
raise NotImplementedError("%s not supported with %i children" % (extension.tag, len(list(extension))))
git['wipe-workspace'] = True
elif extension.tag == 'hudson.plugins.git.extensions.impl.LocalBranch':
git['local-branch'] = extension[0].text
elif extension.tag == 'hudson.plugins.git.extensions.impl.PerBuildTag':
pass
else:
raise NotImplementedError("%s not supported" % extension.tag)
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
return git
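# Illustrative usage sketch, not part of the converter itself: feed a minimal
# Jenkins <scm class="hudson.plugins.git.GitSCM"> element (the config.xml
# format handled above) through handle_scm_git() and check the resulting JJB
# dict. The demo function name and sample values are ours.
def _handle_scm_git_demo():
    import xml.etree.ElementTree as ET
    snippet = """
    <scm class="hudson.plugins.git.GitSCM">
      <userRemoteConfigs>
        <hudson.plugins.git.UserRemoteConfig>
          <url>https://example.com/repo.git</url>
        </hudson.plugins.git.UserRemoteConfig>
      </userRemoteConfigs>
      <branches>
        <hudson.plugins.git.BranchSpec>
          <name>*/master</name>
        </hudson.plugins.git.BranchSpec>
      </branches>
      <wipeOutWorkspace>true</wipeOutWorkspace>
    </scm>
    """
    git = handle_scm_git(ET.fromstring(snippet))
    assert git['url'] == 'https://example.com/repo.git'
    assert git['branches'] == ['*/master']
    assert git['wipe-workspace'] is True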
# Handle "<canRoam>true</canRoam>"
def handle_canroam(top):
# JJB doesn't have an explicit YAML setting for this; instead, it
# infers it from the "node" parameter. So there's no need to handle the
# XML here.
return None
# Handle "<disabled>false</disabled>"
def handle_disabled(top):
return [['disabled', top.text == 'true']]
# Handle "<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>" NOQA
def handle_blockbuildwhendownstreambuilding(top):
return [['block-downstream', top.text == 'true']]
# Handle "<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>" NOQA
def handle_blockbuildwhenupstreambuilding(top):
return [['block-upstream', top.text == 'true']]
def handle_triggers(top):
triggers = []
for child in top:
if child.tag == 'hudson.triggers.SCMTrigger':
pollscm = {}
for setting in child:
if setting.tag == 'spec':
pollscm['cron'] = setting.text
elif setting.tag == 'ignorePostCommitHooks':
pollscm['ignore-post-commit-hooks'] = \
(setting.text == 'true')
else:
raise NotImplementedError("cannot handle scm trigger "
"setting %s" % setting.tag)
triggers.append({'pollscm': pollscm})
elif child.tag == 'hudson.triggers.TimerTrigger':
timed_trigger = {}
timed_trigger['timed'] = child[0].text
triggers.append(timed_trigger)
elif child.tag == 'jenkins.triggers.ReverseBuildTrigger':
reverse = {}
for setting in child:
if setting.tag == 'upstreamProjects':
reverse['jobs'] = setting.text
elif setting.tag == 'threshold':
pass # TODO
elif setting.tag == 'spec':
pass # TODO
else:
raise NotImplementedError("cannot handle reverse trigger "
"setting %s" % setting.tag)
            triggers.append({'reverse': reverse})
elif child.tag == 'com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.GerritTrigger': # NOQA
# Skip for now
pass
elif child.tag == 'com.cloudbees.jenkins.GitHubPushTrigger':
triggers.append('github')
elif child.tag == 'org.jenkinsci.plugins.ghprb.GhprbTrigger':
ghpr = {}
for ghprel in child:
tagname = ghprel.tag
if tagname == 'spec' or tagname == 'cron':
ghpr['cron'] = ghprel.text
elif tagname == 'adminlist':
ghpr['admin-list'] = ghprel.text.strip().split('\n')
elif tagname == 'allowMembersOfWhitelistedOrgsAsAdmin':
ghpr['allow-whitelist-orgs-as-admins'] = get_bool(ghprel.text)
elif tagname == 'whitelist' and ghprel.text is not None:
ghpr['white-list'] = ghprel.text.strip().split('\n')
elif tagname == 'orgslist' and ghprel.text is not None:
ghpr['org-list'] = ghprel.text.strip().split('\n')
elif tagname == 'buildDescTemplate':
ghpr['build-desc-template'] = ghprel.text
elif tagname == 'triggerPhrase':
ghpr['trigger-phrase'] = ghprel.text
elif tagname == 'onlyTriggerPhrase':
ghpr['only-trigger-phrase'] = get_bool(ghprel.text)
elif tagname == 'useGitHubHooks':
ghpr['github-hooks'] = get_bool(ghprel.text)
elif tagname == 'permitAll':
ghpr['permit-all'] = get_bool(ghprel.text)
elif tagname == 'autoCloseFailedPullRequests':
ghpr['auto-close-on-fail'] = get_bool(ghprel.text)
elif tagname == 'whiteListTargetBranches':
ghpr['white-list-target-branches'] = []
for ghprbranch in ghprel:
if ghprbranch[0].text is not None:
ghpr['white-list-target-branches'].append(ghprbranch[0].text.strip())
elif tagname == 'gitHubAuthId':
ghpr['auth-id'] = ghprel.text
triggers.append({'github-pull-request': ghpr})
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
return [['triggers', triggers]]
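# Illustrative sketch (ours, not part of the converter): a <triggers> block
# with an SCM poll and a GitHub push trigger, run through handle_triggers().
def _handle_triggers_demo():
    import xml.etree.ElementTree as ET
    snippet = """
    <triggers>
      <hudson.triggers.SCMTrigger>
        <spec>H/15 * * * *</spec>
        <ignorePostCommitHooks>false</ignorePostCommitHooks>
      </hudson.triggers.SCMTrigger>
      <com.cloudbees.jenkins.GitHubPushTrigger/>
    </triggers>
    """
    assert handle_triggers(ET.fromstring(snippet)) == [
        ['triggers', [{'pollscm': {'cron': 'H/15 * * * *',
                                   'ignore-post-commit-hooks': False}},
                      'github']]]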
def handle_concurrentbuild(top):
return [['concurrent', top.text == 'true']]
def handle_axes(top):
axes = []
for child in top:
if child.tag == 'hudson.matrix.LabelExpAxis':
axis = {'type': 'label-expression'}
for axis_element in child:
if axis_element.tag == 'name':
axis['name'] = axis_element.text
if axis_element.tag == 'values':
values = []
for value_element in axis_element:
values.append(value_element.text)
axis['values'] = values
axes.append({'axis': axis})
elif child.tag == 'hudson.matrix.LabelAxis':
axis = {'type': 'slave'}
for axis_element in child:
if axis_element.tag == 'name':
axis['name'] = axis_element.text
if axis_element.tag == 'values':
values = []
for value_element in axis_element:
values.append(value_element.text)
axis['values'] = values
axes.append({'axis': axis})
elif child.tag == 'hudson.matrix.TextAxis':
axis = {'type': 'user-defined'}
for axis_element in child:
if axis_element.tag == 'name':
axis['name'] = axis_element.text
if axis_element.tag == 'values':
values = []
for value_element in axis_element:
values.append(value_element.text)
axis['values'] = values
axes.append({'axis': axis})
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
return [['axes', axes]]
def handle_builders(top):
builders = []
for child in top:
if child.tag == 'hudson.plugins.copyartifact.CopyArtifact':
copyartifact = {}
selectdict = {
'StatusBuildSelector': 'last-successful',
'LastCompletedBuildSelector': 'last-completed',
'SpecificBuildSelector': 'specific-build',
'SavedBuildSelector': 'last-saved',
'TriggeredBuildSelector': 'upstream-build',
'PermalinkBuildSelector': 'permalink',
'WorkspaceSelector': 'workspace-latest',
'ParameterizedBuildSelector': 'build-param',
'DownstreamBuildSelector': 'downstream-build'}
for copy_element in child:
if copy_element.tag == 'project':
copyartifact[copy_element.tag] = copy_element.text
elif copy_element.tag == 'filter':
copyartifact[copy_element.tag] = copy_element.text
elif copy_element.tag == 'target':
copyartifact[copy_element.tag] = copy_element.text
elif copy_element.tag == 'excludes':
copyartifact['exclude-pattern'] = copy_element.text
elif copy_element.tag == 'selector':
select = copy_element.attrib['class']
select = select.replace('hudson.plugins.copyartifact.', '')
copyartifact['which-build'] = selectdict[select]
elif copy_element.tag == 'flatten':
copyartifact[copy_element.tag] = \
(copy_element.text == 'true')
elif copy_element.tag == 'doNotFingerprintArtifacts':
# Not yet implemented in JJB
# ADD RAW XML
continue
elif copy_element.tag == 'optional':
copyartifact[copy_element.tag] = \
(copy_element.text == 'true')
else:
raise NotImplementedError("cannot handle "
"XML %s" % copy_element.tag)
builders.append({'copyartifact': copyartifact})
elif child.tag == 'hudson.tasks.Shell':
shell = handle_commands(child)
builders.append({'shell': shell})
elif child.tag == 'hudson.tasks.BatchFile':
batch = handle_commands(child)
builders.append({'batch':batch})
elif child.tag == 'hudson.tasks.Maven':
maven = {}
for maven_element in child:
if maven_element.tag == 'targets':
maven['goals'] = maven_element.text
elif maven_element.tag == 'mavenName':
maven['name'] = maven_element.text
elif maven_element.tag == 'usePrivateRepository':
maven['private-repository'] = (maven_element.text == 'true')
elif maven_element.tag == 'settings':
maven['settings'] = maven_element.attrib['class']
elif maven_element.tag == 'globalSettings':
maven['global-settings'] = maven_element.attrib['class']
else:
continue
builders.append({'maven-target':maven})
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
return [['builders', builders]]
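# Illustrative sketch (ours): a single shell build step converted by
# handle_builders() into the JJB 'shell' builder form.
def _handle_builders_demo():
    import xml.etree.ElementTree as ET
    snippet = """
    <builders>
      <hudson.tasks.Shell>
        <command>make test</command>
      </hudson.tasks.Shell>
    </builders>
    """
    assert handle_builders(ET.fromstring(snippet)) == [
        ['builders', [{'shell': 'make test'}]]]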
def handle_commands(element):
for shell_element in element:
# Assumption: there's only one <command> in this
# <hudson.tasks.Shell>
if shell_element.tag == 'command':
            shell = shell_element.text
else:
raise NotImplementedError("cannot handle "
"XML %s" % shell_element.tag)
return shell
def handle_publishers(top):
publishers = []
for child in top:
if child.tag == 'hudson.tasks.ArtifactArchiver':
archive = {}
for element in child:
if element.tag == 'artifacts':
archive['artifacts'] = element.text
elif element.tag == 'allowEmptyArchive':
archive['allow-empty'] = (element.text == 'true')
elif element.tag == 'fingerprint':
archive['fingerprint'] = (element.text == 'true')
elif element.tag == 'onlyIfSuccessful':
# only-if-success first available in JJB 1.3.0
archive['only-if-success'] = (element.text == 'true')
elif element.tag == 'defaultExcludes':
# default-excludes is not yet available in JJB master
archive['default-excludes'] = (element.text == 'true')
elif element.tag == 'latestOnly':
archive['latest-only'] = (element.text == 'true')
else:
raise NotImplementedError("cannot handle "
"XML %s" % element.tag)
publishers.append({'archive': archive})
elif child.tag == 'hudson.plugins.descriptionsetter.DescriptionSetterPublisher': # NOQA
setter = {}
for element in child:
if element.tag == 'regexp':
setter['regexp'] = element.text
elif element.tag == 'regexpForFailed':
setter['regexp-for-failed'] = element.text
elif element.tag == 'setForMatrix':
setter['set-for-matrix'] = (element.text == 'true')
elif element.tag == 'description':
setter['description'] = element.text
else:
raise NotImplementedError("cannot handle "
"XML %s" % element.tag)
publishers.append({'description-setter': setter})
elif child.tag == 'hudson.tasks.Fingerprinter':
fingerprint = {}
for element in child:
if element.tag == 'targets':
fingerprint['files'] = element.text
elif element.tag == 'recordBuildArtifacts':
fingerprint['record-artifacts'] = (element.text == 'true')
else:
raise NotImplementedError("cannot handle "
"XML %s" % element.tag)
publishers.append({'fingerprint': fingerprint})
elif child.tag == 'hudson.plugins.emailext.ExtendedEmailPublisher':
ext_email = {}
for element in child:
if element.tag == 'recipientList':
ext_email['recipients'] = element.text
elif element.tag == 'replyTo':
ext_email['reply-to'] = element.text
elif element.tag == 'contentType':
ext_email['content-type'] = element.text
elif element.tag == 'defaultSubject':
ext_email['subject'] = element.text
elif element.tag == 'defaultContent':
ext_email['body'] = element.text
elif element.tag in ['attachBuildLog', 'compressBuildLog']:
ext_email['attach-build-log'] = (element.text == 'true')
elif element.tag == 'attachmentsPattern':
ext_email['attachment'] = element.text
elif element.tag in ['saveOutput', 'disabled']:
pass
elif element.tag == 'preBuild':
ext_email['pre-build'] = (element.text == 'true')
elif element.tag == 'presendScript':
ext_email['presend-script'] = element.text
elif element.tag == 'sendTo':
ext_email['send-to'] = element.text
elif element.tag == 'configuredTriggers':
print "IGNORED configuredTriggers in email-ext"
else:
raise NotImplementedError("cannot handle "
"XML %s" % element.tag)
publishers.append({'email-ext': ext_email})
elif child.tag == 'hudson.tasks.junit.JUnitResultArchiver':
junit_publisher = {}
for element in child:
if element.tag == 'testResults':
junit_publisher['results'] = element.text
elif element.tag == 'keepLongStdio':
junit_publisher['keep-long-stdio'] = \
(element.text == 'true')
elif element.tag == 'healthScaleFactor':
junit_publisher['health-scale-factor'] = element.text
else:
raise NotImplementedError("cannot handle "
"XML %s" % element.tag)
publishers.append({'junit': junit_publisher})
elif child.tag == 'hudson.plugins.parameterizedtrigger.BuildTrigger':
build_trigger = {}
for element in child:
for sub in element:
if sub.tag == 'hudson.plugins.parameterizedtrigger.BuildTriggerConfig': # NOQA
for config in sub:
if config.tag == 'projects':
build_trigger['project'] = config.text
elif config.tag == 'condition':
build_trigger['condition'] = config.text
elif config.tag == 'triggerWithNoParameters':
build_trigger['trigger-with-no-params'] = \
(config.text == 'true')
elif config.tag == 'configs':
pass
else:
raise NotImplementedError("cannot handle "
"XML %s" % config.tag)
publishers.append({'trigger-parameterized-builds': build_trigger})
elif child.tag == 'hudson.tasks.Mailer':
email_settings = {}
for element in child:
if element.tag == 'recipients':
email_settings['recipients'] = element.text
elif element.tag == 'dontNotifyEveryUnstableBuild':
email_settings['notify-every-unstable-build'] = \
(element.text == 'true')
elif element.tag == 'sendToIndividuals':
email_settings['send-to-individuals'] = \
(element.text == 'true')
else:
raise NotImplementedError("cannot handle "
"email %s" % element.tag)
publishers.append({'email': email_settings})
elif child.tag == 'htmlpublisher.HtmlPublisher':
html_publisher = {}
element = child[0]
if element.tag != 'reportTargets':
raise NotImplementedError("Cannot handle XML %s" % element.tag)
for subelement in element:
if subelement.tag != 'htmlpublisher.HtmlPublisherTarget':
                    raise NotImplementedError("Cannot handle XML %s" % subelement.tag)
for config in subelement:
if config.tag == 'reportName':
html_publisher['name'] = config.text
if config.tag == 'reportDir':
html_publisher['dir'] = config.text
if config.tag == 'reportFiles':
html_publisher['files'] = config.text
if config.tag == 'keepAll':
html_publisher['keep-all'] = (config.text == 'true')
if config.tag == 'allowMissing':
html_publisher['allow-missing'] = (config.text == 'true')
if config.tag == 'alwaysLinkToLastBuild':
html_publisher['link-to-last-build'] = (config.text == 'true')
if config.tag == 'wrapperName':
                        # Apparently, older versions leaked this wrapper name
# to the job configuration.
pass
if len(html_publisher) > 0:
publishers.append({'html-publisher': html_publisher})
elif child.tag == 'org.jvnet.hudson.plugins.groovypostbuild.GroovyPostbuildRecorder':
groovy = {}
for groovy_element in child:
if groovy_element.tag == 'groovyScript':
groovy['script'] = groovy_element.text
elif groovy_element.tag == 'classpath':
classpaths = []
for child1 in groovy_element:
for child2 in child1:
if child2.tag == 'path':
classpaths.append(child2.text)
                    groovy['classpath'] = classpaths
                else:
                    # Other groovy-postbuild elements are not converted here;
                    # skip them instead of raising.
                    continue
            publishers.append({'groovy-postbuild': groovy})
elif child.tag == 'org.jenkins__ci.plugins.flexible__publish.FlexiblePublisher': # NOQA
raise NotImplementedError("cannot handle XML %s" % child.tag)
elif child.tag == 'hudson.plugins.s3.S3BucketPublisher':
raise NotImplementedError("cannot handle XML %s" % child.tag)
elif child.tag == 'hudson.plugins.robot.RobotPublisher':
raise NotImplementedError("cannot handle XML %s" % child.tag)
elif child.tag == 'jenkins.plugins.publish__over__ssh.BapSshPublisherPlugin':
raise NotImplementedError("cannot handle XML %s" % child.tag)
elif child.tag == 'jenkins.plugins.slack.SlackNotifier':
slacknotifier = {}
slack_tags = ['teamDomain', 'authToken', 'buildServerUrl', 'room']
for slack_el in child:
if slack_el.tag not in slack_tags:
raise NotImplementedError("cannot handle SlackNotifier.%s" % slack_el.tag)
slack_yaml_key = re.sub('([A-Z])', r'-\1', slack_el.tag).lower()
slacknotifier[slack_yaml_key] = slack_el.text
publishers.append({'slack': slacknotifier})
elif child.tag == 'hudson.plugins.postbuildtask.PostbuildTask':
post_tasks = []
for pt in child[0]:
post_task = {}
for ptel in pt:
if ptel.tag == 'logTexts':
matches = []
for logtext in ptel:
match = {}
for logtextel in logtext:
if logtextel.tag == 'logText':
match['log-text'] = logtextel.text
elif logtextel.tag == 'operator':
match['operator'] = logtextel.text
matches.append(match)
post_task['matches'] = matches
elif ptel.tag == 'EscalateStatus':
post_task['escalate-status'] = get_bool(ptel.text)
elif ptel.tag == 'RunIfJobSuccessful':
post_task['run-if-job-successful'] = get_bool(ptel.text)
elif ptel.tag == 'script':
post_task['script'] = ptel.text
post_tasks.append(post_task)
publishers.append({'post-tasks': post_tasks})
elif child.tag == 'hudson.plugins.ws__cleanup.WsCleanup':
cleanup = {'include': [], 'exclude': [], 'clean-if': []}
for cleanupel in child:
if cleanupel.tag == 'patterns':
for pattern in cleanupel:
pattern_glob = None
pattern_type = None
for patternel in pattern:
if patternel.tag == 'pattern':
pattern_glob = patternel.text
elif patternel.tag == 'type':
pattern_type = patternel.text
cleanup[pattern_type.lower()].append(pattern_glob)
elif cleanupel.tag == 'deleteDirs':
cleanup['dirmatch'] = get_bool(cleanupel.text)
elif cleanupel.tag == 'cleanWhenSuccess':
cleanup['clean-if'].append({'success': get_bool(cleanupel.text)})
elif cleanupel.tag == 'cleanWhenUnstable':
cleanup['clean-if'].append({'unstable': get_bool(cleanupel.text)})
elif cleanupel.tag == 'cleanWhenFailure':
cleanup['clean-if'].append({'failure': get_bool(cleanupel.text)})
elif cleanupel.tag == 'cleanWhenNotBuilt':
cleanup['clean-if'].append({'not-built': get_bool(cleanupel.text)})
elif cleanupel.tag == 'cleanWhenAborted':
cleanup['clean-if'].append({'aborted': get_bool(cleanupel.text)})
elif cleanupel.tag == 'notFailBuild':
cleanup['fail-build'] = not get_bool(cleanupel.text)
elif cleanupel.tag == 'cleanupMatrixParent':
cleanup['clean-parent'] = get_bool(cleanupel.text)
publishers.append({'workspace-cleanup': cleanup})
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
return [['publishers', publishers]]
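# Illustrative sketch (ours): an artifact-archiver publisher converted by
# handle_publishers() into the JJB 'archive' form.
def _handle_publishers_demo():
    import xml.etree.ElementTree as ET
    snippet = """
    <publishers>
      <hudson.tasks.ArtifactArchiver>
        <artifacts>dist/*.tar.gz</artifacts>
        <latestOnly>false</latestOnly>
      </hudson.tasks.ArtifactArchiver>
    </publishers>
    """
    assert handle_publishers(ET.fromstring(snippet)) == [
        ['publishers', [{'archive': {'artifacts': 'dist/*.tar.gz',
                                     'latest-only': False}}]]]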
def handle_buildwrappers(top):
wrappers = []
for child in top:
if child.tag == 'EnvInjectPasswordWrapper':
inject = {}
for element in child:
if element.tag == 'injectGlobalPasswords':
inject['global'] = (element.text == 'true')
elif element.tag == 'maskPasswordParameters':
inject['mask-password-params'] = (element.text == 'true')
elif element.tag == 'passwordEntries':
if len(list(element)) > 0:
raise NotImplementedError('TODO: implement handling '
'here')
else:
raise NotImplementedError("cannot handle "
"XML %s" % element.tag)
wrappers.append({'inject': inject})
elif child.tag == 'EnvInjectBuildWrapper':
build_inject = {}
for element in child:
if element.tag == 'info':
for subelement in element:
if subelement.tag == 'propertiesFilePath':
build_inject['properties-file'] = subelement.text
if subelement.tag == 'loadFilesFromMaster':
pass
else:
raise NotImplementedError("cannot handle "
"XML %s" % element.tag)
wrappers.append({'inject': build_inject})
elif child.tag == 'hudson.plugins.build__timeout.BuildTimeoutWrapper':
pass
elif child.tag == 'hudson.plugins.ansicolor.AnsiColorBuildWrapper':
wrappers.append({'ansicolor': {'colormap': 'xterm'}})
elif child.tag == 'com.cloudbees.jenkins.plugins.sshagent.SSHAgentBuildWrapper': # NOQA
ssh_agents = {}
for element in child:
if element.tag == 'credentialIds':
keys = []
for key in element:
keys.append(key.text)
ssh_agents['users'] = keys
elif element.tag == 'ignoreMissing':
pass
else:
raise NotImplementedError("cannot handle "
"XML %s" % element.tag)
wrappers.append({'ssh-agent-credentials': ssh_agents})
elif child.tag == 'org.jenkinsci.plugins.buildnamesetter.BuildNameSetter': # NOQA
wrappers.append({'build-name': {'name': child[0].text}})
elif child.tag == 'hudson.plugins.timestamper.TimestamperBuildWrapper':
wrappers.append('timestamps')
elif child.tag == 'hudson.plugins.ws__cleanup.PreBuildCleanup':
preclean = {}
preclean_patterns = {'include': '', 'exclude': ''}
for element in child:
if element.tag == 'deleteDirs':
preclean['dirmatch'] = (element.text == 'true')
elif element.tag == 'patterns':
for subelement in element:
if subelement.tag != 'hudson.plugins.ws__cleanup.Pattern':
raise NotImplementedError("cannot handle "
"XML %s" % subelement.tag)
if subelement.find('type') is not None and subelement.find('pattern') is not None:
rule_type = subelement.find('type').text.lower()
rule_patt = subelement.find('pattern').text
preclean_patterns[rule_type] = rule_patt
elif element.tag == 'cleanupParameter':
# JJB does not seem to support this. Ignored.
pass
elif element.tag == 'externalDelete':
# JJB does not seem to support this. Ignored.
pass
            else:
                raise NotImplementedError("cannot handle "
                                          "XML %s" % element.tag)
for rule in preclean_patterns:
if len(preclean_patterns[rule]) > 0:
preclean[rule] = preclean_patterns[rule]
if len(preclean) > 0:
wrappers.append({'workspace-cleanup': preclean})
else:
wrappers.append('workspace-cleanup')
elif child.tag == 'org.jenkinsci.plugins.xvfb.XvfbBuildWrapper':
xvfb = {}
for element in child:
if element.tag == 'installationName':
xvfb['installation-name'] = element.text
if element.tag == 'autoDisplayName':
xvfb['auto-display-name'] = (element.text == 'true')
if element.tag == 'displayName':
xvfb['display-name'] = element.text
if element.tag == 'assignedLabels':
xvfb['assigned-labels'] = element.text
if element.tag == 'parallelBuild':
xvfb['parallel-build'] = (element.text == 'true')
if element.tag == 'timeout':
xvfb['timeout'] = element.text
if element.tag == 'screen':
xvfb['screen'] = element.text
if element.tag == 'displayNameOffset':
xvfb['display-name-offset'] = element.text
if element.tag == 'additionalOptions':
xvfb['additional-options'] = element.text
if element.tag == 'debug':
xvfb['debug'] = (element.text == 'true')
if element.tag == 'shutdownWithBuild':
xvfb['shutdown-with-build'] = (element.text == 'true')
wrappers.append({'xvfb': xvfb})
elif child.tag == 'com.michelin.cio.hudson.plugins.maskpasswords.MaskPasswordsBuildWrapper':
wrappers.append('mask-passwords')
else:
print child
raise NotImplementedError("cannot handle XML %s" % child.tag)
return [['wrappers', wrappers]]
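# Illustrative sketch (ours): timestamper and ansicolor wrappers converted by
# handle_buildwrappers() into their JJB equivalents.
def _handle_buildwrappers_demo():
    import xml.etree.ElementTree as ET
    snippet = """
    <buildWrappers>
      <hudson.plugins.timestamper.TimestamperBuildWrapper/>
      <hudson.plugins.ansicolor.AnsiColorBuildWrapper/>
    </buildWrappers>
    """
    assert handle_buildwrappers(ET.fromstring(snippet)) == [
        ['wrappers', ['timestamps', {'ansicolor': {'colormap': 'xterm'}}]]]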
def handle_executionstrategy(top):
strategy = {}
for child in top:
if child.tag == 'runSequentially':
strategy['run-sequentially'] = (child.text == 'true')
elif child.tag == 'sorter':
# Is there anything but NOOP?
pass
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
return [['execution-strategy', strategy]]
# Handle "<logrotator>...</logrotator>"
def handle_logrotator(top):
logrotate = {}
for child in top:
if child.tag == 'daysToKeep':
logrotate['daysToKeep'] = child.text
elif child.tag == 'numToKeep':
logrotate['numToKeep'] = child.text
elif child.tag == 'artifactDaysToKeep':
logrotate['artifactDaysToKeep'] = child.text
elif child.tag == 'artifactNumToKeep':
logrotate['artifactNumToKeep'] = child.text
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
return [['logrotate', logrotate]]
# Handle "<combinationFilter>a != "b"</combinationFilter>"
def handle_combinationfilter(top):
return [['combination-filter', top.text]]
# Handle "<assignedNode>server.example.com</assignedNode>"
def handle_assignednode(top):
return [['node', top.text]]
# Handle "<displayName>my cool job</displayName>"
def handle_displayname(top):
return [['display-name', top.text]]
# Handle "<quietPeriod>5</quietPeriod>"
def handle_quietperiod(top):
return [['quiet-period', top.text]]
# Handle "<scmCheckoutRetryCount>8</scmCheckoutRetryCount>"
def handle_scmcheckoutretrycount(top):
return [['retry-count', top.text]]
def handle_customworkspace(top):
return [['workspace', top.text]]
def handle_jdk(top):
return [['jdk',top.text]]
|
|
from __future__ import absolute_import
from collections import OrderedDict
import warnings
import six
import math
import decimal
from plotly import utils
from plotly import exceptions
from plotly import graph_reference
from plotly import session
from plotly.files import (CONFIG_FILE, CREDENTIALS_FILE, FILE_CONTENT,
GRAPH_REFERENCE_FILE, check_file_permissions)
DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
REQUIRED_GANTT_KEYS = ['Task', 'Start', 'Finish']
PLOTLY_SCALES = {'Greys': ['rgb(0,0,0)', 'rgb(255,255,255)'],
'YlGnBu': ['rgb(8,29,88)', 'rgb(255,255,217)'],
'Greens': ['rgb(0,68,27)', 'rgb(247,252,245)'],
'YlOrRd': ['rgb(128,0,38)', 'rgb(255,255,204)'],
'Bluered': ['rgb(0,0,255)', 'rgb(255,0,0)'],
'RdBu': ['rgb(5,10,172)', 'rgb(178,10,28)'],
'Reds': ['rgb(220,220,220)', 'rgb(178,10,28)'],
'Blues': ['rgb(5,10,172)', 'rgb(220,220,220)'],
'Picnic': ['rgb(0,0,255)', 'rgb(255,0,0)'],
'Rainbow': ['rgb(150,0,90)', 'rgb(255,0,0)'],
'Portland': ['rgb(12,51,131)', 'rgb(217,30,30)'],
'Jet': ['rgb(0,0,131)', 'rgb(128,0,0)'],
'Hot': ['rgb(0,0,0)', 'rgb(255,255,255)'],
'Blackbody': ['rgb(0,0,0)', 'rgb(160,200,255)'],
'Earth': ['rgb(0,0,130)', 'rgb(255,255,255)'],
'Electric': ['rgb(0,0,0)', 'rgb(255,250,220)'],
'Viridis': ['rgb(68,1,84)', 'rgb(253,231,37)']}
# color constants for violin plot
DEFAULT_FILLCOLOR = '#1f77b4'
DEFAULT_HISTNORM = 'probability density'
ALTERNATIVE_HISTNORM = 'probability'
# Warning format
def warning_on_one_line(message, category, filename, lineno,
file=None, line=None):
return '%s:%s: %s:\n\n%s\n\n' % (filename, lineno, category.__name__,
message)
warnings.formatwarning = warning_on_one_line
try:
import IPython
import IPython.core.display
_ipython_imported = True
except ImportError:
_ipython_imported = False
try:
import numpy as np
_numpy_imported = True
except ImportError:
_numpy_imported = False
try:
import pandas as pd
_pandas_imported = True
except ImportError:
_pandas_imported = False
try:
import scipy as scp
_scipy_imported = True
except ImportError:
_scipy_imported = False
try:
import scipy.spatial as scs
_scipy__spatial_imported = True
except ImportError:
_scipy__spatial_imported = False
try:
import scipy.cluster.hierarchy as sch
_scipy__cluster__hierarchy_imported = True
except ImportError:
_scipy__cluster__hierarchy_imported = False
try:
import scipy
import scipy.stats
_scipy_imported = True
except ImportError:
_scipy_imported = False
from plotly.tools import FigureFactory
def my_map_face2color(face, colormap, vmin, vmax):
"""
Normalize facecolor values by vmin/vmax and return rgb-color strings
    This function takes a face value along with a colormap and a minimum
    (vmin) and maximum (vmax) range of possible mean distances for the
    given parametrized surface. It returns an rgb color based on where the
    value falls between vmin and vmax.
"""
if vmin >= vmax:
vmax = vmin + 1
if len(colormap) == 1:
# color each triangle face with the same color in colormap
face_color = colormap[0]
face_color = FigureFactory._convert_to_RGB_255(face_color)
face_color = FigureFactory._label_rgb(face_color)
else:
if face >= vmax:
# pick last color in colormap
face_color = colormap[-1]
face_color = FigureFactory._convert_to_RGB_255(face_color)
face_color = FigureFactory._label_rgb(face_color)
else:
# find the normalized distance t of a triangle face between
# vmin and vmax where the distance is between 0 and 1
t = (face - vmin) / float((vmax - vmin))
low_color_index = int(t / (1./(len(colormap) - 1)))
face_color = FigureFactory._find_intermediate_color(
colormap[low_color_index],
colormap[low_color_index + 1],
t * (len(colormap) - 1) - low_color_index
)
face_color = FigureFactory._convert_to_RGB_255(face_color)
face_color = FigureFactory._label_rgb(face_color)
return face_color
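# Minimal usage sketch (ours): with a two-colour map from black to white, a
# face value halfway between vmin and vmax should come back as a mid-grey
# 'rgb(...)' string. Assumes the same colormap format (tuples of floats in
# [0, 1]) that my_trisurf() passes in below.
def _map_face2color_demo():
    colormap = [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]
    color = my_map_face2color(0.5, colormap, vmin=0.0, vmax=1.0)
    assert color.startswith('rgb(')
    return color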
def my_trisurf(x, y, z, simplices, show_colorbar, edges_color,
colormap=None, color_func=None, plot_edges=False,
x_edge=None, y_edge=None, z_edge=None, facecolor=None, data_list=False,
               minmax_values=None):
"""
Refer to FigureFactory.create_trisurf() for docstring
"""
# numpy import check
if _numpy_imported is False:
        raise ImportError("my_trisurf() requires numpy to be imported.")
import numpy as np
from plotly.graph_objs import graph_objs
points3D = np.vstack((x, y, z)).T
simplices = np.atleast_2d(simplices)
# vertices of the surface triangles
tri_vertices = points3D[simplices]
# Define colors for the triangle faces
if color_func is None:
# mean values of z-coordinates of triangle vertices
mean_dists = tri_vertices[:, :, 2].mean(-1)
elif isinstance(color_func, (list, np.ndarray)):
# Pre-computed list / array of values to map onto color
if len(color_func) != len(simplices):
raise ValueError("If color_func is a list/array, it must "
"be the same length as simplices.")
# convert all colors in color_func to rgb
for index in range(len(color_func)):
if isinstance(color_func[index], str):
if '#' in color_func[index]:
foo = FigureFactory._hex_to_rgb(color_func[index])
color_func[index] = FigureFactory._label_rgb(foo)
if isinstance(color_func[index], tuple):
foo = FigureFactory._convert_to_RGB_255(color_func[index])
color_func[index] = FigureFactory._label_rgb(foo)
mean_dists = np.asarray(color_func)
else:
# apply user inputted function to calculate
# custom coloring for triangle vertices
mean_dists = []
for triangle in tri_vertices:
dists = []
for vertex in triangle:
dist = color_func(vertex[0], vertex[1], vertex[2])
dists.append(dist)
mean_dists.append(np.mean(dists))
mean_dists = np.asarray(mean_dists)
# Check if facecolors are already strings and can be skipped
if isinstance(mean_dists[0], str):
facecolor = mean_dists
else:
        if minmax_values is None:
            min_mean_dists = np.min(mean_dists)
            max_mean_dists = np.max(mean_dists)
        else:
            min_mean_dists = minmax_values[0]
            max_mean_dists = minmax_values[1]
if facecolor is None:
facecolor = []
for index in range(len(mean_dists)):
color = my_map_face2color(mean_dists[index],
colormap,
min_mean_dists,
max_mean_dists)
facecolor.append(color)
# Make sure facecolor is a list so output is consistent across Pythons
facecolor = list(facecolor)
ii, jj, kk = simplices.T
triangles = graph_objs.Mesh3d(x=x, y=y, z=z, facecolor=facecolor,
i=ii, j=jj, k=kk, name='', hoverinfo='skip')
mean_dists_are_numbers = not isinstance(mean_dists[0], str)
if mean_dists_are_numbers and show_colorbar is True:
# make a colorscale from the colors
colorscale = FigureFactory._make_colorscale(colormap)
colorscale = FigureFactory._convert_colorscale_to_rgb(colorscale)
colorbar = graph_objs.Scatter3d(
            x=[x[0]],  # !!! works around a bug in the original file !
y=[y[0]],
z=[z[0]],
mode='markers',
marker=dict(
size=0.1,
color=[min_mean_dists, max_mean_dists],
colorscale=colorscale,
showscale=True,
colorbar = dict(
len = 0.5
),
),
hoverinfo='None',
showlegend=False
)
# the triangle sides are not plotted
if plot_edges is False:
if mean_dists_are_numbers and show_colorbar is True:
return graph_objs.Data([triangles, colorbar])
else:
return graph_objs.Data([triangles])
    # Define the lists x_edge, y_edge and z_edge of the x, y and z
    # coordinates of the edge end points for each triangle; a None entry
    # separates the data of two consecutive triangles.
is_none = [ii is None for ii in [x_edge, y_edge, z_edge]]
if any(is_none):
if not all(is_none):
raise ValueError("If any (x_edge, y_edge, z_edge) is None, "
"all must be None")
else:
x_edge = []
y_edge = []
z_edge = []
# Pull indices we care about, then add a None column to separate tris
ixs_triangles = [0, 1, 2, 0]
pull_edges = tri_vertices[:, ixs_triangles, :]
x_edge_pull = np.hstack([pull_edges[:, :, 0],
np.tile(None, [pull_edges.shape[0], 1])])
y_edge_pull = np.hstack([pull_edges[:, :, 1],
np.tile(None, [pull_edges.shape[0], 1])])
z_edge_pull = np.hstack([pull_edges[:, :, 2],
np.tile(None, [pull_edges.shape[0], 1])])
# Now unravel the edges into a 1-d vector for plotting
x_edge = np.hstack([x_edge, x_edge_pull.reshape([1, -1])[0]])
y_edge = np.hstack([y_edge, y_edge_pull.reshape([1, -1])[0]])
z_edge = np.hstack([z_edge, z_edge_pull.reshape([1, -1])[0]])
if not (len(x_edge) == len(y_edge) == len(z_edge)):
raise exceptions.PlotlyError("The lengths of x_edge, y_edge and "
"z_edge are not the same.")
# define the lines for plotting
lines = graph_objs.Scatter3d(
x=x_edge, y=y_edge, z=z_edge, mode='lines',
line=graph_objs.Line(
color=edges_color,
width=1.5
),
showlegend=False
)
    if data_list:
if mean_dists_are_numbers and show_colorbar is True:
return [triangles, lines, colorbar]
else:
return [triangles, lines]
    else:
if mean_dists_are_numbers and show_colorbar is True:
return graph_objs.Data([triangles, lines, colorbar])
else:
return graph_objs.Data([triangles, lines])
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import webob
from webob import exc
from cinder.api.contrib import admin_actions
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api.contrib import test_backups
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import cast_as_call
from cinder.tests.unit import fake_snapshot
from cinder.volume import api as volume_api
CONF = cfg.CONF
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class AdminActionsTest(test.TestCase):
def setUp(self):
super(AdminActionsTest, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(lock_path=self.tempdir,
group='oslo_concurrency')
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake')
self.volume_api = volume_api.API()
cast_as_call.mock_cast_as_call(self.volume_api.volume_rpcapi.client)
cast_as_call.mock_cast_as_call(self.volume_api.scheduler_rpcapi.client)
def _issue_volume_reset(self, ctx, volume, updated_status):
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dump_as_bytes({'os-reset_status': updated_status})
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
return resp
def _issue_snapshot_reset(self, ctx, snapshot, updated_status):
req = webob.Request.blank('/v2/fake/snapshots/%s/action' %
snapshot.id)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dump_as_bytes({'os-reset_status': updated_status})
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
return resp
def _issue_backup_reset(self, ctx, backup, updated_status):
req = webob.Request.blank('/v2/fake/backups/%s/action' % backup['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dump_as_bytes({'os-reset_status': updated_status})
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
return resp
def _create_volume(self, context, updates=None):
db_volume = {'status': 'available',
'host': 'test',
'availability_zone': 'fake_zone',
'attach_status': 'detached'}
if updates:
db_volume.update(updates)
volume = objects.Volume(context=context, **db_volume)
volume.create()
return volume
def test_valid_updates(self):
vac = admin_actions.VolumeAdminController()
vac.validate_update({'status': 'creating'})
vac.validate_update({'status': 'available'})
vac.validate_update({'status': 'deleting'})
vac.validate_update({'status': 'error'})
vac.validate_update({'status': 'error_deleting'})
vac.validate_update({'attach_status': 'detached'})
vac.validate_update({'attach_status': 'attached'})
vac.validate_update({'migration_status': 'migrating'})
vac.validate_update({'migration_status': 'error'})
vac.validate_update({'migration_status': 'completing'})
vac.validate_update({'migration_status': 'none'})
vac.validate_update({'migration_status': 'starting'})
def test_reset_attach_status(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'attach_status': 'detached'})
resp = self._issue_volume_reset(ctx,
volume,
{'attach_status': 'attached'})
self.assertEqual(202, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('attached', volume['attach_status'])
def test_reset_attach_invalid_status(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'attach_status': 'detached'})
resp = self._issue_volume_reset(ctx,
volume,
{'attach_status': 'bogus-status'})
self.assertEqual(400, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('detached', volume['attach_status'])
def test_reset_migration_invalid_status(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'migration_status': None})
resp = self._issue_volume_reset(ctx,
volume,
{'migration_status': 'bogus-status'})
self.assertEqual(400, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertIsNone(volume['migration_status'])
def test_reset_migration_status(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'migration_status': None})
resp = self._issue_volume_reset(ctx,
volume,
{'migration_status': 'migrating'})
self.assertEqual(202, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('migrating', volume['migration_status'])
def test_reset_status_as_admin(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available'})
resp = self._issue_volume_reset(ctx,
volume,
{'status': 'error'})
self.assertEqual(202, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('error', volume['status'])
def test_reset_status_as_non_admin(self):
ctx = context.RequestContext('fake', 'fake')
volume = db.volume_create(context.get_admin_context(),
{'status': 'error', 'size': 1})
resp = self._issue_volume_reset(ctx,
volume,
{'status': 'error'})
# request is not authorized
self.assertEqual(403, resp.status_int)
volume = db.volume_get(context.get_admin_context(), volume['id'])
# status is still 'error'
self.assertEqual('error', volume['status'])
def test_backup_reset_status_as_admin(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available',
'user_id': 'user',
'project_id': 'project'})
backup = db.backup_create(ctx, {'status': 'available',
'size': 1,
'volume_id': volume['id'],
'user_id': 'user',
'project_id': 'project'})
resp = self._issue_backup_reset(ctx,
backup,
{'status': 'error'})
self.assertEqual(202, resp.status_int)
def test_backup_reset_status_as_non_admin(self):
ctx = context.RequestContext('fake', 'fake')
backup = db.backup_create(ctx, {'status': 'available',
'size': 1,
'volume_id': "fakeid"})
resp = self._issue_backup_reset(ctx,
backup,
{'status': 'error'})
# request is not authorized
self.assertEqual(403, resp.status_int)
def test_backup_reset_status(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1})
backup = db.backup_create(ctx, {'status': 'available',
'volume_id': volume['id'],
'user_id': 'user',
'project_id': 'project'})
resp = self._issue_backup_reset(ctx,
backup,
{'status': 'error'})
self.assertEqual(202, resp.status_int)
def test_invalid_status_for_backup(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1})
backup = db.backup_create(ctx, {'status': 'available',
'volume_id': volume['id']})
resp = self._issue_backup_reset(ctx,
backup,
{'status': 'restoring'})
self.assertEqual(400, resp.status_int)
def test_backup_reset_status_with_invalid_backup(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1})
backup = db.backup_create(ctx, {'status': 'available',
'volume_id': volume['id'],
'user_id': 'user',
'project_id': 'project'})
backup['id'] = 'fake_id'
resp = self._issue_backup_reset(ctx,
backup,
{'status': 'error'})
# Should raise 404 if backup doesn't exist.
self.assertEqual(404, resp.status_int)
def test_malformed_reset_status_body(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
resp = self._issue_volume_reset(ctx,
volume,
{'x-status': 'bad'})
self.assertEqual(400, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('available', volume['status'])
def test_invalid_status_for_volume(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
resp = self._issue_volume_reset(ctx,
volume,
{'status': 'invalid'})
self.assertEqual(400, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('available', volume['status'])
def test_reset_status_for_missing_volume(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/%s/action' %
'missing-volume-id')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
body = {'os-reset_status': {'status': 'available'}}
req.body = jsonutils.dump_as_bytes(body)
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
self.assertEqual(404, resp.status_int)
self.assertRaises(exception.NotFound, db.volume_get, ctx,
'missing-volume-id')
def test_reset_attached_status(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1,
'attach_status': 'attached'})
resp = self._issue_volume_reset(ctx,
volume,
{'status': 'available',
'attach_status': 'detached'})
self.assertEqual(202, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('detached', volume['attach_status'])
self.assertEqual('available', volume['status'])
def test_invalid_reset_attached_status(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1,
'attach_status': 'detached'})
resp = self._issue_volume_reset(ctx,
volume,
{'status': 'available',
'attach_status': 'invalid'})
self.assertEqual(400, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('available', volume['status'])
self.assertEqual('detached', volume['attach_status'])
def test_snapshot_reset_status(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1,
'availability_zone': 'test',
'attach_status': 'detached'})
kwargs = {
'volume_id': volume['id'],
'cgsnapshot_id': None,
'user_id': ctx.user_id,
'project_id': ctx.project_id,
'status': 'error_deleting',
'progress': '0%',
'volume_size': volume['size'],
'metadata': {}
}
snapshot = objects.Snapshot(context=ctx, **kwargs)
snapshot.create()
self.addCleanup(snapshot.destroy)
resp = self._issue_snapshot_reset(ctx, snapshot, {'status': 'error'})
self.assertEqual(202, resp.status_int)
snapshot = objects.Snapshot.get_by_id(ctx, snapshot['id'])
self.assertEqual('error', snapshot.status)
def test_invalid_status_for_snapshot(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1})
snapshot = objects.Snapshot(ctx, status='available',
volume_id=volume['id'])
snapshot.create()
self.addCleanup(snapshot.destroy)
resp = self._issue_snapshot_reset(ctx, snapshot,
{'status': 'attaching'})
self.assertEqual(400, resp.status_int)
self.assertEqual('available', snapshot.status)
def test_force_delete(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is creating
volume = self._create_volume(ctx, {'size': 1, 'host': None})
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dump_as_bytes({'os-force_delete': {}})
# attach admin context to request
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
# request is accepted
self.assertEqual(202, resp.status_int)
# volume is deleted
self.assertRaises(exception.NotFound, objects.Volume.get_by_id, ctx,
volume.id)
@mock.patch.object(volume_api.API, 'delete_snapshot', return_value=True)
@mock.patch('cinder.objects.Snapshot.get_by_id')
@mock.patch.object(db, 'snapshot_get')
@mock.patch.object(db, 'volume_get')
def test_force_delete_snapshot(self, volume_get, snapshot_get, get_by_id,
delete_snapshot):
ctx = context.RequestContext('admin', 'fake', True)
volume = stubs.stub_volume(1)
snapshot = stubs.stub_snapshot(1)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
volume_get.return_value = volume
snapshot_get.return_value = snapshot
get_by_id.return_value = snapshot_obj
path = '/v2/fake/snapshots/%s/action' % snapshot['id']
req = webob.Request.blank(path)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dump_as_bytes({'os-force_delete': {}})
# attach admin context to request
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
self.assertEqual(202, resp.status_int)
def test_force_detach_instance_attached_volume(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'size': 1})
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.addCleanup(svc.stop)
self.volume_api.reserve_volume(ctx, volume)
mountpoint = '/dev/vbd'
attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
None, mountpoint, 'rw')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('in-use', volume['status'])
self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('False', admin_metadata[0]['value'])
self.assertEqual('attached_mode', admin_metadata[1]['key'])
self.assertEqual('rw', admin_metadata[1]['value'])
conn_info = self.volume_api.initialize_connection(ctx,
volume,
connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
# build request to force detach
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# request status of 'error'
body = {'os-force_detach': {'attachment_id': attachment['id'],
'connector': connector}}
req.body = jsonutils.dump_as_bytes(body)
# attach admin context to request
req.environ['cinder.context'] = ctx
# make request
resp = req.get_response(app())
# request is accepted
self.assertEqual(202, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertRaises(exception.VolumeAttachmentNotFound,
db.volume_attachment_get,
ctx, attachment['id'])
# status changed to 'available'
self.assertEqual('available', volume['status'])
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
        self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('False', admin_metadata[0]['value'])
def test_force_detach_host_attached_volume(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'size': 1})
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.addCleanup(svc.stop)
self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
host_name = 'fake-host'
attachment = self.volume_api.attach(ctx, volume, None, host_name,
mountpoint, 'ro')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('in-use', volume['status'])
self.assertIsNone(attachment['instance_uuid'])
self.assertEqual(host_name, attachment['attached_host'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('False', admin_metadata[0]['value'])
self.assertEqual('attached_mode', admin_metadata[1]['key'])
self.assertEqual('ro', admin_metadata[1]['value'])
conn_info = self.volume_api.initialize_connection(ctx,
volume, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
# build request to force detach
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# request status of 'error'
body = {'os-force_detach': {'attachment_id': attachment['id'],
'connector': connector}}
req.body = jsonutils.dump_as_bytes(body)
# attach admin context to request
req.environ['cinder.context'] = ctx
# make request
resp = req.get_response(app())
# request is accepted
self.assertEqual(202, resp.status_int)
volume = db.volume_get(ctx, volume['id'])
self.assertRaises(exception.VolumeAttachmentNotFound,
db.volume_attachment_get,
ctx, attachment['id'])
# status changed to 'available'
self.assertEqual('available', volume['status'])
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('False', admin_metadata[0]['value'])
def test_volume_force_detach_raises_remote_error(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'size': 1})
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.addCleanup(svc.stop)
self.volume_api.reserve_volume(ctx, volume)
mountpoint = '/dev/vbd'
attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
None, mountpoint, 'rw')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('in-use', volume['status'])
self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('False', admin_metadata[0]['value'])
self.assertEqual('attached_mode', admin_metadata[1]['key'])
self.assertEqual('rw', admin_metadata[1]['value'])
conn_info = self.volume_api.initialize_connection(ctx,
volume,
connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
# build request to force detach
volume_remote_error = \
messaging.RemoteError(exc_type='VolumeAttachmentNotFound')
with mock.patch.object(volume_api.API, 'detach',
side_effect=volume_remote_error):
req = webob.Request.blank('/v2/fake/volumes/%s/action' %
volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
body = {'os-force_detach': {'attachment_id': 'fake'}}
req.body = jsonutils.dump_as_bytes(body)
# attach admin context to request
req.environ['cinder.context'] = ctx
# make request
resp = req.get_response(app())
self.assertEqual(400, resp.status_int)
# test for KeyError when missing connector
volume_remote_error = (
messaging.RemoteError(exc_type='KeyError'))
with mock.patch.object(volume_api.API, 'detach',
side_effect=volume_remote_error):
req = webob.Request.blank('/v2/fake/volumes/%s/action' %
volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
body = {'os-force_detach': {'attachment_id': 'fake'}}
req.body = jsonutils.dump_as_bytes(body)
# attach admin context to request
req.environ['cinder.context'] = ctx
# make request
self.assertRaises(messaging.RemoteError,
req.get_response,
app())
# test for VolumeBackendAPIException
volume_remote_error = (
messaging.RemoteError(exc_type='VolumeBackendAPIException'))
with mock.patch.object(volume_api.API, 'detach',
side_effect=volume_remote_error):
req = webob.Request.blank('/v2/fake/volumes/%s/action' %
volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
body = {'os-force_detach': {'attachment_id': 'fake',
'connector': connector}}
req.body = jsonutils.dump_as_bytes(body)
# attach admin context to request
req.environ['cinder.context'] = ctx
# make request
self.assertRaises(messaging.RemoteError,
req.get_response,
app())
def test_volume_force_detach_raises_db_error(self):
# In case of DB error 500 error code is returned to user
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'size': 1})
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.addCleanup(svc.stop)
self.volume_api.reserve_volume(ctx, volume)
mountpoint = '/dev/vbd'
attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
None, mountpoint, 'rw')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEqual('in-use', volume['status'])
self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('False', admin_metadata[0]['value'])
self.assertEqual('attached_mode', admin_metadata[1]['key'])
self.assertEqual('rw', admin_metadata[1]['value'])
conn_info = self.volume_api.initialize_connection(ctx,
volume,
connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
# build request to force detach
volume_remote_error = \
messaging.RemoteError(exc_type='DBError')
with mock.patch.object(volume_api.API, 'detach',
side_effect=volume_remote_error):
req = webob.Request.blank('/v2/fake/volumes/%s/action' %
volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
body = {'os-force_detach': {'attachment_id': 'fake',
'connector': connector}}
req.body = jsonutils.dump_as_bytes(body)
# attach admin context to request
req.environ['cinder.context'] = ctx
# make request
self.assertRaises(messaging.RemoteError,
req.get_response,
app())
def test_attach_in_used_volume_by_instance(self):
"""Test that attaching to an in-use volume fails."""
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'size': 1})
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.addCleanup(svc.stop)
self.volume_api.reserve_volume(ctx, volume)
conn_info = self.volume_api.initialize_connection(ctx,
volume, connector)
self.volume_api.attach(ctx, volume, fakes.get_fake_uuid(), None,
'/dev/vbd0', 'rw')
self.assertEqual('rw', conn_info['data']['access_mode'])
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
fakes.get_fake_uuid(),
None,
'/dev/vdb1',
'ro')
def test_attach_in_used_volume_by_host(self):
"""Test that attaching to an in-use volume fails."""
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'size': 1})
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.addCleanup(svc.stop)
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, connector)
self.volume_api.attach(ctx, volume, None, 'fake_host1',
'/dev/vbd0', 'rw')
conn_info = self.volume_api.initialize_connection(ctx,
volume, connector)
conn_info['data']['access_mode'] = 'rw'
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
None,
'fake_host2',
'/dev/vbd1',
'ro')
def test_invalid_iscsi_connector(self):
"""Test connector without the initiator (required by iscsi driver)."""
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'size': 1})
connector = {}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.addCleanup(svc.stop)
self.assertRaises(exception.InvalidInput,
self.volume_api.initialize_connection,
ctx, volume, connector)
def test_attach_attaching_volume_with_different_instance(self):
"""Test that attaching volume reserved for another instance fails."""
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'size': 1})
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.addCleanup(svc.stop)
self.volume_api.reserve_volume(ctx, volume)
values = {'volume_id': volume['id'],
'attach_status': 'attaching',
'attach_time': timeutils.utcnow(),
'instance_uuid': 'abc123',
}
db.volume_attach(ctx, values)
db.volume_admin_metadata_update(ctx, volume['id'],
{"attached_mode": 'rw'}, False)
mountpoint = '/dev/vbd'
attachment = self.volume_api.attach(ctx, volume,
stubs.FAKE_UUID, None,
mountpoint, 'rw')
self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
        self.assertEqual(volume['id'], attachment['volume_id'])
self.assertEqual('attached', attachment['attach_status'])
def test_attach_attaching_volume_with_different_mode(self):
"""Test that attaching volume reserved for another mode fails."""
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'size': 1})
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.addCleanup(svc.stop)
values = {'status': 'attaching',
'instance_uuid': fakes.get_fake_uuid()}
db.volume_update(ctx, volume['id'], values)
db.volume_admin_metadata_update(ctx, volume['id'],
{"attached_mode": 'rw'}, False)
mountpoint = '/dev/vbd'
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
values['instance_uuid'],
None,
mountpoint,
'ro')
def _migrate_volume_prep(self):
admin_ctx = context.get_admin_context()
# create volume's current host and the destination host
db.service_create(admin_ctx,
{'host': 'test',
'topic': CONF.volume_topic,
'created_at': timeutils.utcnow()})
db.service_create(admin_ctx,
{'host': 'test2',
'topic': CONF.volume_topic,
'created_at': timeutils.utcnow()})
# current status is available
volume = self._create_volume(admin_ctx)
return volume
def _migrate_volume_exec(self, ctx, volume, host, expected_status,
force_host_copy=False):
admin_ctx = context.get_admin_context()
# build request to migrate to host
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume.id)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
body = {'os-migrate_volume': {'host': host,
'force_host_copy': force_host_copy}}
req.body = jsonutils.dump_as_bytes(body)
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
# verify status
self.assertEqual(expected_status, resp.status_int)
volume = objects.Volume.get_by_id(admin_ctx, volume.id)
return volume
def test_migrate_volume_success(self):
expected_status = 202
host = 'test2'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
volume = self._migrate_volume_exec(ctx, volume, host, expected_status)
self.assertEqual('starting', volume['migration_status'])
def test_migrate_volume_fail_replication(self):
expected_status = 400
host = 'test2'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
# current status is available
volume = self._create_volume(ctx, {'provider_location': '',
'attach_status': '',
'replication_status': 'active'})
volume = self._migrate_volume_exec(ctx, volume, host, expected_status)
def test_migrate_volume_as_non_admin(self):
expected_status = 403
host = 'test2'
ctx = context.RequestContext('fake', 'fake')
volume = self._migrate_volume_prep()
self._migrate_volume_exec(ctx, volume, host, expected_status)
def test_migrate_volume_without_host_parameter(self):
expected_status = 400
host = 'test3'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
# build request to migrate without host
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
body = {'os-migrate_volume': {'host': host,
'force_host_copy': False}}
req.body = jsonutils.dump_as_bytes(body)
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
# verify status
self.assertEqual(expected_status, resp.status_int)
def test_migrate_volume_host_no_exist(self):
expected_status = 400
host = 'test3'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
self._migrate_volume_exec(ctx, volume, host, expected_status)
def test_migrate_volume_same_host(self):
expected_status = 400
host = 'test'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
self._migrate_volume_exec(ctx, volume, host, expected_status)
def test_migrate_volume_migrating(self):
expected_status = 400
host = 'test2'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
model_update = {'migration_status': 'migrating'}
volume = db.volume_update(ctx, volume['id'], model_update)
self._migrate_volume_exec(ctx, volume, host, expected_status)
def test_migrate_volume_with_snap(self):
expected_status = 400
host = 'test2'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
snap = objects.Snapshot(ctx, volume_id=volume['id'])
snap.create()
self.addCleanup(snap.destroy)
self._migrate_volume_exec(ctx, volume, host, expected_status)
def test_migrate_volume_bad_force_host_copy(self):
expected_status = 400
host = 'test2'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
self._migrate_volume_exec(ctx, volume, host, expected_status,
force_host_copy='foo')
def _migrate_volume_comp_exec(self, ctx, volume, new_volume, error,
expected_status, expected_id, no_body=False):
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
body = {'new_volume': new_volume['id'], 'error': error}
if no_body:
body = {'': body}
else:
body = {'os-migrate_volume_completion': body}
req.body = jsonutils.dump_as_bytes(body)
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
resp_dict = resp.json
# verify status
self.assertEqual(expected_status, resp.status_int)
if expected_id:
self.assertEqual(expected_id, resp_dict['save_volume_id'])
else:
self.assertNotIn('save_volume_id', resp_dict)
def test_migrate_volume_comp_as_non_admin(self):
admin_ctx = context.get_admin_context()
volume = db.volume_create(admin_ctx, {'id': 'fake1'})
new_volume = db.volume_create(admin_ctx, {'id': 'fake2'})
expected_status = 403
expected_id = None
ctx = context.RequestContext('fake', 'fake')
self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
expected_status, expected_id)
def test_migrate_volume_comp_no_mig_status(self):
admin_ctx = context.get_admin_context()
volume1 = self._create_volume(admin_ctx, {'migration_status': 'foo'})
volume2 = self._create_volume(admin_ctx, {'migration_status': None})
expected_status = 400
expected_id = None
ctx = context.RequestContext('admin', 'fake', True)
self._migrate_volume_comp_exec(ctx, volume1, volume2, False,
expected_status, expected_id)
self._migrate_volume_comp_exec(ctx, volume2, volume1, False,
expected_status, expected_id)
def test_migrate_volume_comp_bad_mig_status(self):
admin_ctx = context.get_admin_context()
volume1 = self._create_volume(admin_ctx,
{'migration_status': 'migrating'})
volume2 = self._create_volume(admin_ctx,
{'migration_status': 'target:foo'})
expected_status = 400
expected_id = None
ctx = context.RequestContext('admin', 'fake', True)
self._migrate_volume_comp_exec(ctx, volume1, volume2, False,
expected_status, expected_id)
def test_migrate_volume_comp_no_action(self):
admin_ctx = context.get_admin_context()
volume = db.volume_create(admin_ctx, {'id': 'fake1'})
new_volume = db.volume_create(admin_ctx, {'id': 'fake2'})
expected_status = 400
expected_id = None
ctx = context.RequestContext('fake', 'fake')
self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
expected_status, expected_id, True)
def test_migrate_volume_comp_from_nova(self):
admin_ctx = context.get_admin_context()
volume = self._create_volume(admin_ctx, {'status': 'in-use',
'migration_status': None,
'attach_status': 'attached'})
new_volume = self._create_volume(admin_ctx,
{'migration_status': None,
'attach_status': 'detached'})
expected_status = 200
expected_id = new_volume.id
ctx = context.RequestContext('admin', 'fake', True)
self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
expected_status, expected_id)
def test_backup_reset_valid_updates(self):
vac = admin_actions.BackupAdminController()
vac.validate_update({'status': 'available'})
vac.validate_update({'status': 'error'})
self.assertRaises(exc.HTTPBadRequest,
vac.validate_update,
{'status': 'restoring'})
self.assertRaises(exc.HTTPBadRequest,
vac.validate_update,
{'status': 'creating'})
@mock.patch('cinder.backup.api.API._check_support_to_force_delete')
def _force_delete_backup_util(self, test_status, mock_check_support):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
mock_check_support.return_value = True
# current status is dependent on argument: test_status.
id = test_backups.BackupsAPITestCase._create_backup(status=test_status)
req = webob.Request.blank('/v2/fake/backups/%s/action' % id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = jsonutils.dump_as_bytes({'os-force_delete': {}})
req.environ['cinder.context'] = ctx
res = req.get_response(app())
self.assertEqual(202, res.status_int)
self.assertEqual('deleting',
test_backups.BackupsAPITestCase.
_get_backup_attrib(id, 'status'))
db.backup_destroy(context.get_admin_context(), id)
def test_delete_backup_force_when_creating(self):
self._force_delete_backup_util('creating')
def test_delete_backup_force_when_deleting(self):
self._force_delete_backup_util('deleting')
def test_delete_backup_force_when_restoring(self):
self._force_delete_backup_util('restoring')
def test_delete_backup_force_when_available(self):
self._force_delete_backup_util('available')
def test_delete_backup_force_when_error(self):
self._force_delete_backup_util('error')
def test_delete_backup_force_when_error_deleting(self):
self._force_delete_backup_util('error_deleting')
@mock.patch('cinder.backup.rpcapi.BackupAPI.check_support_to_force_delete',
return_value=False)
def test_delete_backup_force_when_not_supported(self, mock_check_support):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
self.override_config('backup_driver', 'cinder.backup.drivers.ceph')
id = test_backups.BackupsAPITestCase._create_backup()
req = webob.Request.blank('/v2/fake/backups/%s/action' % id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = jsonutils.dump_as_bytes({'os-force_delete': {}})
req.environ['cinder.context'] = ctx
res = req.get_response(app())
self.assertEqual(405, res.status_int)
|
|
import argparse
import datetime
import time
from argh import arg
from six import iteritems
import pnc_cli.cli_types as types
import pnc_cli.utils as utils
from pnc_cli.swagger_client import ProductMilestoneRest
from pnc_cli.swagger_client import ProductmilestonesApi
from pnc_cli.swagger_client import ProductversionsApi
import pnc_cli.user_config as uc
productversions_api = ProductversionsApi(uc.user.get_api_client())
milestones_api = ProductmilestonesApi(uc.user.get_api_client())
def create_milestone_object(**kwargs):
created_milestone = ProductMilestoneRest()
for key, value in iteritems(kwargs):
setattr(created_milestone, key, value)
return created_milestone
def check_date_order(start_date, end_date):
if not start_date <= end_date:
raise argparse.ArgumentTypeError("Error: start date must be before end date")
def get_product_version_from_milestone(milestone_id):
return get_milestone(milestone_id).product_version_id
def unique_version_value(parent_product_version_id, version):
parent_product_version = utils.checked_api_call(productversions_api, 'get_specific',
id=parent_product_version_id).content
for milestone in parent_product_version.product_milestones:
if milestone.version == version:
raise argparse.ArgumentTypeError("Error: version already being used for another milestone")
@arg("-p", "--page-size", help="Limit the amount of ProductReleases returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_milestones(page_size=200, page_index=0, q="", sort=""):
"""
List all ProductMilestones
"""
response = utils.checked_api_call(milestones_api, 'get_all', page_size=page_size, page_index=page_index, q=q, sort=sort)
if response:
return utils.format_json_list(response.content)
@arg("product_version_id", help="ID of the ProductVersion to create a ProductMilestone from.",
type=types.existing_product_version)
@arg("version", help="Version of the ProductMilestone. Will be appended to the version from product_version_id.",
type=types.valid_version_create)
@arg("starting_date", help="Planned starting date for the ProductMilestone.", type=types.valid_date)
@arg("planned_end_date", help="Planned date for the end of this ProductMilestone.", type=types.valid_date)
@arg("issue_tracker_url", help="Issue tracker URL for this ProductMilestone.", type=types.valid_url)
def create_milestone(**kwargs):
"""
Create a new ProductMilestone
"""
check_date_order(kwargs.get('starting_date'), kwargs.get('planned_end_date'))
base_version = str(productversions_api.get_specific(
id=kwargs.get('product_version_id')).content.version)
kwargs['version'] = base_version + "." + kwargs.get('version')
unique_version_value(kwargs.get('product_version_id'), kwargs['version'])
created_milestone = create_milestone_object(**kwargs)
response = utils.checked_api_call(
milestones_api,
'create_new',
body=created_milestone)
if response:
return utils.format_json(response.content)
@arg("id", help="ProductVersion ID to retrieve milestones for.", type=types.existing_product_version)
def list_milestones_for_version(id):
"""
List ProductMilestones for a specific ProductVersion
"""
response = utils.checked_api_call(
milestones_api,
'get_all_by_product_version_id',
version_id=id)
if response:
return utils.format_json_list(response.content)
@arg("id", help="ProductMilestone ID to retrieve.", type=types.existing_product_milestone)
def get_milestone(id):
response = utils.checked_api_call(milestones_api, 'get_specific', id=id)
return utils.format_json(response.content)
@arg("id", help="ProductMilestone ID to update.", type=types.existing_product_milestone)
@arg("-v", "--version", help="New version for the ProductMilestone.", type=types.valid_version_update)
@arg("-sd", "--starting-date", help="New start date for the ProductMilestone.", type=types.valid_date)
@arg("-ped", "--planned-end-date", help="New release date for the ProductMilestone.", type=types.valid_date)
def update_milestone(id, **kwargs):
"""
Update a ProductMilestone
"""
existing_milestone = utils.checked_api_call(milestones_api, 'get_specific', id=id).content
existing_start_date = existing_milestone.starting_date
existing_end_date = existing_milestone.planned_end_date
updated_start_date = kwargs.get('starting_date')
updated_ending_date = kwargs.get('planned_end_date')
if updated_start_date and updated_ending_date:
check_date_order(updated_start_date, updated_ending_date)
elif updated_start_date:
check_date_order(updated_start_date, existing_end_date)
elif updated_ending_date:
check_date_order(existing_start_date, updated_ending_date)
if kwargs.get('version'):
unique_version_value(get_product_version_from_milestone(id), kwargs.get('version'))
for key, value in iteritems(kwargs):
setattr(existing_milestone, key, value)
response = utils.checked_api_call(
milestones_api, 'update', id=id, body=existing_milestone)
if response:
return utils.format_json(response.content)
@arg("id", help="ProductMilestone ID to update.", type=types.existing_product_milestone)
@arg("-w", "--wait", help="Wait for release process to finish", action='store_true')
def close_milestone(id, **kwargs):
"""
Close a milestone. This triggers its release process.
The user can optionally specify the release-date, otherwise today's date is
used.
If the wait parameter is specified and set to True, upon closing the milestone,
we'll periodically check that the release being processed is done.
Required:
- id: int
Optional:
- wait key: bool
"""
existing_milestone = utils.checked_api_call(milestones_api, 'get_specific', id=id).content
response = utils.checked_api_call(
milestones_api, 'close_milestone', id=id, body=existing_milestone)
latest_release = utils.checked_api_call(milestones_api, 'get_latest_release', id=id).content
    if kwargs.get('wait'):
while latest_release.status == 'IN_PROGRESS':
print("Latest release for milestone is in progress, waiting till it finishes...")
time.sleep(60)
latest_release = utils.checked_api_call(milestones_api, 'get_latest_release', id=id).content
print("Status of release for milestone: " + latest_release.status)
if response:
return utils.format_json(response.content)
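# Illustrative usage sketch (the milestone id 42 is hypothetical):
#     close_milestone(42)             # close and return the close_milestone response
#     close_milestone(42, wait=True)  # additionally poll get_latest_release every 60s
#                                     # until its status is no longer IN_PROGRESS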
@arg("id", help="ID of the ProductMilestone to list distributed artifacts for.", type=types.existing_product_milestone)
@arg("-p", "--page-size", help="Limit the amount of distributed artifacts returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_distributed_artifacts(id, page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(milestones_api, 'get_distributed_artifacts', id=id, page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return utils.format_json_list(response.content)
@arg('id', help="ID of the ProductMilestone to add a distributed artifact to.", type=types.existing_product_milestone)
#TODO: come up with a way to check that a given artifact ID exists. Currently the REST API doesn't have a method available like
# get_specific for the artifacts
@arg('artifact_id', help='ID of the Artifact to add.', type=types.existing_built_artifact)
def add_distributed_artifact(id, artifact_id):
pass
@arg('id', help="ID of the ProductMilestone to remove the distributed artifact from.", type=types.existing_product_milestone)
@arg('artifact_id', help='ID of the distributed artifact to remove.', type=types.existing_built_artifact)
def remove_distributed_artifact(id, artifact_id):
pass
@arg("id", help="ID of the ProductMilestone to list distributed builds for.", type=types.existing_product_milestone)
@arg("-p", "--page-size", help="Limit the amount of distributed builds returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_distributed_builds(id, page_size=200, page_index=0, sort='', q=''):
response = utils.checked_api_call(milestones_api, 'get_distributed_builds', id=id, page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return utils.format_json_list(response.content)
|
|
""" test script to verify the CG method works, and time it versus cholesky """
import argparse
import json
import logging
from collections import defaultdict
import matplotlib.pyplot as plt
import scipy.io
import seaborn
from implicit.als import AlternatingLeastSquares
from implicit.gpu import HAS_CUDA
from implicit.nearest_neighbours import bm25_weight
def benchmark_accuracy(plays):
output = defaultdict(list)
def store_loss(name):
def inner(iteration, elapsed, loss):
print(f"model {name} iteration {iteration} loss {loss:.5f}")
output[name].append(loss)
return inner
for steps in [2, 3, 4]:
model = AlternatingLeastSquares(
factors=128,
use_gpu=False,
regularization=0,
iterations=25,
calculate_training_loss=True,
)
model.cg_steps = steps
model.fit_callback = store_loss(f"cg{steps}")
model.fit(plays)
if HAS_CUDA:
model = AlternatingLeastSquares(
factors=128,
use_native=True,
use_gpu=True,
regularization=0,
iterations=25,
calculate_training_loss=True,
)
model.fit_callback = store_loss("gpu")
model.use_gpu = True
model.fit(plays)
model = AlternatingLeastSquares(
factors=128,
use_native=True,
use_cg=False,
use_gpu=False,
regularization=0,
iterations=25,
calculate_training_loss=True,
)
model.fit_callback = store_loss("cholesky")
model.fit(plays)
return output
def benchmark_times(plays, iterations=3):
times = defaultdict(lambda: defaultdict(list))
def store_time(model, name):
def inner(iteration, elapsed, loss):
print(name, model.factors, iteration, elapsed)
times[name][model.factors].append(elapsed)
return inner
output = defaultdict(list)
for factors in range(32, 257, 32):
for steps in [2, 3, 4]:
model = AlternatingLeastSquares(
factors=factors,
use_native=True,
use_cg=True,
use_gpu=False,
regularization=0,
iterations=iterations,
)
model.fit_callback = store_time(model, f"cg{steps}")
model.cg_steps = steps
model.fit(plays)
model = AlternatingLeastSquares(
factors=factors,
use_native=True,
use_cg=False,
regularization=0,
iterations=iterations,
use_gpu=False,
)
model.fit_callback = store_time(model, "cholesky")
model.fit(plays)
if HAS_CUDA:
model = AlternatingLeastSquares(
factors=factors,
use_native=True,
use_gpu=True,
regularization=0,
iterations=iterations,
)
model.fit_callback = store_time(model, "gpu")
model.fit(plays)
# take the min time for the output
output["factors"].append(factors)
for name, stats in times.items():
output[name].append(min(stats[factors]))
return output
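# The mapping returned above looks roughly like
#     {"factors": [32, 64, ...], "cg2": [...], "cg3": [...], "cg4": [...], "cholesky": [...], ...}
# with one (minimum) per-iteration time per factor count, which is the shape
# generate_speed_graph below expects.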
LABELS = {
"cg2": "CG (2 Steps/Iteration)",
"cg3": "CG (3 Steps/Iteration)",
"cg4": "CG (4 Steps/Iteration)",
"gpu": "GPU",
"cholesky": "Cholesky",
}
COLOURS = {
"cg2": "#2ca02c",
"cg3": "#ff7f0e",
"cg4": "#c5b0d5",
"gpu": "#1f77b4",
"cholesky": "#d62728",
}
def generate_speed_graph(
data,
filename="als_speed.png",
labels=None,
colours=None,
):
labels = labels or {}
colours = colours or {}
seaborn.set()
_, ax = plt.subplots()
factors = data["factors"]
    # skip the "factors" column; only the per-model timings are plotted
    for key in (k for k in data if k != "factors"):
ax.plot(
factors, data[key], color=colours.get(key, COLOURS.get(key)), marker="o", markersize=6
)
ax.text(factors[-1] + 5, data[key][-1], labels.get(key, LABELS[key]), fontsize=10)
ax.set_ylabel("Seconds per Iteration")
ax.set_xlabel("Factors")
plt.savefig(filename, bbox_inches="tight", dpi=300)
def generate_loss_graph(data, filename="als_loss.png"):
seaborn.set()
_, ax = plt.subplots()
iterations = range(1, len(data["cholesky"]) + 1)
for key in data.keys():
ax.plot(iterations, data[key], color=COLOURS[key], marker="o", markersize=6)
ax.text(iterations[-1] + 1, data[key][-1], LABELS[key], fontsize=10)
ax.set_ylabel("Mean Squared Error")
ax.set_xlabel("Iteration")
plt.savefig(filename, bbox_inches="tight", dpi=300)
def main():
parser = argparse.ArgumentParser(
description="Benchmark CG version against Cholesky",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--input",
type=str,
required=True,
dest="inputfile",
help="dataset file in matrix market format",
)
parser.add_argument("--graph", help="generates graphs", action="store_true")
parser.add_argument("--loss", help="test training loss", action="store_true")
parser.add_argument("--speed", help="test training speed", action="store_true")
args = parser.parse_args()
if not (args.speed or args.loss):
print("must specify at least one of --speed or --loss")
parser.print_help()
else:
plays = bm25_weight(scipy.io.mmread(args.inputfile)).tocsr()
logging.basicConfig(level=logging.DEBUG)
if args.loss:
acc = benchmark_accuracy(plays)
with open("als_accuracy.json", "w", encoding="utf8") as o:
json.dump(acc, o)
if args.graph:
generate_loss_graph(acc, "als_accuracy.png")
if args.speed:
speed = benchmark_times(plays)
with open("als_speed.json", "w", encoding="utf8") as o:
json.dump(speed, o)
if args.graph:
generate_speed_graph(speed, "als_speed.png")
if __name__ == "__main__":
main()
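# Example invocation (script and dataset names are assumed, not part of this file):
#     python benchmark_als.py --input ./lastfm_360k.mtx --speed --loss --graph
# --loss writes als_accuracy.json (and als_accuracy.png with --graph);
# --speed writes als_speed.json (and als_speed.png with --graph).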
|
|
#!/usr/bin/env python2
# Python
from BaseHTTPServer import HTTPServer
import abc
import argparse
import codecs
import logging
import os
import select
import socket
import sys
# 3rd party
from inotify.watcher import Watcher
from prometheus_client import Counter
from prometheus_client import MetricsHandler
import inotify
# TODO: Implement offline mode
# TODO: Support other inotify modules?
# TODO: Support Python3 (using inotifyx?)
logger = logging.getLogger(__name__)
POLL_TIMEOUT = 10000
FILE_EVENTS_TO_WATCH = inotify.IN_MODIFY
DIR_EVENTS_TO_WATCH = inotify.IN_MOVED_TO | inotify.IN_MOVED_FROM | inotify.IN_DELETE | inotify.IN_CREATE
class MetaAbstractLineHandler(abc.ABCMeta):
'''Keep track of all subclassed versions'''
children = []
def __init__(cls, name, bases, dct):
cls.children.append(cls)
super(MetaAbstractLineHandler, cls).__init__(name, bases, dct)
# pylint: disable=R0921
class AbstractLineHandler(object):
'''Base class for building your own LineHandler
After subclassing implement your own process method'''
__metaclass__ = MetaAbstractLineHandler
testcases = None # None = throw warning; False = no testcases; otherwise iterable with testcases
testcase_args = None # Used to instantiate this class for a testcase
testcase_kwargs = None # Used to instantiate this class for a testcase
@abc.abstractmethod
def process(self, line):
pass
@property
def logger(self):
try:
return self._logger
except AttributeError:
self._logger = logging.getLogger(__name__ + '.' + self.__class__.__name__)
return self._logger
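# Minimal sketch of a concrete handler, kept as a comment so it does not register
# itself in MetaAbstractLineHandler.children on import (Counter is the
# prometheus_client class imported above; the metric name is hypothetical):
#
#     class ErrorCounter(AbstractLineHandler):
#         testcases = False  # explicitly "no testcases", so run_testcases only logs info
#         errors = Counter('logfile_errors_total', 'Lines containing ERROR')
#
#         def process(self, line):
#             if 'ERROR' in line:
#                 self.errors.inc()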
class FileStats(object):
    '''Track handlers for a specific file'''
def __init__(self, handlers):
self.watchdescriptor = None
self._filehandle = None
self.position_in_file = None
self.unprocessed = ''
self.handlers = handlers
def __repr__(self):
return '{}(handle={}, position={}, handlers={})'.format(self.__class__.__name__, self._filehandle, self.position_in_file, self.handlers)
@property
def filehandle(self):
return self._filehandle
@filehandle.setter
def filehandle(self, handle):
self._filehandle = handle
if handle is None:
self.position_in_file = -1
else:
self.position_in_file = handle.tell()
def __del__(self):
self.disable()
def disable(self):
try:
self._filehandle.close()
except (IOError, AttributeError):
pass
self.watchdescriptor = None
self._filehandle = None
self.unprocessed = ''
class CloudedEvent(inotify.watcher.Event):
'''Wrapper class to protect against segfaults.
python-inotify can segfault when requesting the __repr__ of certain events.
To prevent this we will override this method with a less useful but also
non-crashing version.
https://bitbucket.org/JanKanis/python-inotify/issue/5/possible-segfault-in-_inotifyc-read_events
https://bitbucket.org/JanKanis/python-inotify/issue/8/segfault-watching-directory
https://bitbucket.org/JanKanis/python-inotify/issue/10/str-or-repr-of-event-results-in-core-dump
'''
def __repr__(self):
return 'CloudedEvent(wd={0.wd}, fullpath={0.fullpath}, mask={0.mask}, cookie={0.cookie})'.format(self)
class DirStats(object):
def __init__(self, filenames):
self.filenames = filenames
def __repr__(self):
return '{}(filenames={})'.format(self.__class__.__name__, self.filenames)
def ignore_untracked(func):
def wrapped(self, event, *args, **kwargs):
if event.fullpath not in self.filestats:
logger.debug('Ignoring unknown file %s.', event.fullpath)
return
return func(self, event, *args, **kwargs)
return wrapped
class MyWatcher(Watcher):
'''An inotify watcher meant for tracking log files
This watcher has the following characteristics:
- A file that has multiple handlers will only be read once per change
- When a file is replaced the watcher will switch to the new file
- A file can be created after the watcher got started and it will still be processed
When files have new content the appropriate handlers will be called to process it.
'''
# Copied from inotify/watcher.py
_event_props = {
'access': 'File was accessed',
'modify': 'File was modified',
'attrib': 'Attribute of a directory entry was changed',
'close_write': 'File was closed after being written to',
'close_nowrite': 'File was closed without being written to',
'open': 'File was opened',
'moved_from': 'Directory entry was renamed from this name',
'moved_to': 'Directory entry was renamed to this name',
'create': 'Directory entry was created',
'delete': 'Directory entry was deleted',
'delete_self': 'The watched directory entry was deleted',
'move_self': 'The watched directory entry was renamed',
'unmount': 'Directory was unmounted, and can no longer be watched',
'q_overflow': 'Kernel dropped events due to queue overflow',
'ignored': 'Directory entry is no longer being watched',
'isdir': 'Event occurred on a directory',
}
def __init__(self, *args, **kwargs):
super(MyWatcher, self).__init__(*args, **kwargs)
self.filestats = {}
self.dirstats = {}
def add_handler(self, path, handler):
try:
self.filestats[path].handlers.append(handler)
except KeyError:
self.filestats[path] = FileStats([handler])
self.add(path)
def add(self, path, from_beginning_of_file=False):
# Registering a handler on the file itself
filestats = self.filestats[path]
if filestats.watchdescriptor is None:
try:
filestats.watchdescriptor = super(MyWatcher, self).add(path, FILE_EVENTS_TO_WATCH)
except OSError as ex:
logger.info('Non-fatal problem: failed to open %s: %s', path, ex)
self.reset_filehandle(path, from_beginning_of_file)
# Registering a handler on the folder that contains the file, to detect file renames
dirname = os.path.dirname(path)
try:
self.dirstats[dirname].filenames.append(path)
except KeyError:
super(MyWatcher, self).add(dirname, DIR_EVENTS_TO_WATCH)
self.dirstats[dirname] = DirStats([path])
def reset_filehandle(self, path, from_beginning_of_file=False):
stats = self.filestats[path]
# Cleanup
if stats.filehandle:
try:
stats.filehandle.close()
except IOError as ex:
logger.info('Failed to close filehandle %s: %s', path, ex)
# Setup
try:
            # Open an unbuffered stream; unbuffered is required since we use select
handle = codecs.open(path, 'r', 'UTF-8', 'replace', 0)
if from_beginning_of_file:
handle.seek(0)
else:
handle.seek(0, 2) # 0 bytes from the end of the file
except IOError:
# This can happen when the file doesn't exist yet.
handle = None
stats.filehandle = handle
stats.unprocessed = ''
def read(self, bufsize=None):
return [CloudedEvent(event.raw, event.path) for event in super(MyWatcher, self).read(bufsize)]
def process_events(self, bufsize=None):
events = self.read(bufsize)
for event in events:
for event_type in self._event_props:
if getattr(event, event_type):
try:
handler = getattr(self, 'process_' + event_type)
except AttributeError:
logger.debug('No handler for %s', event_type)
else:
logger.debug('Calling handler for %s', event_type)
handler(event)
@ignore_untracked
def process_moved_from(self, event):
logger.debug('DELETE/MOVED_FROM Event: %s', event.fullpath)
logger.debug('Removing inotify from %s', event.fullpath)
try:
self.remove_path(event.fullpath) # Stop monitoring with inotify
except inotify.watcher.InotifyWatcherException:
# Apparently we weren't even watching that file
self.process_ignored(event)
process_delete = process_moved_from
@ignore_untracked
def process_moved_to(self, event):
logger.debug('MOVED_TO Event: %s', event.fullpath)
logger.debug('Adding inotify to %s', event.fullpath)
self.add(event.fullpath) # (re)start monitoring with inotify
@ignore_untracked
def process_create(self, event):
logger.debug('CREATE Event: %s', event.fullpath)
logger.debug('Adding inotify to %s', event.fullpath)
self.add(event.fullpath, from_beginning_of_file=True) # (re)start monitoring with inotify
self.process_modify(event)
def process_modify(self, event):
filestats = self.filestats[event.fullpath]
if filestats.filehandle is None:
logger.debug('Ignoring read for non-existent file %s', event.fullpath)
return
# first, check if the file was truncated:
curr_size = os.fstat(filestats.filehandle.fileno()).st_size
if curr_size < filestats.position_in_file:
logger.info('File %s was truncated, seeking to beginning of file', event.fullpath)
filestats.filehandle.seek(0)
filestats.position_in_file = 0
filestats.unprocessed = ''
try:
partial = filestats.filehandle.read()
try:
last_newline = partial.rindex('\n')
except ValueError:
if partial:
logger.debug('No newline found: %s', repr(partial))
lines = []
filestats.unprocessed += partial
else:
lines = (filestats.unprocessed + partial[:last_newline]).splitlines()
filestats.unprocessed = partial[(last_newline + 1):] # +1 because we don't care about the newline
filestats.position_in_file = filestats.filehandle.tell()
except IOError:
logger.warning('Error reading lines from file %s', event.fullpath)
return
for line in lines:
# logger.debug('%s: %s', event.fullpath, line)
for handler in filestats.handlers:
try:
handler.process(line)
except Exception:
# Catching all possible exceptions: Continued service is
# more important than the processing of a particular line
handler.logger.exception('Failed to process line %s', repr(line))
def process_ignored(self, event):
logger.debug('inotify reported it is no longer monitoring %s', event.fullpath)
        try:
            filestats = self.filestats[event.fullpath]
        except KeyError:
            logger.debug('inotify reported it is no longer monitoring unknown %s', event.fullpath)
            return
        filestats.disable()
        # If the path was in self.filestats we're interested in it...
        if os.path.exists(event.fullpath):
            logger.debug('inotify reported it is no longer monitoring %s, re-adding it.', event.fullpath)
            self.add(event.fullpath)
        else:
            logger.debug('inotify reported it is no longer monitoring %s.', event.fullpath)
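# Conceptually, MyWatcher is wired up like this (path and handler are hypothetical;
# run_online below does the real wiring through a poll loop):
#     watcher = MyWatcher()
#     watcher.add_handler('/var/log/app.log', ErrorCounter())
#     watcher.process_events()  # read new lines and dispatch them to each handler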
class MoreSilentMetricsHandler(MetricsHandler):
'''A more silent version of the vanilla MetricsHandler'''
def log_request(self, code='-', *args, **kwargs):
if code == 200:
return
# Old-style class, so no super()
MetricsHandler.log_request(self, code, *args, **kwargs)
class MoreRobustHTTPServer(HTTPServer):
'''A more robust version of the vanilla HTTPServer
    Unlike the vanilla version this won't stop functioning once a broken pipe is
    encountered.'''
def _handle_request_noblock(self):
try:
# No super as HTTPServer is an old-style class
HTTPServer._handle_request_noblock(self)
except socket.error:
logger.info('Socket error.')
def start_http_server(portnr):
server_address = ('', portnr)
httpd = MoreRobustHTTPServer(server_address, MoreSilentMetricsHandler)
return httpd
def run_offline(setting, logfiles):
raise NotImplementedError()
def run_online(settings, logfiles):
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
# READ_WRITE = READ_ONLY | select.POLLOUT
poller = select.poll()
http_server = start_http_server(settings.port)
logger.info('Now listening for HTTP requests on port %s', settings.port)
poller.register(http_server, READ_ONLY)
filesystem_server = MyWatcher()
poller.register(filesystem_server, READ_ONLY)
for (filename, handler) in logfiles:
filesystem_server.add_handler(filename, handler)
pollcount = Counter('pollcount', 'The number of poll events processed by logfile_exporter.') # noqa
loopcount = 0
while settings.max_polls <= 0 or loopcount < settings.max_polls:
events = poller.poll(POLL_TIMEOUT)
pollcount.inc()
loopcount += 1
for fd, _event in events:
if fd == http_server.fileno():
http_server._handle_request_noblock()
elif fd == filesystem_server.fileno():
filesystem_server.process_events()
else:
logger.warning('Event from an unknown file descriptor')
logger.info('Terminating program.')
def run_testcases(handlers):
import unittest
from tests import load_tests_from_handler
# Removing duplicate handlers
unique_handlers = set([type(handler) for handler in handlers])
logger.info('Running testcases')
failures = 0
errors = 0
for handler_type in unique_handlers:
tests = load_tests_from_handler(unittest.defaultTestLoader, handler_type)
if tests:
result = tests(unittest.result.TestResult())
failure_count = len(result.failures)
error_count = len(result.errors)
failures += failure_count
errors += error_count
logger_func = logger.info
if failure_count or error_count:
logger_func = logger.warning
logger_func(
'%s executed %s testcases: %s failures, %s errors.',
handler_type,
result.testsRun,
failure_count,
error_count,
)
else:
if handler_type.testcases is False:
logger.info('%s has no testcases.', handler_type)
else:
logger.warning('%s has no testcases.', handler_type)
return (failures, errors)
def run(myfiles, configure_basic_logger=True):
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count', default=0)
parser.add_argument('-q', '--quiet', action='count', default=0)
parser.add_argument('-p', '--port', default=9123, type=int, help='Port to listen on')
parser.add_argument('-o', '--offline', action='store_true', help='Feed the existing log files to the handlers and then quit.')
parser.add_argument('-t', '--testcases', choices=['skip', 'strict', 'run', 'run-then-quit'], default='run')
parser.add_argument('--max-polls', default=-1, type=int, help=argparse.SUPPRESS)
args = parser.parse_args()
if configure_basic_logger:
desired_loglevel = max(1, logging.INFO - (args.verbose * 10) + (args.quiet * 10))
logging.basicConfig(
level=desired_loglevel,
datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)s %(levelname)-10s [%(name)s] %(message)s',
)
if args.testcases in ['strict', 'run', 'run-then-quit']:
(failures, errors) = run_testcases([handler for (_filename, handler) in myfiles])
if args.testcases == 'run-then-quit':
exit_code = 0 if max(failures, errors) == 0 else 9
sys.exit(exit_code)
if args.testcases == 'strict' and max(failures, errors) > 0:
logger.error('Aborting program; not all testcases passed.')
sys.exit(9)
if args.offline:
run_offline(args, myfiles)
else:
try:
run_online(args, myfiles)
except KeyboardInterrupt:
pass
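# Sketch of a consumer script (log path and handler class are hypothetical); run()
# expects an iterable of (filename, AbstractLineHandler-instance) pairs:
#
#     if __name__ == '__main__':
#         run([
#             ('/var/log/nginx/access.log', AccessLogHandler()),
#         ])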
|
|
from __future__ import unicode_literals
import copy
import datetime
from boto3 import Session
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel, CloudFormationModel
from moto.core.utils import iso_8601_datetime_with_milliseconds
from moto.utilities.utils import random_string
from moto.ec2 import ec2_backends
from .exceptions import (
ClusterAlreadyExistsFaultError,
ClusterNotFoundError,
ClusterParameterGroupNotFoundError,
ClusterSecurityGroupNotFoundError,
ClusterSnapshotAlreadyExistsError,
ClusterSnapshotNotFoundError,
ClusterSubnetGroupNotFoundError,
InvalidParameterCombinationError,
InvalidParameterValueError,
InvalidSubnetError,
ResourceNotFoundFaultError,
SnapshotCopyAlreadyDisabledFaultError,
SnapshotCopyAlreadyEnabledFaultError,
SnapshotCopyDisabledFaultError,
SnapshotCopyGrantAlreadyExistsFaultError,
SnapshotCopyGrantNotFoundFaultError,
UnknownSnapshotCopyRegionFaultError,
ClusterSecurityGroupNotFoundFaultError,
)
from moto.core import ACCOUNT_ID
class TaggableResourceMixin(object):
resource_type = None
def __init__(self, region_name, tags):
self.region = region_name
self.tags = tags or []
@property
def resource_id(self):
return None
@property
def arn(self):
return "arn:aws:redshift:{region}:{account_id}:{resource_type}:{resource_id}".format(
region=self.region,
account_id=ACCOUNT_ID,
resource_type=self.resource_type,
resource_id=self.resource_id,
)
def create_tags(self, tags):
new_keys = [tag_set["Key"] for tag_set in tags]
self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
self.tags.extend(tags)
return self.tags
def delete_tags(self, tag_keys):
self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
return self.tags
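# Example of the ARN shape produced by the arn property, e.g. for a Cluster named
# "my-cluster" in us-east-1 (the account id comes from moto's ACCOUNT_ID):
#     arn:aws:redshift:us-east-1:<ACCOUNT_ID>:cluster:my-cluster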
class Cluster(TaggableResourceMixin, CloudFormationModel):
resource_type = "cluster"
def __init__(
self,
redshift_backend,
cluster_identifier,
node_type,
master_username,
master_user_password,
db_name,
cluster_type,
cluster_security_groups,
vpc_security_group_ids,
cluster_subnet_group_name,
availability_zone,
preferred_maintenance_window,
cluster_parameter_group_name,
automated_snapshot_retention_period,
port,
cluster_version,
allow_version_upgrade,
number_of_nodes,
publicly_accessible,
encrypted,
region_name,
tags=None,
iam_roles_arn=None,
enhanced_vpc_routing=None,
restored_from_snapshot=False,
kms_key_id=None,
):
super(Cluster, self).__init__(region_name, tags)
self.redshift_backend = redshift_backend
self.cluster_identifier = cluster_identifier
self.create_time = iso_8601_datetime_with_milliseconds(
datetime.datetime.utcnow()
)
self.status = "available"
self.node_type = node_type
self.master_username = master_username
self.master_user_password = master_user_password
self.db_name = db_name if db_name else "dev"
self.vpc_security_group_ids = vpc_security_group_ids
self.enhanced_vpc_routing = (
enhanced_vpc_routing if enhanced_vpc_routing is not None else False
)
self.cluster_subnet_group_name = cluster_subnet_group_name
self.publicly_accessible = publicly_accessible
self.encrypted = encrypted
self.allow_version_upgrade = (
allow_version_upgrade if allow_version_upgrade is not None else True
)
self.cluster_version = cluster_version if cluster_version else "1.0"
self.port = int(port) if port else 5439
self.automated_snapshot_retention_period = (
int(automated_snapshot_retention_period)
if automated_snapshot_retention_period
else 1
)
self.preferred_maintenance_window = (
preferred_maintenance_window
if preferred_maintenance_window
else "Mon:03:00-Mon:03:30"
)
if cluster_parameter_group_name:
self.cluster_parameter_group_name = [cluster_parameter_group_name]
else:
self.cluster_parameter_group_name = ["default.redshift-1.0"]
if cluster_security_groups:
self.cluster_security_groups = cluster_security_groups
else:
self.cluster_security_groups = ["Default"]
if availability_zone:
self.availability_zone = availability_zone
else:
# This could probably be smarter, but there doesn't appear to be a
# way to pull AZs for a region in boto
self.availability_zone = region_name + "a"
if cluster_type == "single-node":
self.number_of_nodes = 1
elif number_of_nodes:
self.number_of_nodes = int(number_of_nodes)
else:
self.number_of_nodes = 1
self.iam_roles_arn = iam_roles_arn or []
self.restored_from_snapshot = restored_from_snapshot
self.kms_key_id = kms_key_id
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html
return "AWS::Redshift::Cluster"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
redshift_backend = redshift_backends[region_name]
properties = cloudformation_json["Properties"]
if "ClusterSubnetGroupName" in properties:
subnet_group_name = properties[
"ClusterSubnetGroupName"
].cluster_subnet_group_name
else:
subnet_group_name = None
cluster = redshift_backend.create_cluster(
cluster_identifier=resource_name,
node_type=properties.get("NodeType"),
master_username=properties.get("MasterUsername"),
master_user_password=properties.get("MasterUserPassword"),
db_name=properties.get("DBName"),
cluster_type=properties.get("ClusterType"),
cluster_security_groups=properties.get("ClusterSecurityGroups", []),
vpc_security_group_ids=properties.get("VpcSecurityGroupIds", []),
cluster_subnet_group_name=subnet_group_name,
availability_zone=properties.get("AvailabilityZone"),
preferred_maintenance_window=properties.get("PreferredMaintenanceWindow"),
cluster_parameter_group_name=properties.get("ClusterParameterGroupName"),
automated_snapshot_retention_period=properties.get(
"AutomatedSnapshotRetentionPeriod"
),
port=properties.get("Port"),
cluster_version=properties.get("ClusterVersion"),
allow_version_upgrade=properties.get("AllowVersionUpgrade"),
enhanced_vpc_routing=properties.get("EnhancedVpcRouting"),
number_of_nodes=properties.get("NumberOfNodes"),
publicly_accessible=properties.get("PubliclyAccessible"),
encrypted=properties.get("Encrypted"),
region_name=region_name,
kms_key_id=properties.get("KmsKeyId"),
)
return cluster
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Endpoint.Address":
return self.endpoint
elif attribute_name == "Endpoint.Port":
return self.port
raise UnformattedGetAttTemplateException()
@property
def endpoint(self):
return "{0}.cg034hpkmmjt.{1}.redshift.amazonaws.com".format(
self.cluster_identifier, self.region
)
@property
def security_groups(self):
return [
security_group
for security_group in self.redshift_backend.describe_cluster_security_groups()
if security_group.cluster_security_group_name
in self.cluster_security_groups
]
@property
def vpc_security_groups(self):
return [
security_group
for security_group in self.redshift_backend.ec2_backend.describe_security_groups()
if security_group.id in self.vpc_security_group_ids
]
@property
def parameter_groups(self):
return [
parameter_group
for parameter_group in self.redshift_backend.describe_cluster_parameter_groups()
if parameter_group.cluster_parameter_group_name
in self.cluster_parameter_group_name
]
@property
def resource_id(self):
return self.cluster_identifier
def to_json(self):
json_response = {
"MasterUsername": self.master_username,
"MasterUserPassword": "****",
"ClusterVersion": self.cluster_version,
"VpcSecurityGroups": [
{"Status": "active", "VpcSecurityGroupId": group.id}
for group in self.vpc_security_groups
],
"ClusterSubnetGroupName": self.cluster_subnet_group_name,
"AvailabilityZone": self.availability_zone,
"ClusterStatus": self.status,
"NumberOfNodes": self.number_of_nodes,
"AutomatedSnapshotRetentionPeriod": self.automated_snapshot_retention_period,
"PubliclyAccessible": self.publicly_accessible,
"Encrypted": self.encrypted,
"DBName": self.db_name,
"PreferredMaintenanceWindow": self.preferred_maintenance_window,
"ClusterParameterGroups": [
{
"ParameterApplyStatus": "in-sync",
"ParameterGroupName": group.cluster_parameter_group_name,
}
for group in self.parameter_groups
],
"ClusterSecurityGroups": [
{
"Status": "active",
"ClusterSecurityGroupName": group.cluster_security_group_name,
}
for group in self.security_groups
],
"Port": self.port,
"NodeType": self.node_type,
"ClusterIdentifier": self.cluster_identifier,
"AllowVersionUpgrade": self.allow_version_upgrade,
"Endpoint": {"Address": self.endpoint, "Port": self.port},
"ClusterCreateTime": self.create_time,
"PendingModifiedValues": [],
"Tags": self.tags,
"EnhancedVpcRouting": self.enhanced_vpc_routing,
"IamRoles": [
{"ApplyStatus": "in-sync", "IamRoleArn": iam_role_arn}
for iam_role_arn in self.iam_roles_arn
],
"KmsKeyId": self.kms_key_id,
}
if self.restored_from_snapshot:
json_response["RestoreStatus"] = {
"Status": "completed",
"CurrentRestoreRateInMegaBytesPerSecond": 123.0,
"SnapshotSizeInMegaBytes": 123,
"ProgressInMegaBytes": 123,
"ElapsedTimeInSeconds": 123,
"EstimatedTimeToCompletionInSeconds": 123,
}
try:
json_response[
"ClusterSnapshotCopyStatus"
] = self.cluster_snapshot_copy_status
except AttributeError:
pass
return json_response
class SnapshotCopyGrant(TaggableResourceMixin, BaseModel):
resource_type = "snapshotcopygrant"
def __init__(self, snapshot_copy_grant_name, kms_key_id):
self.snapshot_copy_grant_name = snapshot_copy_grant_name
self.kms_key_id = kms_key_id
def to_json(self):
return {
"SnapshotCopyGrantName": self.snapshot_copy_grant_name,
"KmsKeyId": self.kms_key_id,
}
class SubnetGroup(TaggableResourceMixin, CloudFormationModel):
resource_type = "subnetgroup"
def __init__(
self,
ec2_backend,
cluster_subnet_group_name,
description,
subnet_ids,
region_name,
tags=None,
):
super(SubnetGroup, self).__init__(region_name, tags)
self.ec2_backend = ec2_backend
self.cluster_subnet_group_name = cluster_subnet_group_name
self.description = description
self.subnet_ids = subnet_ids
if not self.subnets:
raise InvalidSubnetError(subnet_ids)
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clustersubnetgroup.html
return "AWS::Redshift::ClusterSubnetGroup"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
redshift_backend = redshift_backends[region_name]
properties = cloudformation_json["Properties"]
subnet_group = redshift_backend.create_cluster_subnet_group(
cluster_subnet_group_name=resource_name,
description=properties.get("Description"),
subnet_ids=properties.get("SubnetIds", []),
region_name=region_name,
)
return subnet_group
@property
def subnets(self):
return self.ec2_backend.get_all_subnets(filters={"subnet-id": self.subnet_ids})
@property
def vpc_id(self):
return self.subnets[0].vpc_id
@property
def resource_id(self):
return self.cluster_subnet_group_name
def to_json(self):
return {
"VpcId": self.vpc_id,
"Description": self.description,
"ClusterSubnetGroupName": self.cluster_subnet_group_name,
"SubnetGroupStatus": "Complete",
"Subnets": [
{
"SubnetStatus": "Active",
"SubnetIdentifier": subnet.id,
"SubnetAvailabilityZone": {"Name": subnet.availability_zone},
}
for subnet in self.subnets
],
"Tags": self.tags,
}
class SecurityGroup(TaggableResourceMixin, BaseModel):
resource_type = "securitygroup"
def __init__(
self, cluster_security_group_name, description, region_name, tags=None
):
super(SecurityGroup, self).__init__(region_name, tags)
self.cluster_security_group_name = cluster_security_group_name
self.description = description
self.ingress_rules = []
@property
def resource_id(self):
return self.cluster_security_group_name
def to_json(self):
return {
"EC2SecurityGroups": [],
"IPRanges": [],
"Description": self.description,
"ClusterSecurityGroupName": self.cluster_security_group_name,
"Tags": self.tags,
}
class ParameterGroup(TaggableResourceMixin, CloudFormationModel):
resource_type = "parametergroup"
def __init__(
self,
cluster_parameter_group_name,
group_family,
description,
region_name,
tags=None,
):
super(ParameterGroup, self).__init__(region_name, tags)
self.cluster_parameter_group_name = cluster_parameter_group_name
self.group_family = group_family
self.description = description
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clusterparametergroup.html
return "AWS::Redshift::ClusterParameterGroup"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
redshift_backend = redshift_backends[region_name]
properties = cloudformation_json["Properties"]
parameter_group = redshift_backend.create_cluster_parameter_group(
cluster_parameter_group_name=resource_name,
description=properties.get("Description"),
group_family=properties.get("ParameterGroupFamily"),
region_name=region_name,
)
return parameter_group
@property
def resource_id(self):
return self.cluster_parameter_group_name
def to_json(self):
return {
"ParameterGroupFamily": self.group_family,
"Description": self.description,
"ParameterGroupName": self.cluster_parameter_group_name,
"Tags": self.tags,
}
class Snapshot(TaggableResourceMixin, BaseModel):
resource_type = "snapshot"
def __init__(
self, cluster, snapshot_identifier, region_name, tags=None, iam_roles_arn=None
):
super(Snapshot, self).__init__(region_name, tags)
self.cluster = copy.copy(cluster)
self.snapshot_identifier = snapshot_identifier
self.snapshot_type = "manual"
self.status = "available"
self.create_time = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
self.iam_roles_arn = iam_roles_arn or []
@property
def resource_id(self):
return "{cluster_id}/{snapshot_id}".format(
cluster_id=self.cluster.cluster_identifier,
snapshot_id=self.snapshot_identifier,
)
def to_json(self):
return {
"SnapshotIdentifier": self.snapshot_identifier,
"ClusterIdentifier": self.cluster.cluster_identifier,
"SnapshotCreateTime": self.create_time,
"Status": self.status,
"Port": self.cluster.port,
"AvailabilityZone": self.cluster.availability_zone,
"MasterUsername": self.cluster.master_username,
"ClusterVersion": self.cluster.cluster_version,
"SnapshotType": self.snapshot_type,
"NodeType": self.cluster.node_type,
"NumberOfNodes": self.cluster.number_of_nodes,
"DBName": self.cluster.db_name,
"Tags": self.tags,
"EnhancedVpcRouting": self.cluster.enhanced_vpc_routing,
"IamRoles": [
{"ApplyStatus": "in-sync", "IamRoleArn": iam_role_arn}
for iam_role_arn in self.iam_roles_arn
],
}
class RedshiftBackend(BaseBackend):
def __init__(self, ec2_backend, region_name):
self.region = region_name
self.clusters = {}
self.subnet_groups = {}
self.security_groups = {
"Default": SecurityGroup(
"Default", "Default Redshift Security Group", self.region
)
}
self.parameter_groups = {
"default.redshift-1.0": ParameterGroup(
"default.redshift-1.0",
"redshift-1.0",
"Default Redshift parameter group",
self.region,
)
}
self.ec2_backend = ec2_backend
self.snapshots = OrderedDict()
self.RESOURCE_TYPE_MAP = {
"cluster": self.clusters,
"parametergroup": self.parameter_groups,
"securitygroup": self.security_groups,
"snapshot": self.snapshots,
"subnetgroup": self.subnet_groups,
}
self.snapshot_copy_grants = {}
def reset(self):
ec2_backend = self.ec2_backend
region_name = self.region
self.__dict__ = {}
self.__init__(ec2_backend, region_name)
def enable_snapshot_copy(self, **kwargs):
cluster_identifier = kwargs["cluster_identifier"]
cluster = self.clusters[cluster_identifier]
if not hasattr(cluster, "cluster_snapshot_copy_status"):
if (
cluster.encrypted == "true"
and kwargs["snapshot_copy_grant_name"] is None
):
raise InvalidParameterValueError(
"SnapshotCopyGrantName is required for Snapshot Copy on KMS encrypted clusters."
)
if kwargs["destination_region"] == self.region:
raise UnknownSnapshotCopyRegionFaultError(
"Invalid region {}".format(self.region)
)
status = {
"DestinationRegion": kwargs["destination_region"],
"RetentionPeriod": kwargs["retention_period"],
"SnapshotCopyGrantName": kwargs["snapshot_copy_grant_name"],
}
cluster.cluster_snapshot_copy_status = status
return cluster
else:
raise SnapshotCopyAlreadyEnabledFaultError(cluster_identifier)
def disable_snapshot_copy(self, **kwargs):
cluster_identifier = kwargs["cluster_identifier"]
cluster = self.clusters[cluster_identifier]
if hasattr(cluster, "cluster_snapshot_copy_status"):
del cluster.cluster_snapshot_copy_status
return cluster
else:
raise SnapshotCopyAlreadyDisabledFaultError(cluster_identifier)
def modify_snapshot_copy_retention_period(
self, cluster_identifier, retention_period
):
cluster = self.clusters[cluster_identifier]
if hasattr(cluster, "cluster_snapshot_copy_status"):
cluster.cluster_snapshot_copy_status["RetentionPeriod"] = retention_period
return cluster
else:
raise SnapshotCopyDisabledFaultError(cluster_identifier)
def create_cluster(self, **cluster_kwargs):
cluster_identifier = cluster_kwargs["cluster_identifier"]
if cluster_identifier in self.clusters:
raise ClusterAlreadyExistsFaultError()
cluster = Cluster(self, **cluster_kwargs)
self.clusters[cluster_identifier] = cluster
return cluster
def describe_clusters(self, cluster_identifier=None):
clusters = self.clusters.values()
if cluster_identifier:
if cluster_identifier in self.clusters:
return [self.clusters[cluster_identifier]]
else:
raise ClusterNotFoundError(cluster_identifier)
return clusters
def modify_cluster(self, **cluster_kwargs):
cluster_identifier = cluster_kwargs.pop("cluster_identifier")
new_cluster_identifier = cluster_kwargs.pop("new_cluster_identifier", None)
cluster_type = cluster_kwargs.get("cluster_type")
if cluster_type and cluster_type not in ["multi-node", "single-node"]:
raise InvalidParameterValueError(
"Invalid cluster type. Cluster type can be one of multi-node or single-node"
)
if cluster_type == "single-node":
# AWS will always silently override this value for single-node clusters.
cluster_kwargs["number_of_nodes"] = 1
elif cluster_type == "multi-node":
if cluster_kwargs.get("number_of_nodes", 0) < 2:
raise InvalidParameterCombinationError(
"Number of nodes for cluster type multi-node must be greater than or equal to 2"
)
cluster = self.describe_clusters(cluster_identifier)[0]
for key, value in cluster_kwargs.items():
setattr(cluster, key, value)
if new_cluster_identifier:
dic = {
"cluster_identifier": cluster_identifier,
"skip_final_snapshot": True,
"final_cluster_snapshot_identifier": None,
}
self.delete_cluster(**dic)
cluster.cluster_identifier = new_cluster_identifier
self.clusters[new_cluster_identifier] = cluster
return cluster
def delete_cluster(self, **cluster_kwargs):
cluster_identifier = cluster_kwargs.pop("cluster_identifier")
cluster_skip_final_snapshot = cluster_kwargs.pop("skip_final_snapshot")
cluster_snapshot_identifer = cluster_kwargs.pop(
"final_cluster_snapshot_identifier"
)
if cluster_identifier in self.clusters:
if (
cluster_skip_final_snapshot is False
and cluster_snapshot_identifer is None
):
raise InvalidParameterCombinationError(
"FinalClusterSnapshotIdentifier is required unless SkipFinalClusterSnapshot is specified."
)
elif (
cluster_skip_final_snapshot is False
and cluster_snapshot_identifer is not None
): # create snapshot
cluster = self.describe_clusters(cluster_identifier)[0]
self.create_cluster_snapshot(
cluster_identifier,
cluster_snapshot_identifer,
cluster.region,
cluster.tags,
)
return self.clusters.pop(cluster_identifier)
raise ClusterNotFoundError(cluster_identifier)
def create_cluster_subnet_group(
self, cluster_subnet_group_name, description, subnet_ids, region_name, tags=None
):
subnet_group = SubnetGroup(
self.ec2_backend,
cluster_subnet_group_name,
description,
subnet_ids,
region_name,
tags,
)
self.subnet_groups[cluster_subnet_group_name] = subnet_group
return subnet_group
def describe_cluster_subnet_groups(self, subnet_identifier=None):
subnet_groups = self.subnet_groups.values()
if subnet_identifier:
if subnet_identifier in self.subnet_groups:
return [self.subnet_groups[subnet_identifier]]
else:
raise ClusterSubnetGroupNotFoundError(subnet_identifier)
return subnet_groups
def delete_cluster_subnet_group(self, subnet_identifier):
if subnet_identifier in self.subnet_groups:
return self.subnet_groups.pop(subnet_identifier)
raise ClusterSubnetGroupNotFoundError(subnet_identifier)
def create_cluster_security_group(
self, cluster_security_group_name, description, region_name, tags=None
):
security_group = SecurityGroup(
cluster_security_group_name, description, region_name, tags
)
self.security_groups[cluster_security_group_name] = security_group
return security_group
def describe_cluster_security_groups(self, security_group_name=None):
security_groups = self.security_groups.values()
if security_group_name:
if security_group_name in self.security_groups:
return [self.security_groups[security_group_name]]
else:
raise ClusterSecurityGroupNotFoundError(security_group_name)
return security_groups
def delete_cluster_security_group(self, security_group_identifier):
if security_group_identifier in self.security_groups:
return self.security_groups.pop(security_group_identifier)
raise ClusterSecurityGroupNotFoundError(security_group_identifier)
def authorize_cluster_security_group_ingress(self, security_group_name, cidr_ip):
security_group = self.security_groups.get(security_group_name)
if not security_group:
raise ClusterSecurityGroupNotFoundFaultError()
        # For now, just record the CIDR IP as an ingress rule; there is no
        # dedicated security-rule object to model it with.
security_group.ingress_rules.append(cidr_ip)
return security_group
def create_cluster_parameter_group(
self,
cluster_parameter_group_name,
group_family,
description,
region_name,
tags=None,
):
parameter_group = ParameterGroup(
cluster_parameter_group_name, group_family, description, region_name, tags
)
self.parameter_groups[cluster_parameter_group_name] = parameter_group
return parameter_group
def describe_cluster_parameter_groups(self, parameter_group_name=None):
parameter_groups = self.parameter_groups.values()
if parameter_group_name:
if parameter_group_name in self.parameter_groups:
return [self.parameter_groups[parameter_group_name]]
else:
raise ClusterParameterGroupNotFoundError(parameter_group_name)
return parameter_groups
def delete_cluster_parameter_group(self, parameter_group_name):
if parameter_group_name in self.parameter_groups:
return self.parameter_groups.pop(parameter_group_name)
raise ClusterParameterGroupNotFoundError(parameter_group_name)
def create_cluster_snapshot(
self, cluster_identifier, snapshot_identifier, region_name, tags
):
cluster = self.clusters.get(cluster_identifier)
if not cluster:
raise ClusterNotFoundError(cluster_identifier)
if self.snapshots.get(snapshot_identifier) is not None:
raise ClusterSnapshotAlreadyExistsError(snapshot_identifier)
snapshot = Snapshot(cluster, snapshot_identifier, region_name, tags)
self.snapshots[snapshot_identifier] = snapshot
return snapshot
def describe_cluster_snapshots(
self, cluster_identifier=None, snapshot_identifier=None
):
if cluster_identifier:
cluster_snapshots = []
for snapshot in self.snapshots.values():
if snapshot.cluster.cluster_identifier == cluster_identifier:
cluster_snapshots.append(snapshot)
if cluster_snapshots:
return cluster_snapshots
if snapshot_identifier:
if snapshot_identifier in self.snapshots:
return [self.snapshots[snapshot_identifier]]
raise ClusterSnapshotNotFoundError(snapshot_identifier)
return self.snapshots.values()
def delete_cluster_snapshot(self, snapshot_identifier):
if snapshot_identifier not in self.snapshots:
raise ClusterSnapshotNotFoundError(snapshot_identifier)
deleted_snapshot = self.snapshots.pop(snapshot_identifier)
deleted_snapshot.status = "deleted"
return deleted_snapshot
def restore_from_cluster_snapshot(self, **kwargs):
snapshot_identifier = kwargs.pop("snapshot_identifier")
snapshot = self.describe_cluster_snapshots(
snapshot_identifier=snapshot_identifier
)[0]
create_kwargs = {
"node_type": snapshot.cluster.node_type,
"master_username": snapshot.cluster.master_username,
"master_user_password": snapshot.cluster.master_user_password,
"db_name": snapshot.cluster.db_name,
"cluster_type": "multi-node"
if snapshot.cluster.number_of_nodes > 1
else "single-node",
"availability_zone": snapshot.cluster.availability_zone,
"port": snapshot.cluster.port,
"cluster_version": snapshot.cluster.cluster_version,
"number_of_nodes": snapshot.cluster.number_of_nodes,
"encrypted": snapshot.cluster.encrypted,
"tags": snapshot.cluster.tags,
"restored_from_snapshot": True,
"enhanced_vpc_routing": snapshot.cluster.enhanced_vpc_routing,
}
create_kwargs.update(kwargs)
return self.create_cluster(**create_kwargs)
def create_snapshot_copy_grant(self, **kwargs):
snapshot_copy_grant_name = kwargs["snapshot_copy_grant_name"]
kms_key_id = kwargs["kms_key_id"]
if snapshot_copy_grant_name not in self.snapshot_copy_grants:
snapshot_copy_grant = SnapshotCopyGrant(
snapshot_copy_grant_name, kms_key_id
)
self.snapshot_copy_grants[snapshot_copy_grant_name] = snapshot_copy_grant
return snapshot_copy_grant
raise SnapshotCopyGrantAlreadyExistsFaultError(snapshot_copy_grant_name)
def delete_snapshot_copy_grant(self, **kwargs):
snapshot_copy_grant_name = kwargs["snapshot_copy_grant_name"]
if snapshot_copy_grant_name in self.snapshot_copy_grants:
return self.snapshot_copy_grants.pop(snapshot_copy_grant_name)
raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name)
def describe_snapshot_copy_grants(self, **kwargs):
copy_grants = self.snapshot_copy_grants.values()
snapshot_copy_grant_name = kwargs["snapshot_copy_grant_name"]
if snapshot_copy_grant_name:
if snapshot_copy_grant_name in self.snapshot_copy_grants:
return [self.snapshot_copy_grants[snapshot_copy_grant_name]]
else:
raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name)
return copy_grants
def _get_resource_from_arn(self, arn):
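        # Redshift ARNs have the form
        #   arn:aws:redshift:<region>:<account-id>:<resource-type>:<resource-id>
        # and snapshot resource ids are further qualified as
        #   <cluster-identifier>/<snapshot-identifier>,
        # which is why the "snapshot" branch below keeps only the part after "/".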
try:
arn_breakdown = arn.split(":")
resource_type = arn_breakdown[5]
if resource_type == "snapshot":
resource_id = arn_breakdown[6].split("/")[1]
else:
resource_id = arn_breakdown[6]
except IndexError:
resource_type = resource_id = arn
resources = self.RESOURCE_TYPE_MAP.get(resource_type)
if resources is None:
message = (
"Tagging is not supported for this type of resource: '{0}' "
"(the ARN is potentially malformed, please check the ARN "
"documentation for more information)".format(resource_type)
)
raise ResourceNotFoundFaultError(message=message)
try:
resource = resources[resource_id]
except KeyError:
raise ResourceNotFoundFaultError(resource_type, resource_id)
else:
return resource
@staticmethod
def _describe_tags_for_resources(resources):
tagged_resources = []
for resource in resources:
for tag in resource.tags:
data = {
"ResourceName": resource.arn,
"ResourceType": resource.resource_type,
"Tag": {"Key": tag["Key"], "Value": tag["Value"]},
}
tagged_resources.append(data)
return tagged_resources
def _describe_tags_for_resource_type(self, resource_type):
resources = self.RESOURCE_TYPE_MAP.get(resource_type)
if not resources:
raise ResourceNotFoundFaultError(resource_type=resource_type)
return self._describe_tags_for_resources(resources.values())
def _describe_tags_for_resource_name(self, resource_name):
resource = self._get_resource_from_arn(resource_name)
return self._describe_tags_for_resources([resource])
def create_tags(self, resource_name, tags):
resource = self._get_resource_from_arn(resource_name)
resource.create_tags(tags)
def describe_tags(self, resource_name, resource_type):
if resource_name and resource_type:
raise InvalidParameterValueError(
"You cannot filter a list of resources using an Amazon "
"Resource Name (ARN) and a resource type together in the "
"same request. Retry the request using either an ARN or "
"a resource type, but not both."
)
if resource_type:
return self._describe_tags_for_resource_type(resource_type.lower())
if resource_name:
return self._describe_tags_for_resource_name(resource_name)
# If name and type are not specified, return all tagged resources.
# TODO: Implement aws marker pagination
tagged_resources = []
for resource_type in self.RESOURCE_TYPE_MAP:
try:
tagged_resources += self._describe_tags_for_resource_type(resource_type)
except ResourceNotFoundFaultError:
pass
return tagged_resources
def delete_tags(self, resource_name, tag_keys):
resource = self._get_resource_from_arn(resource_name)
resource.delete_tags(tag_keys)
def get_cluster_credentials(
self, cluster_identifier, db_user, auto_create, duration_seconds
):
if duration_seconds < 900 or duration_seconds > 3600:
raise InvalidParameterValueError(
"Token duration must be between 900 and 3600 seconds"
)
expiration = datetime.datetime.now() + datetime.timedelta(0, duration_seconds)
if cluster_identifier in self.clusters:
user_prefix = "IAM:" if auto_create is False else "IAMA:"
db_user = user_prefix + db_user
return {
"DbUser": db_user,
"DbPassword": random_string(32),
"Expiration": expiration,
}
else:
raise ClusterNotFoundError(cluster_identifier)
redshift_backends = {}
for region in Session().get_available_regions("redshift"):
redshift_backends[region] = RedshiftBackend(ec2_backends[region], region)
for region in Session().get_available_regions("redshift", partition_name="aws-us-gov"):
redshift_backends[region] = RedshiftBackend(ec2_backends[region], region)
for region in Session().get_available_regions("redshift", partition_name="aws-cn"):
redshift_backends[region] = RedshiftBackend(ec2_backends[region], region)
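# Illustrative usage sketch (not part of the original module, kept as comments
# so this file does not import boto3/moto at definition time): the per-region
# backends above are normally reached through moto's mock decorator and a
# boto3 client rather than directly, roughly like this:
#
#     import boto3
#     from moto import mock_redshift
#
#     @mock_redshift
#     def test_create_and_describe_cluster():
#         client = boto3.client("redshift", region_name="us-east-1")
#         client.create_cluster(
#             ClusterIdentifier="my-cluster",
#             NodeType="dc2.large",
#             MasterUsername="admin",
#             MasterUserPassword="Password1",
#             ClusterType="single-node",
#         )
#         clusters = client.describe_clusters()["Clusters"]
#         assert clusters[0]["ClusterIdentifier"] == "my-cluster"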
|
|
# -*- coding:utf-8 -*-
import os
import sys, collections
from empty import Empty
from flask import Flask, render_template, g, session, redirect, url_for, request, flash, abort
import flask_ld as ld
from flask_ld.utils import lru
from flask_restful import Resource
from nanopub import NanopublicationManager, Nanopublication
from flask_admin import Admin, BaseView, expose
import rdflib
from flask_security import Security, \
UserMixin, RoleMixin, login_required
from flask_security.core import current_user
from flask_login import AnonymousUserMixin, login_user
from flask_security.forms import RegisterForm
from flask_security.utils import encrypt_password
from werkzeug.datastructures import ImmutableList
from flask_wtf import Form, RecaptchaField
from wtforms import TextField, TextAreaField, StringField, validators
import rdfalchemy
from rdfalchemy.orm import mapper
import sadi
import json
import base64
import binascii
import sadi.mimeparse
from flask_mail import Mail, Message
import database
from datetime import datetime
import markdown
import rdflib.plugin
from rdflib.store import Store
from rdflib.parser import Parser
from rdflib.serializer import Serializer
from rdflib.query import ResultParser, ResultSerializer, Processor, Result, UpdateProcessor
from rdflib.exceptions import Error
rdflib.plugin.register('sparql', Result,
'rdflib.plugins.sparql.processor', 'SPARQLResult')
rdflib.plugin.register('sparql', Processor,
'rdflib.plugins.sparql.processor', 'SPARQLProcessor')
rdflib.plugin.register('sparql', UpdateProcessor,
'rdflib.plugins.sparql.processor', 'SPARQLUpdateProcessor')
# apps is a special folder where you can place your blueprints
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(PROJECT_PATH, "apps"))
basestring = getattr(__builtins__, 'basestring', str)
# we create some comparison keys:
# increase probability that the rule will be near or at the top
top_compare_key = False, -100, [(-2, 0)]
# increase probability that the rule will be near or at the bottom
bottom_compare_key = True, 100, [(2, 0)]
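# Illustrative note: these keys are consumed by App.weighted_route() below,
# which tags the freshly registered werkzeug rule with a custom
# match_compare_key so that catch-all patterns sort towards the bottom of the
# url_map. configure_views() uses it like this:
#
#     @self.weighted_route('/<path:name>', compare_key=bottom_compare_key)
#     def view(name=None, format=None, view=None):
#         ...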
class NamespaceContainer:
@property
def prefixes(self):
result = {}
for key, value in self.__dict__.items():
if isinstance(value, Namespace):
result[key] = value
return result
from rdfalchemy import *
from flask_ld.datastore import *
# Setup Flask-Security
class ExtendedRegisterForm(RegisterForm):
identifier = TextField('Identifier', [validators.Required()])
givenName = TextField('Given Name', [validators.Required()])
familyName = TextField('Family Name', [validators.Required()])
# Form for full-text search
class SearchForm(Form):
search_query = StringField('search_query', [validators.DataRequired()])
def to_json(result):
    return json.dumps([
        {key: value.value if isinstance(value, Literal) else value
         for key, value in x.items()}
        for x in result.bindings
    ])
class App(Empty):
def configure_database(self):
"""
Database configuration should be set here
"""
self.NS = NamespaceContainer()
self.NS.RDFS = rdflib.RDFS
self.NS.RDF = rdflib.RDF
self.NS.rdfs = rdflib.Namespace(rdflib.RDFS)
self.NS.rdf = rdflib.Namespace(rdflib.RDF)
self.NS.owl = rdflib.OWL
self.NS.xsd = rdflib.Namespace("http://www.w3.org/2001/XMLSchema#")
self.NS.dc = rdflib.Namespace("http://purl.org/dc/terms/")
self.NS.dcelements = rdflib.Namespace("http://purl.org/dc/elements/1.1/")
self.NS.auth = rdflib.Namespace("http://vocab.rpi.edu/auth/")
self.NS.foaf = rdflib.Namespace("http://xmlns.com/foaf/0.1/")
self.NS.prov = rdflib.Namespace("http://www.w3.org/ns/prov#")
self.NS.skos = rdflib.Namespace("http://www.w3.org/2004/02/skos/core#")
self.NS.cmo = rdflib.Namespace("http://purl.org/twc/ontologies/cmo.owl#")
self.NS.sio = rdflib.Namespace("http://semanticscience.org/resource/")
self.NS.sioc_types = rdflib.Namespace("http://rdfs.org/sioc/types#")
self.NS.sioc = rdflib.Namespace("http://rdfs.org/sioc/ns#")
self.NS.np = rdflib.Namespace("http://www.nanopub.org/nschema#")
self.NS.graphene = rdflib.Namespace("http://vocab.rpi.edu/graphene/")
self.NS.local = rdflib.Namespace(self.config['lod_prefix']+'/')
self.NS.ov = rdflib.Namespace("http://open.vocab.org/terms/")
self.db = database.engine_from_config(self.config, "db_")
load_namespaces(self.db,locals())
Resource.db = self.db
self.vocab = Graph()
#print URIRef(self.config['vocab_file'])
self.vocab.load(open(self.config['vocab_file']), format="turtle")
self.role_api = ld.LocalResource(self.NS.prov.Role,"role", self.db.store, self.vocab, self.config['lod_prefix'], RoleMixin)
self.Role = self.role_api.alchemy
self.user_api = ld.LocalResource(self.NS.prov.Agent,"user", self.db.store, self.vocab, self.config['lod_prefix'], UserMixin)
self.User = self.user_api.alchemy
self.nanopub_api = ld.LocalResource(self.NS.np.Nanopublication,"pub", self.db.store, self.vocab, self.config['lod_prefix'], name="Graph")
self.Nanopub = self.nanopub_api.alchemy
self.classes = mapper(self.Role, self.User)
self.datastore = RDFAlchemyUserDatastore(self.db, self.classes, self.User, self.Role)
self.security = Security(self, self.datastore,
register_form=ExtendedRegisterForm)
#self.mail = Mail(self)
def weighted_route(self, *args, **kwargs):
def decorator(view_func):
compare_key = kwargs.pop('compare_key', None)
# register view_func with route
self.route(*args, **kwargs)(view_func)
if compare_key is not None:
rule = self.url_map._rules[-1]
rule.match_compare_key = lambda: compare_key
return view_func
return decorator
def configure_views(self):
def sort_by(resources, property):
return sorted(resources, key=lambda x: x.value(property))
class InvitedAnonymousUser(AnonymousUserMixin):
'''A user that has been referred via kikm references but does not have a user account.'''
def __init__(self):
self.roles = ImmutableList()
def has_role(self, *args):
"""Returns `False`"""
return False
def is_active(self):
return True
@property
def is_authenticated(self):
return True
def get_label(resource):
print resource.identifier
label = resource.graph.preferredLabel(resource.identifier, default = None,
labelProperties=(self.NS.RDFS.label, self.NS.skos.prefLabel, self.NS.dc.title, self.NS.foaf.name))
if len(label) == 0:
try:
label = resource.graph.qname(resource.identifier).split(":")[1].replace("_"," ").title()
except:
label = str(resource.identifier)
else:
label = label[0]
print label
return label
@self.before_request
def load_forms():
#g.search_form = SearchForm()
g.ns = self.NS
g.get_summary = get_summary
g.get_label = get_label
g.get_entity = get_entity
g.rdflib = rdflib
g.isinstance = isinstance
@self.login_manager.user_loader
def load_user(user_id):
            if user_id is not None:
return self.datastore.find_user(id=user_id)
else:
return None
extensions = {
"rdf": "application/rdf+xml",
"json": "application/ld+json",
"ttl": "text/turtle",
"trig": "application/trig",
"turtle": "text/turtle",
"owl": "application/rdf+xml",
"nq": "application/n-quads",
"nt": "application/n-triples",
"html": "text/html"
}
dataFormats = {
"application/rdf+xml" : "xml",
"application/ld+json" : 'json-ld',
"text/turtle" : "turtle",
"application/trig" : "trig",
"application/n-quads" : "nquads",
"application/n-triples" : "nt",
None: "json-ld"
}
def get_graphs(graphs):
query = 'select ?s ?p ?o ?g where {graph ?g {?s ?p ?o} } values ?g { %s }'
query = query % ' '.join([graph.n3() for graph in graphs])
print query
quads = self.db.store.query(query)
result = Dataset()
result.addN(quads)
return result
def get_entity(entity):
nanopubs = self.db.query('''select distinct ?s ?p ?o ?g where {
?np np:hasAssertion?|np:hasProvenance?|np:hasPublicationInfo? ?g;
np:hasPublicationInfo ?pubinfo;
np:hasAssertion ?assertion;
sio:isAbout ?e.
graph ?pubinfo { ?assertion dc:created [].}
graph ?g {?s ?p ?o.}
}''',initBindings={'e':entity}, initNs={'np':self.NS.np, 'sio':self.NS.sio, 'dc':self.NS.dc})
result = ConjunctiveGraph()
result.addN(nanopubs)
#print result.serialize(format="json-ld")
return result.resource(entity)
self.get_entity = get_entity
def get_summary(resource):
summary_properties = [
self.NS.skos.definition,
self.NS.dc.abstract,
self.NS.dc.description,
self.NS.RDFS.comment,
self.NS.dcelements.description
]
return resource.graph.preferredLabel(resource.identifier, default=[], labelProperties=summary_properties)
self.get_summary = get_summary
@self.route('/about.<format>')
@self.route('/about')
@self.weighted_route('/<path:name>.<format>', compare_key=bottom_compare_key)
@self.weighted_route('/<path:name>', compare_key=bottom_compare_key)
@self.route('/')
@login_required
def view(name=None, format=None, view=None):
if format is not None:
if format in extensions:
content_type = extensions[format]
else:
name = '.'.join([name, format])
#print name
if name is not None:
entity = self.NS.local[name]
elif 'uri' in request.args:
entity = URIRef(request.args['uri'])
else:
entity = self.NS.local.Home
content_type = request.headers['Accept'] if 'Accept' in request.headers else '*/*'
resource = get_entity(entity)
print resource.identifier, content_type
htmls = set(['application/xhtml','text/html'])
if sadi.mimeparse.best_match(htmls, content_type) in htmls:
return render_view(resource)
else:
fmt = dataFormats[sadi.mimeparse.best_match([mt for mt in dataFormats.keys() if mt is not None],content_type)]
return resource.graph.serialize(format=fmt)
@self.route('/curate',methods=['GET','POST'])
#=========================#
#Curation team Spring 2017#
@login_required
def curate():
database.init_db()
if not database.does_exist("dblp"):
database.update_date("dblp","1900-01-01 00:00")
needs_update = database.check_update_dblp()
if request.method == "GET":
return render_template("curate.html",needs_update = needs_update)
elif request.method == "POST":
button = request.form['button']
if button == "Update DBLP":
database.update_dblp()
needs_update = database.check_update_dblp()
return render_template("curate.html",needs_update = needs_update)
#End of curation implement#
#=========================#
views = {}
def render_view(resource):
template_args = dict(ns=self.NS,
this=resource, g=g,
current_user=current_user,
isinstance=isinstance,
get_entity=get_entity,
get_summary=get_summary,
rdflib=rdflib,
hasattr=hasattr,
set=set)
view = None
if 'view' in request.args:
view = request.args['view']
# 'view' is the default view
content = resource.value(self.NS.sioc.content)
            if (view == 'view' or view is None) and content is not None:
                return render_template('content_view.html', content=content, **template_args)
value = resource.value(self.NS.prov.value)
if value is not None and view is None:
headers = {}
headers['ContentType'] = 'text/plain'
content_type = resource.value(self.NS.ov.hasContentType)
if content_type is not None:
headers['ContentType'] = content_type
if value.datatype == XSD.base64Binary:
return base64.b64decode(value.value), 200, headers
if value.datatype == XSD.hexBinary:
return binascii.unhexlify(value.value), 200, headers
return value.value, 200, headers
if view is None:
view = 'view'
if 'as' in request.args:
                types = [(URIRef(request.args['as']), 0)]
else:
types = list([(x.identifier, 0) for x in resource[RDF.type]])
#if len(types) == 0:
types.append([self.NS.RDFS.Resource, 100])
print types
type_string = ' '.join(["(%s %d)" % (x.n3(), i) for x, i in types])
view_query = '''select ?id ?view (count(?mid)+?priority as ?rank) ?class ?c where {
values (?c ?priority) { %s }
?c rdfs:subClassOf* ?mid.
?mid rdfs:subClassOf* ?class.
?class ?viewProperty ?view.
?viewProperty rdfs:subPropertyOf* graphene:hasView.
?viewProperty dc:identifier ?id.
} group by ?c ?class order by ?rank
''' % type_string
print view_query
views = list(self.vocab.query(view_query, initNs=dict(graphene=self.NS.graphene, dc=self.NS.dc),
initBindings=dict(id=Literal(view))))
print '\n'.join([str(x.asdict()) for x in views])
if len(views) == 0:
abort(404)
# default view (list of nanopubs)
# if available, replace with class view
# if available, replace with instance view
return render_template(views[0]['view'].value, **template_args)
self.api = ld.LinkedDataApi(self, "", self.db.store, "")
self.admin = Admin(self, name="graphene", template_mode='bootstrap3')
self.admin.add_view(ld.ModelView(self.nanopub_api, default_sort=RDFS.label))
self.admin.add_view(ld.ModelView(self.role_api, default_sort=RDFS.label))
self.admin.add_view(ld.ModelView(self.user_api, default_sort=foaf.familyName))
app = self
self.nanopub_manager = NanopublicationManager(app.db.store, app.config['nanopub_archive_path'],
Namespace('%s/pub/'%(app.config['lod_prefix'])))
class NanopublicationResource(ld.LinkedDataResource):
decorators = [login_required]
def __init__(self, ):
self.local_resource = app.nanopub_api
def _can_edit(self, uri):
if current_user.has_role('Publisher') or current_user.has_role('Editor') or current_user.has_role('Admin'):
return True
if app.db.query('''ask {
?nanopub np:hasAssertion ?assertion; np:hasPublicationInfo ?info.
graph ?info { ?assertion dc:contributor ?user. }
}''', initBindings=dict(nanopub=uri, user=current_user.resUri), initNs=dict(np=app.NS.np, dc=app.NS.dc)):
print "Is owner."
return True
return False
def _get_uri(self, ident):
return URIRef('%s/pub/%s'%(app.config['lod_prefix'], ident))
def get(self, ident):
uri = self._get_uri(ident)
result = app.nanopub_manager.get(uri)
return result
def delete(self, ident):
uri = self._get_uri(ident)
if not self._can_edit(uri):
return '<h1>Not Authorized</h1>', 401
app.nanopub_manager.retire(uri)
#self.local_resource.delete(uri)
return '', 204
def _get_graph(self):
inputGraph = ConjunctiveGraph()
contentType = request.headers['Content-Type']
sadi.deserialize(inputGraph, request.data, contentType)
return inputGraph
def put(self, ident):
nanopub_uri = self._get_uri(ident)
inputGraph = self._get_graph()
old_nanopub = self._prep_nanopub(nanopub_uri, inputGraph)
for nanopub in app.nanopub_manager.prepare(inputGraph):
modified = Literal(datetime.utcnow())
nanopub.pubinfo.add((nanopub.assertion.identifier, app.NS.prov.wasRevisionOf, old_nanopub.assertion.identifier))
nanopub.pubinfo.add((old_nanopub.assertion.identifier, app.NS.prov.invalidatedAtTime, modified))
nanopub.pubinfo.add((nanopub.assertion.identifier, app.NS.dc.modified, modified))
app.nanopub_manager.retire(nanopub_uri)
app.nanopub_manager.publish(nanopub)
def _prep_nanopub(self, nanopub_uri, graph):
nanopub = Nanopublication(store=graph.store, identifier=nanopub_uri)
about = nanopub.nanopub_resource.value(app.NS.sio.isAbout)
print nanopub.assertion_resource.identifier, about
self._prep_graph(nanopub.assertion_resource, about.identifier)
self._prep_graph(nanopub.pubinfo_resource, nanopub.assertion_resource.identifier)
self._prep_graph(nanopub.provenance_resource, nanopub.assertion_resource.identifier)
nanopub.pubinfo.add((nanopub.assertion.identifier, app.NS.dc.contributor, current_user.resUri))
return nanopub
def post(self, ident=None):
if ident is not None:
return self.put(ident)
inputGraph = self._get_graph()
for nanopub_uri in inputGraph.subjects(rdflib.RDF.type, app.NS.np.Nanopublication):
nanopub = self._prep_nanopub(nanopub_uri, inputGraph)
nanopub.pubinfo.add((nanopub.assertion.identifier, app.NS.dc.created, Literal(datetime.utcnow())))
for nanopub in app.nanopub_manager.prepare(inputGraph):
app.nanopub_manager.publish(nanopub)
return '', 201
def _prep_graph(self, resource, about = None):
#print '_prep_graph', resource.identifier, about
content_type = resource.value(app.NS.ov.hasContentType)
#print resource.graph.serialize(format="nquads")
g = Graph(store=resource.graph.store,identifier=resource.identifier)
text = resource.value(app.NS.prov.value)
if content_type is not None and text is not None:
#print 'Content type:', content_type, resource.identifier
html = None
if content_type.value in ["text/html", "application/xhtml+xml"]:
html = Literal(text.value, datatype=RDF.HTML)
if content_type.value == 'text/markdown':
#print "Aha, markdown!"
#print text.value
html = markdown.markdown(text.value, extensions=['rdfa'])
attributes = ['vocab="%s"' % app.NS.local,
'base="%s"'% app.NS.local,
'prefix="%s"' % ' '.join(['%s: %s'% x for x in app.NS.prefixes.items()])]
if about is not None:
attributes.append('resource="%s"' % about)
html = '<div %s>%s</div>' % (' '.join(attributes), html)
html = Literal(html, datatype=RDF.HTML)
text = html
content_type = "text/html"
if html is not None:
resource.add(app.NS.sioc.content, html)
try:
g.parse(data=text, format='rdfa')
except:
pass
else:
try:
sadi.deserialize(g, text, content_type)
except:
pass
#print Graph(store=resource.graph.store).serialize(format="trig")
self.api.add_resource(NanopublicationResource, '/pub', '/pub/<ident>')
def config_str_to_obj(cfg):
if isinstance(cfg, basestring):
module = __import__('config', fromlist=[cfg])
return getattr(module, cfg)
return cfg
def app_factory(config, app_name, blueprints=None):
# you can use Empty directly if you wish
app = App(app_name)
config = config_str_to_obj(config)
#print dir(config)
app.configure(config)
if blueprints:
app.add_blueprint_list(blueprints)
app.setup()
return app
def heroku():
from config import Config, project_name
# setup app through APP_CONFIG envvar
return app_factory(Config, project_name)
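# Illustrative sketch (assumption, not in the original module): a local
# development entry point would typically build the app the same way heroku()
# does, using whichever config class the project defines, e.g.
#
#     if __name__ == '__main__':
#         from config import Config, project_name
#         app_factory(Config, project_name).run(debug=True)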
|
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Assumes fabric environment already set up """
import subprocess
import time
import os
from zipfile import ZipFile
import sh
import fabric.api
from fabric import operations
from path import path
from cosmo_tester.framework.testenv import TestCase
from cosmo_tester.framework.util import YamlPatcher, get_actual_keypath
from cosmo_tester.framework.util import get_blueprint_path
CHEF_SERVER_COOKBOOK_ZIP_URL = (
'https://github.com/opscode-cookbooks/chef-server/archive/'
'c588a4c401d3fac14f70d3285fe49eb4dccd9759.zip'
)
KNIFE_PARAMS = '-u admin -k ~/admin.pem'
def _use_cookbook(cookbook_name,
cookbook_local_tar_path):
""" Downloads cookbook from given url and uploads it to the Chef server """
fabric.api.run('mkdir -p ~/cookbooks/{0}'.format(cookbook_name))
fabric.api.put(local_path=cookbook_local_tar_path,
remote_path='/tmp/{0}.tar.gz'.format(cookbook_name))
fabric.api.run('tar -xzvf /tmp/{0}.tar.gz --strip-components=1'
' -C ~/cookbooks/{0}'.format(cookbook_name))
fabric.api.run('knife cookbook upload {0} --cookbook-path ~/cookbooks {1}'
.format(KNIFE_PARAMS, cookbook_name))
fabric.api.run('knife cookbook list {0} | grep -F {1}'
.format(KNIFE_PARAMS, cookbook_name))
def _userize_file(original_path):
""" Places the file under user's home directory and make it
permissions-wise accessible """
fabric.api.sudo("cp -a {path} ~{user}/ && chown {user} ~{user}/{basename}"
.format(path=original_path,
basename=str(path(original_path).basename()),
user=fabric.api.env['user']))
def setup_chef_server(local_dir, cookbooks):
_userize_file("/etc/chef-server/admin.pem")
for cb in cookbooks:
_use_cookbook(*cb)
_userize_file("/etc/chef-server/chef-validator.pem")
operations.get('~/chef-validator.pem', str(local_dir))
def find_node_state(node_name, nodes_state):
pfx = node_name + '_'
matches = [v for k, v in nodes_state.items() if k.startswith(pfx)]
if len(matches) != 1:
raise RuntimeError("Failed to find node {0}".format(node_name))
return matches[0]
def get_nodes_of_type(blueprint, type_):
return [node_obj for _, node_obj in blueprint.obj[
'node_templates'].iteritems() if node_obj['type'] == type_]
def update_blueprint(env, blueprint, hostname, userdata_vars=None):
hostname_base = 'system-test-{0}-{1}'.format(
time.strftime("%Y%m%d-%H%M"), hostname)
vm = get_nodes_of_type(blueprint, 'cloudify.openstack.nodes.Server')[0]
hostnames = [hostname_base]
users = []
vm_hostname = hostname_base
sg = '{0}{1}'.format(env.resources_prefix, 'chef_sg')
inputs = {
'flavor': env.flavor_name,
'image': env.ubuntu_trusty_image_name,
'server_name': vm_hostname,
'security_groups': [sg],
}
props = vm['properties']['server']
server_userdata = """#!/bin/bash -ex
grep -q "{hostname}" /etc/hosts || echo "127.0.0.1 {hostname}" >> /etc/hosts"""
client_userdata = """#!/bin/bash -ex
grep -q "{chef_server_hostname}" /etc/hosts || \
echo "{chef_server_ip} {chef_server_hostname}" >> /etc/hosts
"""
if 'userdata' in props:
if userdata_vars:
userdata = client_userdata.format(**userdata_vars)
else:
hostname = '{0}{1}'.format(env.resources_prefix,
vm_hostname).replace('_', '-')
userdata = server_userdata.format(hostname=hostname)
inputs['userdata'] = userdata
users.append('ubuntu')
return {'hostnames': hostnames, 'users': users}, inputs
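# update_blueprint() returns a (bp_info, inputs) pair: bp_info carries the
# generated hostnames and the login users for the VM, while inputs is passed
# straight to the deployment (see its use in ChefPluginClientTest.setUp below).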
class ChefPluginClientTest(TestCase):
def setUp(self, *args, **kwargs):
super(ChefPluginClientTest, self).setUp(*args, **kwargs)
agent_key_file = get_actual_keypath(self.env,
self.env.agent_key_path)
blueprint_dir = self.copy_blueprint('chef-plugin')
self.blueprint_yaml = (
blueprint_dir / 'chef-server-by-chef-solo-blueprint.yaml')
with YamlPatcher(self.blueprint_yaml) as blueprint:
bp_info, inputs = update_blueprint(self.env, blueprint,
'chef-server')
self.chef_server_hostname = '{0}{1}'.format(
self.env.resources_prefix.replace('_', '-'),
bp_info['hostnames'][0])
cookbooks_dir = blueprint_dir / 'cookbooks'
def run(*args, **kwargs):
return subprocess.check_output(*args, **kwargs)
with cookbooks_dir:
run([
'wget', '-q', '-O', 'chef-server.zip',
CHEF_SERVER_COOKBOOK_ZIP_URL,
])
ZipFile('chef-server.zip').extractall()
chef_cookbook_dir = cookbooks_dir.glob('chef-server-*')[0]
run(['mv', chef_cookbook_dir, 'chef-server'])
            # The next line is needed because Chef cookbooks are required
            # to declare all their dependencies, even if they don't use them.
            # We don't need git; it is only used in the chef-cookbook::dev recipe.
run(['sed', '-i', "/depends 'git'/d", 'chef-server/metadata.rb'])
with blueprint_dir:
run(['tar', 'czf', 'cookbooks.tar.gz', 'cookbooks'])
self.chef_server_id = self.test_id + '-chef-server'
id_ = self.chef_server_id
before, after = self.upload_deploy_and_execute_install(
id_, id_, inputs=inputs)
fip_node = find_node_state('ip', after['node_state'][id_])
self.chef_server_ip = fip_node['runtime_properties'][
'floating_ip_address']
fabric_env = fabric.api.env
fabric_env.update({
'timeout': 30,
'user': bp_info['users'][0],
'key_filename': str(agent_key_file),
'host_string': self.chef_server_ip,
})
cookbook_local_path = os.path.abspath(
os.path.join(get_blueprint_path('chef-plugin'),
'cookbook-create-file.tar.gz'))
setup_chef_server(blueprint_dir, [[
'create-file',
cookbook_local_path,
]])
self.blueprint_dir = blueprint_dir
def tearDown(self, *args, **kwargs):
self.execute_uninstall(self.chef_server_id)
super(ChefPluginClientTest, self).tearDown(*args, **kwargs)
def test_chef_client(self):
blueprint_dir = self.blueprint_dir
self.blueprint_yaml = blueprint_dir / 'chef-client-test-blueprint.yaml'
with YamlPatcher(self.blueprint_yaml) as blueprint:
_, inputs = update_blueprint(self.env, blueprint, 'chef-server', {
'chef_server_ip': self.chef_server_ip,
'chef_server_hostname': self.chef_server_hostname,
})
chef_node = get_nodes_of_type(blueprint,
'cloudify.chef.nodes.DBMS')[0]
chef_config = chef_node['properties']['chef_config']
chef_config['chef_server_url'] = 'https://{0}:443'.format(
self.chef_server_ip)
chef_config['validation_client_name'] = 'chef-validator'
chef_config['validation_key'] = (
path(blueprint_dir) / 'chef-validator.pem').text()
id_ = self.test_id + '-chef-client-' + str(int(time.time()))
before, after = self.upload_deploy_and_execute_install(
id_, id_, inputs=inputs)
fip_node = find_node_state('ip', after['node_state'][id_])
chef_client_ip = fip_node['runtime_properties']['floating_ip_address']
fabric_env = fabric.api.env
fabric_env.update({
                # XXX: relying on the previously configured fabric user for
                # this connection only works by accident.
                # TODO: take the user from update_blueprint()'s bp_info,
                # as in setUp()
'host_string': chef_client_ip,
})
out = fabric.api.run('cat /tmp/blueprint.txt')
self.assertEquals(out, 'Great success!')
self.execute_uninstall(id_)
class ChefPluginSoloTest(TestCase):
def setUp(self, *args, **kwargs):
super(ChefPluginSoloTest, self).setUp(*args, **kwargs)
self.blueprint_dir = self.copy_blueprint('chef-plugin')
# Get resources
with self.blueprint_dir:
for res in 'cookbooks', 'data_bags', 'environments', 'roles':
sh.tar('czf', res+'.tar.gz', res)
def test_chef_solo(self):
agent_key_file = get_actual_keypath(self.env,
self.env.agent_key_path)
blueprint_dir = self.blueprint_dir
self.blueprint_yaml = blueprint_dir / 'chef-solo-test-blueprint.yaml'
with YamlPatcher(self.blueprint_yaml) as blueprint:
bp_info, inputs = update_blueprint(self.env, blueprint,
'chef-solo')
id_ = self.test_id + '-chef-solo-' + str(int(time.time()))
before, after = self.upload_deploy_and_execute_install(
id_, id_, inputs=inputs)
fip_node = find_node_state('ip', after['node_state'][id_])
chef_solo_ip = fip_node['runtime_properties']['floating_ip_address']
fabric_env = fabric.api.env
fabric_env.update({
'timeout': 30,
'user': bp_info['users'][0],
'key_filename': str(agent_key_file),
'host_string': chef_solo_ip,
})
expected_files_contents = (
('/tmp/blueprint.txt', 'Great success number #2 !'),
('/tmp/blueprint2.txt', '/tmp/blueprint.txt'),
('/tmp/chef_node_env.e1.txt', 'env:e1'),
('/tmp/chef_node_data_bag_user.db1.i1.txt', 'db1-i1-k1'),
)
for file_name, expected_content in expected_files_contents:
actual_content = fabric.api.run('cat ' + file_name)
msg = "File '{0}' should have content '{1}' but has '{2}'".format(
file_name, expected_content, actual_content)
self.assertEquals(actual_content, expected_content, msg)
self.execute_uninstall(id_)
|
|
#!/usr/bin/env python
"""
exceptions.py
"""
################################################################################
#
# exceptions.py
#
#
# Copyright (c) 10/9/2009 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
import sys, os
from collections import defaultdict
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Exceptions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#if __name__ != '__main__':
# import task
class error_task(Exception):
def __init__(self, *errmsg):
Exception.__init__(self, *errmsg)
# list of associated tasks
self.tasks = set()
# error message
self.main_msg = ""
def get_main_msg(self):
"""
Make main message with lists of task names
Prefix with new lines for added emphasis
"""
        # turn task names into "def xxx(...):" format
task_names = "\n".join("task = %r" % t._name for t in self.tasks)
if len(self.main_msg):
return "\n\n" + self.main_msg + " for\n\n%s\n" % task_names
else:
return "\n\n%s\n" % task_names
def __str__(self):
#indent
msg = self.get_main_msg() + " ".join(map(str, self.args))
return " " + msg.replace("\n", "\n ")
def specify_task (self, task, main_msg):
self.tasks.add(task)
self.main_msg = main_msg
return self
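    # Returning self lets the context be attached inline at the raise site, e.g.
    #     raise error_task().specify_task(task, "Exceptions running jobs")
    # (illustrative; error_task_contruction below does the same thing from its
    #  constructor).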
class error_task_contruction(error_task):
"""
    Exceptions when constructing pipeline tasks
"""
def __init__(self, task, main_msg, *errmsg):
error_task.__init__(self, *errmsg)
self.specify_task (task, main_msg)
class RethrownJobError(error_task):
"""
Wrap up one or more exceptions rethrown across process boundaries
    See multiprocessing.managers.Server.handle_request/serve_client for an analogous function
"""
def __init__(self, job_exceptions=[]):
error_task.__init__(self)
self.args = list(job_exceptions)
def __len__(self):
return len(self.args)
def append(self, job_exception):
self.args = self.args + (job_exception, )
def task_to_func_name (self, task_name):
if "mkdir " in task_name:
return task_name
return "def %s(...):" % task_name.replace("__main__.", "")
def get_nth_exception_str (self, nn = -1):
if nn == -1:
nn = len(self.args) - 1
task_name, job_name, exception_name, exception_value, exception_stack = self.args[nn]
message = "\nException #%d\n" % (nn + 1)
message += " '%s%s' raised in ...\n" % (exception_name, exception_value)
if task_name:
message += " Task = %s\n %s\n\n" % (self.task_to_func_name(task_name), job_name)
message += "%s\n" % (exception_stack, )
return message.replace("\n", "\n ")
def __str__(self):
message = ["\nOriginal exception%s:\n" % ("s" if len(self.args) > 1 else "")]
for ii in range(len(self.args)):
message += self.get_nth_exception_str (ii)
#
# For each exception:
# turn original exception stack message into an indented string
#
return (self.get_main_msg()).replace("\n", "\n ") + "".join(message)
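    # Illustrative note: each entry held in self.args is expected to be a
    # 5-tuple, as unpacked in get_nth_exception_str() above, e.g.
    #     ("task1", "[a.1 -> a.2]", "IOError", "(2)",
    #      "Traceback (most recent call last): ...")
    # see Test_exceptions.test_RethrownJobError at the bottom of this file.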
class error_input_file_does_not_match(error_task):
pass
class fatal_error_input_file_does_not_match(error_task):
pass
class task_FilesArgumentsError(error_task):
pass
class task_FilesreArgumentsError(error_task):
pass
class MissingInputFileError(error_task):
pass
class JobSignalledBreak(error_task):
pass
class PostTaskArgumentError(error_task):
pass
class JobsLimitArgumentError(error_task):
pass
class error_task_get_output(error_task_contruction):
pass
class error_task_transform_inputs_multiple_args(error_task_contruction):
pass
class error_task_transform(error_task_contruction):
pass
class error_task_product(error_task_contruction):
pass
class error_task_mkdir(error_task_contruction):
pass
class error_task_permutations(error_task_contruction):
pass
class error_task_combinations(error_task_contruction):
pass
class error_task_combinations_with_replacement(error_task_contruction):
pass
class error_task_merge(error_task_contruction):
pass
class error_task_subdivide(error_task_contruction):
pass
class error_task_originate(error_task_contruction):
pass
class error_task_collate(error_task_contruction):
pass
class error_task_collate_inputs_multiple_args(error_task_contruction):
pass
class error_task_split(error_task_contruction):
pass
class error_task_files_re(error_task_contruction):
pass
class error_task_files(error_task_contruction):
pass
class error_task_parallel(error_task_contruction):
pass
class error_making_directory(error_task):
pass
class error_duplicate_task_name(error_task):
pass
class error_decorator_args(error_task):
pass
class error_task_name_lookup_failed(error_task):
pass
class error_task_decorator_takes_no_args(error_task):
pass
class error_function_is_not_a_task(error_task):
pass
class error_ambiguous_task(error_task):
pass
class error_not_a_pipeline(error_task):
pass
class error_circular_dependencies(error_task):
pass
class error_not_a_directory(error_task):
pass
class error_missing_output(error_task):
pass
class error_job_signalled_interrupt(error_task):
pass
class error_node_not_task(error_task):
pass
class error_missing_runtime_parameter(error_task):
pass
class error_unescaped_regular_expression_forms(error_task):
pass
class error_checksum_level(error_task):
pass
class error_missing_args(error_task):
pass
class error_too_many_args(error_task):
pass
class error_inputs_multiple_args(error_task):
pass
class error_set_input(error_task):
pass
class error_set_output(error_task):
pass
class error_no_head_tasks(error_task):
pass
class error_no_tail_tasks(error_task):
pass
class error_executable_str(error_task):
pass
class error_extras_wrong_type(error_task):
pass
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Testing
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':
import unittest
#
# minimal task object to test exceptions
#
class task:
class Task (object):
"""
dummy task
"""
_action_mkdir = 1
def __init__(self, _name, _action_type = 0):
self._action_type = _action_type
self._name = _name
class Test_exceptions(unittest.TestCase):
# self.assertEqual(self.seq, range(10))
# self.assert_(element in self.seq)
# self.assertRaises(ValueError, random.sample, self.seq, 20)
def test_error_task(self):
"""
test
"""
fake_task1 = task.Task("task1")
fake_task2 = task.Task("task2")
fake_mkdir_task3 = task.Task("task3", task.Task._action_mkdir)
fake_mkdir_task4 = task.Task("task4", task.Task._action_mkdir)
e = error_task()
e.specify_task (fake_task1 , "Some message 0")
e.specify_task (fake_task2 , "Some message 1")
e.specify_task (fake_mkdir_task3, "Some message 2")
e.specify_task (fake_mkdir_task4, "Some message 3")
self.assertEqual(str(e),
"""
Some message 3 for
'def task1(...):'
'def task2(...):'
task3
task4
""")
def test_RethrownJobError(self):
"""
test
"""
        # task_name, job_name, exception_name, exception_value, exception_stack
exception_data = [
[
"task1",
"[[temp_branching_dir/a.2, a.1] -> temp_branching_dir/a.3]",
"ruffus.task.MissingInputFileError",
"(instance value)",
"Traceback (most recent call last):\n File \"what.file.py\", line 333, in some_func\n somecode(sfasf)\n"
],
[
"task1",
"[None -> [temp_branching_dir/a.1, temp_branching_dir/b.1, temp_branching_dir/c.1]]",
"exceptions.ZeroDivisionError:",
"(1)",
"Traceback (most recent call last):\n File \"anotherfile.py\", line 345, in other_func\n badcode(rotten)\n"
]
]
e = RethrownJobError(exception_data)
fake_task1 = task.Task("task1")
fake_task2 = task.Task("task2")
fake_mkdir_task3 = task.Task("task3", task.Task._action_mkdir)
fake_mkdir_task4 = task.Task("task4", task.Task._action_mkdir)
e.specify_task (fake_task1 , "Exceptions running jobs")
e.specify_task (fake_task2 , "Exceptions running jobs")
e.specify_task (fake_mkdir_task3, "Exceptions running jobs")
e.specify_task (fake_mkdir_task4, "Exceptions running jobs")
self.assertEqual(str(e),
"""
Exceptions running jobs for
'def task1(...):'
'def task2(...):'
task3
task4
Original exceptions:
Exception #1
ruffus.task.MissingInputFileError(instance value):
for task1.[[temp_branching_dir/a.2, a.1] -> temp_branching_dir/a.3]
Traceback (most recent call last):
File "what.file.py", line 333, in some_func
somecode(sfasf)
Exception #2
exceptions.ZeroDivisionError:(1):
for task1.[None -> [temp_branching_dir/a.1, temp_branching_dir/b.1, temp_branching_dir/c.1]]
Traceback (most recent call last):
File "anotherfile.py", line 345, in other_func
badcode(rotten)
""")
#
# debug code not run if called as a module
#
if __name__ == '__main__':
if sys.argv.count("--debug"):
sys.argv.remove("--debug")
unittest.main()
|
|
"""The tests for the device tracker component."""
# pylint: disable=protected-access
import asyncio
import json
import logging
from unittest.mock import call
from datetime import datetime, timedelta
import os
from asynctest import patch
import pytest
from homeassistant.components import zone
from homeassistant.core import callback, State
from homeassistant.setup import async_setup_component
from homeassistant.helpers import discovery
from homeassistant.loader import get_component
import homeassistant.util.dt as dt_util
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_ENTITY_PICTURE, ATTR_FRIENDLY_NAME, ATTR_HIDDEN,
STATE_HOME, STATE_NOT_HOME, CONF_PLATFORM, ATTR_ICON)
import homeassistant.components.device_tracker as device_tracker
from tests.components.device_tracker import common
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.json import JSONEncoder
from tests.common import (
async_fire_time_changed, patch_yaml_files, assert_setup_component,
mock_restore_cache)
TEST_PLATFORM = {device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}}
_LOGGER = logging.getLogger(__name__)
@pytest.fixture
def yaml_devices(hass):
"""Get a path for storing yaml devices."""
yaml_devices = hass.config.path(device_tracker.YAML_DEVICES)
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
yield yaml_devices
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
async def test_is_on(hass):
"""Test is_on method."""
entity_id = device_tracker.ENTITY_ID_FORMAT.format('test')
hass.states.async_set(entity_id, STATE_HOME)
assert device_tracker.is_on(hass, entity_id)
hass.states.async_set(entity_id, STATE_NOT_HOME)
assert not device_tracker.is_on(hass, entity_id)
async def test_reading_broken_yaml_config(hass):
"""Test when known devices contains invalid data."""
files = {'empty.yaml': '',
'nodict.yaml': '100',
'badkey.yaml': '@:\n name: Device',
'noname.yaml': 'my_device:\n',
'allok.yaml': 'My Device:\n name: Device',
'oneok.yaml': ('My Device!:\n name: Device\n'
'bad_device:\n nme: Device')}
args = {'hass': hass, 'consider_home': timedelta(seconds=60)}
with patch_yaml_files(files):
assert await device_tracker.async_load_config(
'empty.yaml', **args) == []
assert await device_tracker.async_load_config(
'nodict.yaml', **args) == []
assert await device_tracker.async_load_config(
'noname.yaml', **args) == []
assert await device_tracker.async_load_config(
'badkey.yaml', **args) == []
res = await device_tracker.async_load_config('allok.yaml', **args)
assert len(res) == 1
assert res[0].name == 'Device'
assert res[0].dev_id == 'my_device'
res = await device_tracker.async_load_config('oneok.yaml', **args)
assert len(res) == 1
assert res[0].name == 'Device'
assert res[0].dev_id == 'my_device'
async def test_reading_yaml_config(hass, yaml_devices):
"""Test the rendering of the YAML configuration."""
dev_id = 'test'
device = device_tracker.Device(
hass, timedelta(seconds=180), True, dev_id,
'AB:CD:EF:GH:IJ', 'Test name', picture='http://test.picture',
hide_if_away=True, icon='mdi:kettle')
device_tracker.update_config(yaml_devices, dev_id, device)
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
config = (await device_tracker.async_load_config(yaml_devices, hass,
device.consider_home))[0]
assert device.dev_id == config.dev_id
assert device.track == config.track
assert device.mac == config.mac
assert device.config_picture == config.config_picture
assert device.away_hide == config.away_hide
assert device.consider_home == config.consider_home
assert device.icon == config.icon
# pylint: disable=invalid-name
@patch('homeassistant.components.device_tracker._LOGGER.warning')
async def test_track_with_duplicate_mac_dev_id(mock_warning, hass):
"""Test adding duplicate MACs or device IDs to DeviceTracker."""
devices = [
device_tracker.Device(hass, True, True, 'my_device', 'AB:01',
'My device', None, None, False),
device_tracker.Device(hass, True, True, 'your_device',
'AB:01', 'Your device', None, None, False)]
device_tracker.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert mock_warning.call_count == 1, \
"The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert 'Duplicate device MAC' in args[0], \
'Duplicate MAC warning expected'
mock_warning.reset_mock()
devices = [
device_tracker.Device(hass, True, True, 'my_device',
'AB:01', 'My device', None, None, False),
device_tracker.Device(hass, True, True, 'my_device',
None, 'Your device', None, None, False)]
device_tracker.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert mock_warning.call_count == 1, \
"The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert 'Duplicate device IDs' in args[0], \
'Duplicate device IDs warning expected'
async def test_setup_without_yaml_file(hass):
"""Test with no YAML file."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
async def test_gravatar(hass):
"""Test the Gravatar generation."""
dev_id = 'test'
device = device_tracker.Device(
hass, timedelta(seconds=180), True, dev_id,
'AB:CD:EF:GH:IJ', 'Test name', gravatar='[email protected]')
gravatar_url = ("https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar")
assert device.config_picture == gravatar_url
async def test_gravatar_and_picture(hass):
"""Test that Gravatar overrides picture."""
dev_id = 'test'
device = device_tracker.Device(
hass, timedelta(seconds=180), True, dev_id,
'AB:CD:EF:GH:IJ', 'Test name', picture='http://test.picture',
gravatar='[email protected]')
gravatar_url = ("https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar")
assert device.config_picture == gravatar_url
@patch(
'homeassistant.components.device_tracker.DeviceTracker.see')
@patch(
'homeassistant.components.demo.device_tracker.setup_scanner',
autospec=True)
async def test_discover_platform(mock_demo_setup_scanner, mock_see, hass):
"""Test discovery of device_tracker demo platform."""
assert device_tracker.DOMAIN not in hass.config.components
await discovery.async_load_platform(
hass, device_tracker.DOMAIN, 'demo', {'test_key': 'test_val'},
{'demo': {}})
await hass.async_block_till_done()
assert device_tracker.DOMAIN in hass.config.components
assert mock_demo_setup_scanner.called
assert mock_demo_setup_scanner.call_args[0] == (
hass, {}, mock_see, {'test_key': 'test_val'})
async def test_update_stale(hass):
"""Test stalled update."""
scanner = get_component(hass, 'device_tracker.test').SCANNER
scanner.reset()
scanner.come_home('DEV1')
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with patch('homeassistant.components.device_tracker.dt_util.utcnow',
return_value=register_time):
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'test',
device_tracker.CONF_CONSIDER_HOME: 59,
}})
await hass.async_block_till_done()
assert STATE_HOME == \
hass.states.get('device_tracker.dev1').state
scanner.leave_home('DEV1')
with patch('homeassistant.components.device_tracker.dt_util.utcnow',
return_value=scan_time):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
assert STATE_NOT_HOME == \
hass.states.get('device_tracker.dev1').state
async def test_entity_attributes(hass, yaml_devices):
"""Test the entity attributes."""
dev_id = 'test_entity'
entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
friendly_name = 'Paulus'
picture = 'http://placehold.it/200x200'
icon = 'mdi:kettle'
device = device_tracker.Device(
hass, timedelta(seconds=180), True, dev_id, None,
friendly_name, picture, hide_if_away=True, icon=icon)
device_tracker.update_config(yaml_devices, dev_id, device)
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
attrs = hass.states.get(entity_id).attributes
assert friendly_name == attrs.get(ATTR_FRIENDLY_NAME)
assert icon == attrs.get(ATTR_ICON)
assert picture == attrs.get(ATTR_ENTITY_PICTURE)
async def test_device_hidden(hass, yaml_devices):
"""Test hidden devices."""
dev_id = 'test_entity'
entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
device = device_tracker.Device(
hass, timedelta(seconds=180), True, dev_id, None,
hide_if_away=True)
device_tracker.update_config(yaml_devices, dev_id, device)
scanner = get_component(hass, 'device_tracker.test').SCANNER
scanner.reset()
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
assert hass.states.get(entity_id).attributes.get(ATTR_HIDDEN)
async def test_group_all_devices(hass, yaml_devices):
"""Test grouping of devices."""
dev_id = 'test_entity'
entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
device = device_tracker.Device(
hass, timedelta(seconds=180), True, dev_id, None,
hide_if_away=True)
device_tracker.update_config(yaml_devices, dev_id, device)
scanner = get_component(hass, 'device_tracker.test').SCANNER
scanner.reset()
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
await hass.async_block_till_done()
state = hass.states.get(device_tracker.ENTITY_ID_ALL_DEVICES)
assert state is not None
assert STATE_NOT_HOME == state.state
assert (entity_id,) == state.attributes.get(ATTR_ENTITY_ID)
@patch('homeassistant.components.device_tracker.DeviceTracker.async_see')
async def test_see_service(mock_see, hass):
"""Test the see service with a unicode dev_id and NO MAC."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
params = {
'dev_id': 'some_device',
'host_name': 'example.com',
'location_name': 'Work',
'gps': [.3, .8],
'attributes': {
'test': 'test'
}
}
common.async_see(hass, **params)
await hass.async_block_till_done()
    assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
mock_see.reset_mock()
params['dev_id'] += chr(233) # e' acute accent from icloud
common.async_see(hass, **params)
await hass.async_block_till_done()
    assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
async def test_new_device_event_fired(hass):
"""Test that the device tracker will fire an event."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
test_events = []
@callback
def listener(event):
"""Record that our event got called."""
test_events.append(event)
hass.bus.async_listen("device_tracker_new_device", listener)
common.async_see(hass, 'mac_1', host_name='hello')
common.async_see(hass, 'mac_1', host_name='hello')
await hass.async_block_till_done()
assert len(test_events) == 1
# Assert we can serialize the event
json.dumps(test_events[0].as_dict(), cls=JSONEncoder)
assert test_events[0].data == {
'entity_id': 'device_tracker.hello',
'host_name': 'hello',
'mac': 'MAC_1',
}
# pylint: disable=invalid-name
async def test_not_write_duplicate_yaml_keys(hass, yaml_devices):
"""Test that the device tracker will not generate invalid YAML."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
common.async_see(hass, 'mac_1', host_name='hello')
common.async_see(hass, 'mac_2', host_name='hello')
await hass.async_block_till_done()
config = await device_tracker.async_load_config(yaml_devices, hass,
timedelta(seconds=0))
assert len(config) == 2
# pylint: disable=invalid-name
async def test_not_allow_invalid_dev_id(hass, yaml_devices):
"""Test that the device tracker will not allow invalid dev ids."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
common.async_see(hass, dev_id='hello-world')
config = await device_tracker.async_load_config(yaml_devices, hass,
timedelta(seconds=0))
assert len(config) == 0
async def test_see_state(hass, yaml_devices):
"""Test device tracker see records state correctly."""
assert await async_setup_component(hass, device_tracker.DOMAIN,
TEST_PLATFORM)
params = {
'mac': 'AA:BB:CC:DD:EE:FF',
'dev_id': 'some_device',
'host_name': 'example.com',
'location_name': 'Work',
'gps': [.3, .8],
'gps_accuracy': 1,
'battery': 100,
'attributes': {
'test': 'test',
'number': 1,
},
}
common.async_see(hass, **params)
await hass.async_block_till_done()
config = await device_tracker.async_load_config(yaml_devices, hass,
timedelta(seconds=0))
assert len(config) == 1
state = hass.states.get('device_tracker.example_com')
attrs = state.attributes
assert state.state == 'Work'
assert state.object_id == 'example_com'
assert state.name == 'example.com'
assert attrs['friendly_name'] == 'example.com'
assert attrs['battery'] == 100
assert attrs['latitude'] == 0.3
assert attrs['longitude'] == 0.8
assert attrs['test'] == 'test'
assert attrs['gps_accuracy'] == 1
assert attrs['source_type'] == 'gps'
assert attrs['number'] == 1
async def test_see_passive_zone_state(hass):
"""Test that the device tracker sets gps for passive trackers."""
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with assert_setup_component(1, zone.DOMAIN):
zone_info = {
'name': 'Home',
'latitude': 1,
'longitude': 2,
'radius': 250,
'passive': False
}
await async_setup_component(hass, zone.DOMAIN, {
'zone': zone_info
})
scanner = get_component(hass, 'device_tracker.test').SCANNER
scanner.reset()
scanner.come_home('dev1')
with patch('homeassistant.components.device_tracker.dt_util.utcnow',
return_value=register_time):
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'test',
device_tracker.CONF_CONSIDER_HOME: 59,
}})
await hass.async_block_till_done()
state = hass.states.get('device_tracker.dev1')
attrs = state.attributes
assert STATE_HOME == state.state
assert state.object_id == 'dev1'
assert state.name == 'dev1'
assert attrs.get('friendly_name') == 'dev1'
assert attrs.get('latitude') == 1
assert attrs.get('longitude') == 2
assert attrs.get('gps_accuracy') == 0
assert attrs.get('source_type') == \
device_tracker.SOURCE_TYPE_ROUTER
scanner.leave_home('dev1')
with patch('homeassistant.components.device_tracker.dt_util.utcnow',
return_value=scan_time):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
state = hass.states.get('device_tracker.dev1')
attrs = state.attributes
assert STATE_NOT_HOME == state.state
assert state.object_id == 'dev1'
assert state.name == 'dev1'
assert attrs.get('friendly_name') == 'dev1'
        assert attrs.get('latitude') is None
        assert attrs.get('longitude') is None
        assert attrs.get('gps_accuracy') is None
assert attrs.get('source_type') == \
device_tracker.SOURCE_TYPE_ROUTER
@patch('homeassistant.components.device_tracker._LOGGER.warning')
async def test_see_failures(mock_warning, hass, yaml_devices):
"""Test that the device tracker see failures."""
tracker = device_tracker.DeviceTracker(
hass, timedelta(seconds=60), 0, {}, [])
# MAC is not a string (but added)
await tracker.async_see(mac=567, host_name="Number MAC")
    # No device id or MAC (not added)
with pytest.raises(HomeAssistantError):
await tracker.async_see()
assert mock_warning.call_count == 0
# Ignore gps on invalid GPS (both added & warnings)
await tracker.async_see(mac='mac_1_bad_gps', gps=1)
await tracker.async_see(mac='mac_2_bad_gps', gps=[1])
await tracker.async_see(mac='mac_3_bad_gps', gps='gps')
await hass.async_block_till_done()
config = await device_tracker.async_load_config(yaml_devices, hass,
timedelta(seconds=0))
assert mock_warning.call_count == 3
assert len(config) == 4
@asyncio.coroutine
def test_async_added_to_hass(hass):
"""Test restoring state."""
attr = {
device_tracker.ATTR_LONGITUDE: 18,
        device_tracker.ATTR_LATITUDE: -33,
device_tracker.ATTR_SOURCE_TYPE: 'gps',
device_tracker.ATTR_GPS_ACCURACY: 2,
device_tracker.ATTR_BATTERY: 100
}
mock_restore_cache(hass, [State('device_tracker.jk', 'home', attr)])
path = hass.config.path(device_tracker.YAML_DEVICES)
files = {
path: 'jk:\n name: JK Phone\n track: True',
}
with patch_yaml_files(files):
yield from device_tracker.async_setup(hass, {})
state = hass.states.get('device_tracker.jk')
assert state
assert state.state == 'home'
for key, val in attr.items():
atr = state.attributes.get(key)
assert atr == val, "{}={} expected: {}".format(key, atr, val)
@asyncio.coroutine
def test_bad_platform(hass):
"""Test bad platform."""
config = {
'device_tracker': [{
'platform': 'bad_platform'
}]
}
with assert_setup_component(0, device_tracker.DOMAIN):
assert (yield from device_tracker.async_setup(hass, config))
async def test_adding_unknown_device_to_config(mock_device_tracker_conf, hass):
"""Test the adding of unknown devices to configuration file."""
scanner = get_component(hass, 'device_tracker.test').SCANNER
scanner.reset()
scanner.come_home('DEV1')
await async_setup_component(hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}})
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
device = mock_device_tracker_conf[0]
assert device.dev_id == 'dev1'
assert device.track
async def test_picture_and_icon_on_see_discovery(mock_device_tracker_conf,
hass):
"""Test that picture and icon are set in initial see."""
tracker = device_tracker.DeviceTracker(
hass, timedelta(seconds=60), False, {}, [])
await tracker.async_see(dev_id=11, picture='pic_url', icon='mdi:icon')
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].icon == 'mdi:icon'
assert mock_device_tracker_conf[0].entity_picture == 'pic_url'
async def test_default_hide_if_away_is_used(mock_device_tracker_conf, hass):
"""Test that default track_new is used."""
tracker = device_tracker.DeviceTracker(
hass, timedelta(seconds=60), False,
{device_tracker.CONF_AWAY_HIDE: True}, [])
await tracker.async_see(dev_id=12)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].away_hide
async def test_backward_compatibility_for_track_new(mock_device_tracker_conf,
hass):
"""Test backward compatibility for track new."""
tracker = device_tracker.DeviceTracker(
hass, timedelta(seconds=60), False,
{device_tracker.CONF_TRACK_NEW: True}, [])
await tracker.async_see(dev_id=13)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
async def test_old_style_track_new_is_skipped(mock_device_tracker_conf, hass):
"""Test old style config is skipped."""
tracker = device_tracker.DeviceTracker(
hass, timedelta(seconds=60), None,
{device_tracker.CONF_TRACK_NEW: False}, [])
await tracker.async_see(dev_id=14)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
def test_see_schema_allowing_ios_calls():
"""Test SEE service schema allows extra keys.
Temp work around because the iOS app sends incorrect data.
"""
device_tracker.SERVICE_SEE_PAYLOAD_SCHEMA({
'dev_id': 'Test',
"battery": 35,
"battery_status": 'Not Charging',
"gps": [10.0, 10.0],
"gps_accuracy": 300,
"hostname": 'beer',
})
|
|
import os
import sys
import getpass
from time import time
import argparse
import textwrap
import json
import io
from insta_logging.insta_logging import insta_logger
__author__ = 'sehlat57'
def load_ignore_list():
"""
Get names of users from file to be excluded from liking list
:return: list of users to be ignored, empty list if file structure is
modified or not found
"""
    dir_name = os.path.dirname(os.path.abspath(__file__))
    ignore_path = os.path.join(dir_name, 'ignore_list.txt')
    if os.path.exists(ignore_path):
        with open(ignore_path, 'r') as ignore_list_file:
full_text = ignore_list_file.read()
if full_text.find('Ignore list:') != -1:
start_index = full_text.index('Ignore list:') + len(
'Ignore list:')
list_raw = full_text[start_index:].split(',')
insta_logger.info('Ignore list extracted')
print('Ignore list extracted')
return [account.strip() for account in list_raw]
print('"Ignore list.txt" was edited incorrectly. '
'Can\'t create ignore list.'
' Please see description.')
insta_logger.error('Ignore list file incorrectly edited')
return []
print('No ignore list found')
insta_logger.error('No ignore list found')
return []
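# Illustrative sketch of the ignore_list.txt layout the parser above expects
# (the file name and the 'Ignore list:' marker come from the code; the
# account names are made up):
#
#   Ignore list:
#   some_account, another_account, third_account
#
# Everything after the marker is split on commas and stripped, so spacing
# and trailing newlines are tolerated.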
class PassAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
values = getpass.getpass()
setattr(namespace, self.dest, values)
def parse_credentials():
"""
Parse arguments:
required:
-login - instagram user name
-password - password for the account
optional:
-number_of_posts - number of posts to like (default: 12)
-ignore_limit - ignore 1000 likes by bot per day limit (default: False)
:return: parsed arguments from command line
"""
parser = argparse.ArgumentParser(
prog='InstaLikeBot',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
            --------------------------------------------
            Instagram bot for liking posts of the accounts you follow.
            You need to enter your Instagram credentials
            and the number of posts to check for your likes
            (default number is 12).
            --------------------------------------------
            Be warned: there is a possibility of being banned
            if the bot likes too many posts (1000+ per day).
            If you want to ignore the '1000 likes per day' limit,
            add the --ignore_limit argument.
            --------------------------------------------
            Example:
            python3 instabot.py -l ACCOUNT_NAME -p PASSWORD -n 12 --ignore_limit
            ____________________________________________
'''))
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-l', '--login',
required=True,
help='\nInstagram account name')
required.add_argument('-p', '--password',
action=PassAction,
nargs='?',
dest='password',
help='\nInstagram password',
required=True)
required.add_argument('-n', '--number_of_posts',
type=int,
default=12,
help='\nNumber of posts to check for user likes, '
'default value is 12',)
required.add_argument('--ignore_limit',
action='store_true',
default=False,
help='\nAdd --ignore_limit if you want to '
'ignore limit of 1000 likes per day')
args = parser.parse_args()
if args.number_of_posts <= 0:
parser.error('Number of posts must be greater than 0')
return args
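# Hedged usage sketch for the parser above (account name and values are
# illustrative):
#
#   python3 instabot.py -l my_account -p              # prompts via getpass
#   python3 instabot.py -l my_account -p secret -n 24 --ignore_limit
#
# Passing -p without a value triggers PassAction, which falls back to
# getpass.getpass() so the password never lands in shell history.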
def draw_line_separator():
"""
Draw line in stdout
:return:
"""
print(''.center(50, '-'))
def draw_worker_text(text):
"""
Formatting text
:param text: text to format
:return:
"""
print('@{}@'.format(text).center(50))
def progress_bar(completion, total, start_time, width=20):
"""
Print progress bar in stdout
:param completion: extracted/liked posts to the moment
:param total: total posts
:param start_time: time stamp before first appear of progress bar in stdout
:param width: width of progress bar
:return:
"""
    progress = int(completion / total * 100)
seconds_passed = time() - start_time
time_stamp = '{:02.0f}:{:02.0f}:{:02.0f}'.format(
seconds_passed // 3600, seconds_passed % 3600 // 60,
seconds_passed % 60)
bar = '\x1b[1;30;46m \x1b[0m' * int(
width * completion / total) + ' ' * int(
width - (width * completion / total))
show = ('\r{}%|{}|{}/{}|time passed: {}'.format(
progress if progress < 100 else 100, bar, completion, total,
time_stamp))
sys.stdout.write(show)
sys.stdout.flush()
if completion >= total:
sys.stdout.write('\n')
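# Minimal usage sketch for progress_bar (the work step is hypothetical):
#
#   total = len(posts_to_like)               # illustrative
#   start = time()
#   for done, post in enumerate(posts_to_like, 1):
#       like_one_post(post)                  # placeholder for real work
#       progress_bar(done, total, start)
#
# Passing the same start_time on every call keeps the elapsed clock stable.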
def write_likes(total_likes):
"""
Write number of likes made by bot and current time stamp to json file:
{"total_likes": "total likes", "timestamp": "timestamp"}
:param total_likes: number of likes
:return:
"""
    dir_name = os.path.dirname(os.path.abspath(__file__))
    likes = {'total_likes': total_likes, 'timestamp': time()}
    with open(os.path.join(dir_name, 'likes_count.json'),
              'w') as likes_file:
        # The file is opened in write mode, so it is truncated and the
        # current counters are written out unconditionally.
        json.dump(likes, likes_file)
    insta_logger.info('Likes made by bot written to file, '
                      'total posts liked: {}'.format(total_likes))
def check_today_likes(previous_timestamp):
"""
Read number of likes made by bot from json file,
reset total_likes if bot didn't like posts in 24 hours
:return: number of likes made by bot in 24 hours, 0 if bot didn't like
posts in 24 hours
"""
dir_name = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(dir_name, 'likes_count.json')
if os.path.exists(file_path):
with open(file_path, 'r+') as likes_file:
try:
likes = json.load(likes_file)
likes_today = likes.get('total_likes')
time_passed = time() - previous_timestamp
if likes_today:
if time_passed <= 24 * 60 * 60:
return likes_today
likes['timestamp'] = time()
likes['total_likes'] = 0
likes_file.seek(0)
likes_file.truncate()
json.dump(likes, likes_file)
return 0
return 0
except io.UnsupportedOperation:
return 0
return 0
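# write_likes() and check_today_likes() share likes_count.json; a
# representative payload (values illustrative) looks like:
#
#   {"total_likes": 37, "timestamp": 1514764800.0}
#
# check_today_likes() returns the stored count while fewer than 24 hours
# have passed since the previous_timestamp it is given; once a stored
# non-zero count is older than that, the file is rewritten with a zeroed
# count and a fresh timestamp.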
def draw_logo():
print("""
_ _ _ _
(_) _ __ ___ | |_ __ _ | |__ ___ | |_
| || '_ \ / __|| __|/ _` || '_ \ / _ \ | __|
| || | | |\__ \| |_| (_| || |_) || (_) || |_
|_||_| |_||___/ \__|\__,_||_.__/ \___/ \__|
""")
|
|
from ietf.doc.models import Document
from ietf.group.models import Group
from ietf.person.models import Person
from ietf.doc.models import State
class RuleManager(object):
codename = ''
description = ''
def __init__(self, value):
self.value = self.get_value(value)
def get_value(self, value):
return value
def get_documents(self):
return Document.objects.none()
def options(self):
return None
def show_value(self):
return self.value
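# Each rule below subclasses RuleManager and overrides get_documents(),
# plus options()/show_value() when the UI needs a choice list or a friendly
# label.  A hedged sketch of the call pattern (the acronym is illustrative):
#
#   rule = WgAsociatedRule('httpbis')
#   drafts = rule.get_documents()   # active I-Ds in that WG
#   label = rule.show_value()       # human-readable group name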
class WgAsociatedRule(RuleManager):
codename = 'wg_asociated'
description = 'All I-Ds associated with a particular WG'
def get_documents(self):
return Document.objects.filter(type='draft', states__slug='active').filter(group__acronym=self.value).distinct()
def options(self):
return [(i.acronym, "%s — %s"%(i.acronym, i.name)) for i in Group.objects.filter(type='wg', state='active').distinct().order_by('acronym')]
def show_value(self):
try:
return Group.objects.get(acronym=self.value).name
except Group.DoesNotExist:
return self.value
class AreaAsociatedRule(RuleManager):
codename = 'area_asociated'
description = 'All I-Ds associated with all WGs in a particular Area'
def get_documents(self):
return Document.objects.filter(type='draft', states__slug='active').filter(group__parent__acronym=self.value, group__parent__type='area').distinct()
def options(self):
return [(i.acronym, "%s — %s"%(i.acronym, i.name)) for i in Group.objects.filter(type='area', state='active').distinct().order_by('name')]
def show_value(self):
try:
return Group.objects.get(acronym=self.value).name
except Group.DoesNotExist:
return self.value
class AdResponsibleRule(RuleManager):
codename = 'ad_responsible'
description = 'All I-Ds with a particular responsible AD'
def get_documents(self):
return Document.objects.filter(type='draft', states__slug='active').filter(ad=self.value).distinct()
def options(self):
return [(i.pk, i.name) for i in Person.objects.filter(role__name='ad',role__group__state='active').distinct().order_by('name')]
def show_value(self):
try:
return Person.objects.get(pk=self.value).name
except Person.DoesNotExist:
return self.value
class AuthorRule(RuleManager):
codename = 'author'
description = 'All I-Ds with a particular author'
def get_documents(self):
return Document.objects.filter(type='draft', states__slug='active').filter(authors__person__name__icontains=self.value).distinct()
class ShepherdRule(RuleManager):
codename = 'shepherd'
description = 'All I-Ds with a particular document shepherd'
def get_documents(self):
return Document.objects.filter(type='draft', states__slug='active').filter(shepherd__person__name__icontains=self.value).distinct()
# class ReferenceToRFCRule(RuleManager):
# codename = 'reference_to_rfc'
# description = 'All I-Ds that have a reference to a particular RFC'
#
# def get_documents(self):
# return Document.objects.filter(type='draft', states__slug='active').filter(relateddocument__target__document__states__slug='rfc', relateddocument__target__name__icontains=self.value).distinct()
#
#
# class ReferenceToIDRule(RuleManager):
# codename = 'reference_to_id'
# description = 'All I-Ds that have a reference to a particular I-D'
#
# def get_documents(self):
# return Document.objects.filter(type='draft', states__slug='active').filter(relateddocument__target__document__type='draft', relateddocument__target__name__icontains=self.value).distinct()
#
#
# class ReferenceFromRFCRule(RuleManager):
# codename = 'reference_from_rfc'
# description = 'All I-Ds that are referenced by a particular RFC'
#
# def get_documents(self):
# return Document.objects.filter(type='draft', states__slug='active').filter(relateddocument__source__states__slug='rfc', relateddocument__source__name__icontains=self.value).distinct()
#
#
#
# class ReferenceFromIDRule(RuleManager):
# codename = 'reference_from_id'
# description = 'All I-Ds that are referenced by a particular I-D'
#
# def get_documents(self):
# return Document.objects.filter(type='draft', states__slug='active').filter(relateddocument__source__type='draft', relateddocument__source__name__icontains=self.value).distinct()
class WithTextRule(RuleManager):
codename = 'with_text'
description = 'All I-Ds that contain a particular text string in the name'
def get_documents(self):
return Document.objects.filter(type='draft', states__slug='active').filter(name__icontains=self.value).distinct()
class IABInState(RuleManager):
codename = 'in_iab_state'
description = 'All I-Ds that are in a particular IAB state'
def get_documents(self):
return Document.objects.filter(states__type='draft-stream-iab', states__slug=self.value).distinct()
def options(self):
return [(i.slug, i.name) for i in State.objects.filter(type='draft-stream-iab').order_by('name')]
def show_value(self):
try:
return State.objects.get(type='draft-stream-iab', slug=self.value).name
except State.DoesNotExist:
return self.value
class IANAInState(RuleManager):
codename = 'in_iana_state'
description = 'All I-Ds that are in a particular IANA state'
def get_documents(self):
return Document.objects.filter(states__type='draft-iana-review', states__slug=self.value).distinct()
def options(self):
return [(i.slug, i.name) for i in State.objects.filter(type='draft-iana-review').order_by('name')]
def show_value(self):
try:
return State.objects.get(type='draft-iana-review', slug=self.value).name
except State.DoesNotExist:
return self.value
class IESGInState(RuleManager):
codename = 'in_iesg_state'
description = 'All I-Ds that are in a particular IESG state'
def get_documents(self):
return Document.objects.filter(states__type='draft-iesg', states__slug=self.value).distinct()
def options(self):
return [(i.slug, i.name) for i in State.objects.filter(type='draft-iesg').order_by('name')]
def show_value(self):
try:
return State.objects.get(type='draft-iesg', slug=self.value).name
except State.DoesNotExist:
return self.value
class IRTFInState(RuleManager):
codename = 'in_irtf_state'
description = 'All I-Ds that are in a particular IRTF state'
def get_documents(self):
return Document.objects.filter(states__type='draft-stream-irtf', states__slug=self.value).distinct()
def options(self):
return [(i.slug, i.name) for i in State.objects.filter(type='draft-stream-irtf').order_by('name')]
def show_value(self):
try:
return State.objects.get(type='draft-stream-irtf', slug=self.value).name
except State.DoesNotExist:
return self.value
class ISEInState(RuleManager):
codename = 'in_ise_state'
description = 'All I-Ds that are in a particular ISE state'
def get_documents(self):
return Document.objects.filter(states__type='draft-stream-ise', states__slug=self.value).distinct()
def options(self):
return [(i.slug, i.name) for i in State.objects.filter(type='draft-stream-ise').order_by('name')]
def show_value(self):
try:
return State.objects.get(type='draft-stream-ise', slug=self.value).name
except State.DoesNotExist:
return self.value
class RfcEditorInState(RuleManager):
codename = 'in_rfcEdit_state'
description = 'All I-Ds that are in a particular RFC Editor state'
def get_documents(self):
return Document.objects.filter(states__type='draft-rfceditor', states__slug=self.value).distinct()
def options(self):
return [(i.slug, i.type_id + ": " + i.name) for i in State.objects.filter(type='draft-rfceditor').order_by('name')]
def show_value(self):
try:
return State.objects.get(type='draft-rfceditor', slug=self.value).name
except State.DoesNotExist:
return self.value
class WGInState(RuleManager):
codename = 'in_wg_state'
description = 'All I-Ds that are in a particular Working Group state'
def get_documents(self):
return Document.objects.filter(states__type='draft-stream-ietf', states__slug=self.value).distinct()
def options(self):
return [(i.slug, i.type_id + ": " + i.name) for i in State.objects.filter(type='draft-stream-ietf').order_by('name')]
def show_value(self):
try:
return State.objects.get(type='draft-stream-ietf', slug=self.value).name
except State.DoesNotExist:
return self.value
class RfcWgAsociatedRule(RuleManager):
codename = 'wg_asociated_rfc'
description = 'All RFCs associated with a particular WG'
def get_documents(self):
return Document.objects.filter(type='draft', states__slug='rfc').filter(group__acronym=self.value).distinct()
def options(self):
return [(i.acronym, "%s — %s"%(i.acronym, i.name)) for i in Group.objects.filter(type='wg').distinct().order_by('acronym')]
def show_value(self):
try:
            return Group.objects.get(acronym=self.value).name
except Group.DoesNotExist:
return self.value
class RfcAreaAsociatedRule(RuleManager):
codename = 'area_asociated_rfc'
description = 'All RFCs associated with all WGs in a particular Area'
def get_documents(self):
return Document.objects.filter(type='draft', states__slug='rfc').filter(group__parent__acronym=self.value, group__parent__type='area').distinct()
def options(self):
return [(i.acronym, "%s — %s"%(i.acronym, i.name)) for i in Group.objects.filter(type='area').distinct().order_by('name')]
def show_value(self):
try:
            return Group.objects.get(acronym=self.value).name
except Group.DoesNotExist:
return self.value
class RfcAuthorRule(RuleManager):
codename = 'author_rfc'
description = 'All RFCs with a particular author'
def get_documents(self):
return Document.objects.filter(type='draft', states__slug='rfc').filter(authors__person__name__icontains=self.value).distinct()
TYPES_OF_RULES = [(i.codename, i.description) for i in RuleManager.__subclasses__()]
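# Note: __subclasses__() only reports *direct* subclasses, so a new rule
# must inherit from RuleManager itself (not from another rule class) to
# show up in TYPES_OF_RULES.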
|
|
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import Series, TimedeltaIndex, timedelta_range
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Hour
class TestTimedeltaIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
f = lambda x: isinstance(x, TimedeltaIndex)
self.check_ops_properties(TimedeltaIndex._field_ops, f)
self.check_ops_properties(TimedeltaIndex._object_ops, f)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range("1 days 09:00:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range("1 days 18:00:00", freq="-1H", periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range("1 days 09:00:00", freq="H", periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(
[
"1 days 09:00:00",
"1 days 09:00:00",
"1 days 09:00:00",
"1 days 08:00:00",
"1 days 08:00:00",
pd.NaT,
]
)
exp_idx = TimedeltaIndex(["1 days 09:00:00", "1 days 08:00:00"])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(["1 days 09:00:00", "1 days 08:00:00", pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(
TimedeltaIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["00:01:00", "00:01:00", "00:02:00"],
["00:01:00", "00:01:00", "00:00:01"],
),
):
assert idx[0] in idx
def test_unknown_attribute(self):
# see gh-9680
tdi = pd.timedelta_range(start=0, periods=10, freq="1s")
ts = pd.Series(np.random.normal(size=10), index=tdi)
assert "foo" not in ts.__dict__.keys()
msg = "'Series' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
ts.foo
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(["1 day", "2 day", "3 day"], freq="D", name="idx")
idx2 = TimedeltaIndex(["1 hour", "2 hour", "3 hour"], freq="H", name="idx")
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, idx[::-1])
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
idx1 = TimedeltaIndex(
["1 hour", "3 hour", "5 hour", "2 hour ", "1 hour"], name="idx1"
)
exp1 = TimedeltaIndex(
["1 hour", "1 hour", "2 hour", "3 hour", "5 hour"], name="idx1"
)
idx2 = TimedeltaIndex(
["1 day", "3 day", "5 day", "2 day", "1 day"], name="idx2"
)
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range("1 day", "31 day", freq="D", name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range("1 day", "31 day", freq="D", name="idx")
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep="last")
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep="last")
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize(
"freq", ["D", "3D", "-3D", "H", "2H", "-2H", "T", "2T", "S", "-3S"]
)
def test_infer_freq(self, freq):
# GH#11018
idx = pd.timedelta_range("1", freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_shift(self):
pass # handled in test_arithmetic.py
def test_repeat(self):
index = pd.timedelta_range("1 days", periods=2, freq="D")
exp = pd.TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = TimedeltaIndex(["1 days", "NaT", "3 days"])
exp = TimedeltaIndex(
[
"1 days",
"1 days",
"1 days",
"NaT",
"NaT",
"NaT",
"3 days",
"3 days",
"3 days",
]
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_nat(self):
assert pd.TimedeltaIndex._na_value is pd.NaT
assert pd.TimedeltaIndex([])._na_value is pd.NaT
idx = pd.TimedeltaIndex(["1 days", "2 days"])
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(["1 days", "NaT"])
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(["1 days", "2 days", "NaT"])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.TimedeltaIndex(["2 days", "1 days", "NaT"])
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.astype(object).equals(idx2.astype(object))
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
@pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)])
def test_freq_setter(self, values, freq):
# GH 20678
idx = TimedeltaIndex(values)
# can set to an offset, converting from string if necessary
idx.freq = freq
assert idx.freq == freq
assert isinstance(idx.freq, ABCDateOffset)
# can reset to None
idx.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = TimedeltaIndex(["0 days", "2 days", "4 days"])
# setting with an incompatible freq
msg = (
"Inferred frequency 2D from passed values does not conform to "
"passed frequency 5D"
)
with pytest.raises(ValueError, match=msg):
idx.freq = "5D"
# setting with a non-fixed frequency
msg = r"<2 \* BusinessDays> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
idx.freq = "2B"
# setting with non-freq string
with pytest.raises(ValueError, match="Invalid frequency"):
idx.freq = "foo"
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import time
import uuid
import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg
from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _ # noqa
from heat.openstack.common import network_utils
from heat.openstack.common.rpc import amqp as rpc_amqp
from heat.openstack.common.rpc import common as rpc_common
from heat.openstack.common import sslutils
kombu_opts = [
cfg.StrOpt('kombu_ssl_version',
default='',
help='SSL version to use (valid only if SSL enabled). '
'valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
'be available on some distributions'
),
cfg.StrOpt('kombu_ssl_keyfile',
default='',
help='SSL key file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_certfile',
default='',
help='SSL cert file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_ca_certs',
default='',
help=('SSL certification authority file '
'(valid only if SSL enabled)')),
cfg.StrOpt('rabbit_host',
default='localhost',
help='The RabbitMQ broker address where a single node is used'),
cfg.IntOpt('rabbit_port',
default=5672,
help='The RabbitMQ broker port where a single node is used'),
cfg.ListOpt('rabbit_hosts',
default=['$rabbit_host:$rabbit_port'],
help='RabbitMQ HA cluster host:port pairs'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
help='connect over SSL for RabbitMQ'),
cfg.StrOpt('rabbit_userid',
default='guest',
help='the RabbitMQ userid'),
cfg.StrOpt('rabbit_password',
default='guest',
help='the RabbitMQ password',
secret=True),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='the RabbitMQ virtual host'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='how frequently to retry connecting with RabbitMQ'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
help='how long to backoff for between retries when connecting '
'to RabbitMQ'),
cfg.IntOpt('rabbit_max_retries',
default=0,
help='maximum retries with trying to connect to RabbitMQ '
'(the default of 0 implies an infinite retry count)'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
                help='use H/A queues in RabbitMQ (x-ha-policy: all). '
                     'You need to wipe the RabbitMQ database when '
                     'changing this option.'),
]
cfg.CONF.register_opts(kombu_opts)
LOG = rpc_common.LOG
def _get_queue_arguments(conf):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, channel, callback, tag, **kwargs):
"""Declare a queue on an amqp channel.
'channel' is the amqp channel to use
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
queue name, exchange name, and other kombu options are
passed in here as a dictionary.
"""
self.callback = callback
self.tag = str(tag)
self.kwargs = kwargs
self.queue = None
self.ack_on_error = kwargs.get('ack_on_error', True)
self.reconnect(channel)
def reconnect(self, channel):
"""Re-declare the queue after a rabbit reconnect."""
self.channel = channel
self.kwargs['channel'] = channel
self.queue = kombu.entity.Queue(**self.kwargs)
self.queue.declare()
def _callback_handler(self, message, callback):
"""Call callback with deserialized message.
Messages that are processed without exception are ack'ed.
If the message processing generates an exception, it will be
ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
"""
try:
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
except Exception:
if self.ack_on_error:
LOG.exception(_("Failed to process message"
" ... skipping it."))
message.ack()
else:
LOG.exception(_("Failed to process message"
" ... will requeue."))
message.requeue()
else:
message.ack()
def consume(self, *args, **kwargs):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using the
Connection.iterconsume() iterator will process the messages,
calling the appropriate callback.
If a callback is specified in kwargs, use that. Otherwise,
use the callback passed during __init__()
        If kwargs['nowait'] is True, the consume request is sent without
        waiting for the broker to confirm it; otherwise this call waits
        for that confirmation before returning.
"""
options = {'consumer_tag': self.tag}
options['nowait'] = kwargs.get('nowait', False)
callback = kwargs.get('callback', self.callback)
if not callback:
raise ValueError("No callback defined")
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
self._callback_handler(message, callback)
self.queue.consume(*args, callback=_callback, **options)
def cancel(self):
"""Cancel the consuming from the queue, if it has started."""
try:
self.queue.cancel(self.tag)
except KeyError as e:
# NOTE(comstud): Kludge to get around a amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
self.queue = None
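# Lifecycle note for the consumer classes below: a Connection instantiates
# one (declaring its queue and exchange), Connection.iterconsume() then
# calls consume() on each and drains events, and every delivered message is
# routed through _callback_handler() above; cancel() detaches the consumer
# tag when the consumer is no longer needed.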
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
"""Init a 'direct' queue.
'channel' is the amqp channel to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(DirectConsumer, self).__init__(channel,
callback,
tag,
name=msg_id,
exchange=exchange,
routing_key=msg_id,
**options)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, channel, topic, callback, tag, name=None,
exchange_name=None, **kwargs):
"""Init a 'topic' queue.
:param channel: the amqp channel to use
:param topic: the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param tag: a unique ID for the consumer on the channel
:param name: optional queue name, defaults to topic
:paramtype name: str
Other kombu options may be passed as keyword arguments
"""
# Default options
options = {'durable': conf.amqp_durable_queues,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': conf.amqp_auto_delete,
'exclusive': False}
options.update(kwargs)
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
exchange = kombu.entity.Exchange(name=exchange_name,
type='topic',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(TopicConsumer, self).__init__(channel,
callback,
tag,
name=name or topic,
exchange=exchange,
routing_key=topic,
**options)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, channel, topic, callback, tag, **kwargs):
"""Init a 'fanout' queue.
'channel' is the amqp channel to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
unique = uuid.uuid4().hex
exchange_name = '%s_fanout' % topic
queue_name = '%s_fanout_%s' % (topic, unique)
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(FanoutConsumer, self).__init__(channel, callback, tag,
name=queue_name,
exchange=exchange,
routing_key=topic,
**options)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, channel, exchange_name, routing_key, **kwargs):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.exchange_name = exchange_name
self.routing_key = routing_key
self.kwargs = kwargs
self.reconnect(channel)
def reconnect(self, channel):
"""Re-establish the Producer after a rabbit reconnection."""
self.exchange = kombu.entity.Exchange(name=self.exchange_name,
**self.kwargs)
self.producer = kombu.messaging.Producer(exchange=self.exchange,
channel=channel,
routing_key=self.routing_key)
def send(self, msg, timeout=None):
"""Send a message."""
if timeout:
#
# AMQP TTL is in milliseconds when set in the header.
#
self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
else:
self.producer.publish(msg)
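# Hedged usage sketch: publishers are normally driven via
# Connection.publisher_send() further down, roughly
#
#   publisher = TopicPublisher(conf, channel, 'compute')
#   publisher.send({'method': 'ping', 'args': {}}, timeout=30)
#
# where the topic and payload are illustrative; reconnect() rebuilds the
# kombu Producer against a fresh channel after a broker failure.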
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, channel, msg_id, **kwargs):
"""init a 'direct' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
type='direct', **options)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'topic' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': conf.amqp_durable_queues,
'auto_delete': conf.amqp_auto_delete,
'exclusive': False}
options.update(kwargs)
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(channel,
exchange_name,
topic,
type='topic',
**options)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'fanout' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
None, type='fanout', **options)
class NotifyPublisher(TopicPublisher):
"""Publisher class for 'notify'."""
def __init__(self, conf, channel, topic, **kwargs):
self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
self.queue_arguments = _get_queue_arguments(conf)
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
def reconnect(self, channel):
super(NotifyPublisher, self).reconnect(channel)
# NOTE(jerdfelt): Normally the consumer would create the queue, but
# we do this to ensure that messages don't get dropped if the
# consumer is started after we do
queue = kombu.entity.Queue(channel=channel,
exchange=self.exchange,
durable=self.durable,
name=self.routing_key,
routing_key=self.routing_key,
queue_arguments=self.queue_arguments)
queue.declare()
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
self.consumers = []
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
self.max_retries = self.conf.rabbit_max_retries
# Try forever?
if self.max_retries <= 0:
self.max_retries = None
self.interval_start = self.conf.rabbit_retry_interval
self.interval_stepping = self.conf.rabbit_retry_backoff
# max retry-interval = 30 seconds
self.interval_max = 30
self.memory_transport = False
if server_params is None:
server_params = {}
# Keys to translate from server_params to kombu params
server_params_to_kombu_params = {'username': 'userid'}
ssl_params = self._fetch_ssl_params()
params_list = []
for adr in self.conf.rabbit_hosts:
hostname, port = network_utils.parse_host_port(
adr, default_port=self.conf.rabbit_port)
params = {
'hostname': hostname,
'port': port,
'userid': self.conf.rabbit_userid,
'password': self.conf.rabbit_password,
'virtual_host': self.conf.rabbit_virtual_host,
}
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
if self.conf.fake_rabbit:
params['transport'] = 'memory'
if self.conf.rabbit_use_ssl:
params['ssl'] = ssl_params
params_list.append(params)
self.params_list = params_list
self.memory_transport = self.conf.fake_rabbit
self.connection = None
self.reconnect()
def _fetch_ssl_params(self):
"""Handles fetching what ssl params should be used for the connection
(if any).
"""
ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.conf.kombu_ssl_version:
ssl_params['ssl_version'] = sslutils.validate_ssl_version(
self.conf.kombu_ssl_version)
if self.conf.kombu_ssl_keyfile:
ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
if self.conf.kombu_ssl_certfile:
ssl_params['certfile'] = self.conf.kombu_ssl_certfile
if self.conf.kombu_ssl_ca_certs:
ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
# We might want to allow variations in the
# future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
# Return the extended behavior or just have the default behavior
return ssl_params or True
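    # Returning True (rather than an empty dict) when no kombu_ssl_*
    # options are configured tells kombu to enable SSL with its default
    # settings.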
def _connect(self, params):
"""Connect to rabbit. Re-establish any queues that may have
been declared before if we are reconnecting. Exceptions should
be handled by the caller.
"""
if self.connection:
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
self.connection.release()
except self.connection_errors:
pass
# Setting this in case the next statement fails, though
# it shouldn't be doing any network operations, yet.
self.connection = None
self.connection = kombu.connection.BrokerConnection(**params)
self.connection_errors = self.connection.connection_errors
if self.memory_transport:
# Kludge to speed up tests.
self.connection.transport.polling_interval = 0.0
self.consumer_num = itertools.count(1)
self.connection.connect()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
params)
def reconnect(self):
"""Handles reconnecting and re-establishing queues.
Will retry up to self.max_retries number of times.
self.max_retries = 0 means to retry forever.
Sleep between tries, starting at self.interval_start
seconds, backing off self.interval_stepping number of seconds
each attempt.
"""
attempt = 0
while True:
params = self.params_list[attempt % len(self.params_list)]
attempt += 1
try:
self._connect(params)
return
except (IOError, self.connection_errors) as e:
pass
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
log_info = {}
log_info['err_str'] = str(e)
log_info['max_retries'] = self.max_retries
log_info.update(params)
if self.max_retries and attempt == self.max_retries:
msg = _('Unable to connect to AMQP server on '
'%(hostname)s:%(port)d after %(max_retries)d '
'tries: %(err_str)s') % log_info
LOG.error(msg)
raise rpc_common.RPCException(msg)
if attempt == 1:
sleep_time = self.interval_start or 1
elif attempt > 1:
sleep_time += self.interval_stepping
if self.interval_max:
sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time)
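    # Worked example with the option defaults (rabbit_retry_interval=1,
    # rabbit_retry_backoff=2, interval_max=30): the sleeps go 1, 3, 5, 7,
    # ... seconds, capped at 30, while each attempt cycles to the next
    # entry in params_list.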
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (self.connection_errors, socket.timeout, IOError) as e:
if error_callback:
error_callback(e)
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
if error_callback:
error_callback(e)
self.reconnect()
def get_channel(self):
"""Convenience call for bin/clear_rabbit_queues."""
return self.channel
def close(self):
"""Close/release this connection."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.channel.close()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
self.consumers = []
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.channel, topic, callback,
self.consumer_num.next())
self.consumers.append(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
info = {'do_consume': True}
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
info['do_consume'] = True
def _consume():
if info['do_consume']:
queues_head = self.consumers[:-1] # not fanout.
queues_tail = self.consumers[-1] # fanout
for queue in queues_head:
queue.consume(nowait=True)
queues_tail.consume(nowait=False)
info['do_consume'] = False
return self.connection.drain_events(timeout=timeout)
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread."""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
"""Send to a publisher based on the publisher class."""
def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publish():
publisher = cls(self.conf, self.channel, topic, **kwargs)
publisher.send(msg, timeout)
self.ensure(_error_callback, _publish)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None, ack_on_error=True):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
ack_on_error=ack_on_error,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message."""
self.publisher_send(TopicPublisher, topic, msg, timeout)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
def consume(self, limit=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
self.declare_fanout_consumer(topic, proxy_cb)
else:
self.declare_topic_consumer(topic, proxy_cb)
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
        A message will be delivered to multiple pools if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
wait_for_consumers=not ack_on_error
)
self.proxy_callbacks.append(callback_wrapper)
self.declare_topic_consumer(
queue_name=pool_name,
topic=topic,
exchange_name=exchange_name,
callback=callback_wrapper,
ack_on_error=ack_on_error,
)
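# Consumer-side sketch (illustrative, assuming an oslo-style `conf` object and
# a `proxy` object whose methods handle incoming RPC calls): how a service
# typically wires a topic consumer to this Connection wrapper and drains
# messages in a greenthread. Names and topic are placeholders, not part of the
# original module.
def _example_start_consumer(conf, proxy, topic='compute'):
    conn = create_connection(conf)        # pooled connection (defined below)
    conn.create_consumer(topic, proxy)    # topic consumer dispatching to proxy
    conn.consume_in_thread()              # consume in a background greenthread
    return conn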
def create_connection(conf, new=True):
"""Create a connection."""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
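# Caller-side sketch (illustrative only): how the module-level helpers above
# are typically used. The topic name and message envelope are assumptions for
# demonstration; real callers receive `conf` (oslo config) and a request
# `context` from the surrounding service.
def _example_rpc_roundtrip(conf, context):
    # Blocking call: publish on the 'compute' topic and wait for one reply.
    reply = call(conf, context, 'compute',
                 {'method': 'ping', 'args': {'payload': 'hello'}}, timeout=30)
    # Fire-and-forget cast: no reply is expected.
    cast(conf, context, 'compute', {'method': 'refresh_cache', 'args': {}})
    return reply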
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations:
"""PacketCapturesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
parameters: "_models.PacketCapture",
**kwargs: Any
) -> "_models.PacketCaptureResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PacketCapture')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
parameters: "_models.PacketCapture",
**kwargs: Any
) -> AsyncLROPoller["_models.PacketCaptureResult"]:
"""Create and start a packet capture on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param parameters: Parameters that define the create packet capture operation.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.PacketCapture
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PacketCaptureResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.PacketCaptureResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> "_models.PacketCaptureResult":
"""Gets a packet capture session by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PacketCaptureResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.PacketCaptureResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def _stop_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
async def begin_stop(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Stops a specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
async def _get_status_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> "_models.PacketCaptureQueryStatusResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self._get_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
async def begin_get_status(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.PacketCaptureQueryStatusResult"]:
"""Query the status of a running packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param packet_capture_name: The name given to the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.PacketCaptureQueryStatusResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
def list(
self,
resource_group_name: str,
network_watcher_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PacketCaptureListResult"]:
"""Lists all packet capture sessions within the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.PacketCaptureListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'} # type: ignore
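# Usage sketch (illustrative, not generated): these operations are normally
# reached through the versioned async management client. The client and
# credential class names below follow the usual azure-mgmt-network /
# azure-identity packaging and are assumptions; resource names are placeholders.
async def _example_stop_packet_capture(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential  # assumed dependency
    from azure.mgmt.network.v2019_04_01.aio import NetworkManagementClient
    credential = DefaultAzureCredential()
    async with NetworkManagementClient(credential, subscription_id) as client:
        poller = await client.packet_captures.begin_stop(
            'example-rg', 'example-watcher', 'example-capture')
        await poller.result()  # wait for the long-running operation to finish
    await credential.close()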
|
|
from __future__ import absolute_import, division, unicode_literals
from six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
"\u3000]")
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((_is_ucs4 and len(v) > 1) or
(not _is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = _utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in _encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
_encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = _encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error("htmlentityreplace", htmlentityreplace_errors)
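# Quick illustration (assumed example values, not part of the original module):
# once the "htmlentityreplace" handler is registered above, encode() can fall
# back to named character references for characters the target codec cannot
# represent.
def _example_htmlentityreplace():
    # U+00A9 is not representable in ASCII; the handler emits "&copy;" instead.
    return "\u00a9 2023".encode("ascii", "htmlentityreplace")  # b'&copy; 2023'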
def serialize(input, tree="etree", encoding=None, **serializer_opts):
"""Serializes the input token stream using the specified treewalker
:arg input: the token stream to serialize
:arg tree: the treewalker to use
:arg encoding: the encoding to use
:arg serializer_opts: any options to pass to the
:py:class:`html5lib.serializer.HTMLSerializer` that gets created
:returns: the tree serialized as a string
Example:
>>> from html5lib.html5parser import parse
>>> from html5lib.serializer import serialize
>>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
>>> serialize(token_stream, omit_optional_tags=False)
'<html><head></head><body><p>Hi!</p></body></html>'
"""
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
s = HTMLSerializer(**serializer_opts)
return s.render(walker(input), encoding)
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer
:arg inject_meta_charset: Whether or not to inject the meta charset.
Defaults to ``True``.
:arg quote_attr_values: Whether to quote attribute values that don't
require quoting per legacy browser behavior (``"legacy"``), when
required by the standard (``"spec"``), or always (``"always"``).
Defaults to ``"legacy"``.
:arg quote_char: Use given quote character for attribute quoting.
Defaults to ``"`` which will use double quotes unless attribute
value contains a double quote, in which case single quotes are
used.
:arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
values.
Defaults to ``False``.
:arg escape_rcdata: Whether to escape characters that need to be
escaped within normal elements within rcdata elements such as
style.
Defaults to ``False``.
:arg resolve_entities: Whether to resolve named character entities that
            appear in the source tree. The XML predefined entities &lt; &gt;
            &amp; &quot; &apos; are unaffected by this setting.
Defaults to ``True``.
:arg strip_whitespace: Whether to remove semantically meaningless
whitespace. (This compresses all whitespace to a single space
except within ``pre``.)
Defaults to ``False``.
:arg minimize_boolean_attributes: Shortens boolean attributes to give
just the attribute value, for example::
<input disabled="disabled">
becomes::
<input disabled>
Defaults to ``True``.
:arg use_trailing_solidus: Includes a close-tag slash at the end of the
start tag of void elements (empty elements whose end tag is
forbidden). E.g. ``<hr/>``.
Defaults to ``False``.
:arg space_before_trailing_solidus: Places a space immediately before
the closing slash in a tag using a trailing solidus. E.g.
``<hr />``. Requires ``use_trailing_solidus=True``.
Defaults to ``True``.
:arg sanitize: Strip all unsafe or unknown constructs from output.
See :py:class:`html5lib.filters.sanitizer.Filter`.
Defaults to ``False``.
:arg omit_optional_tags: Omit start/end tags that are optional.
Defaults to ``True``.
:arg alphabetical_attributes: Reorder attributes to be in alphabetical order.
Defaults to ``False``.
"""
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# Alphabetical attributes is here under the assumption that none of
# the later filters add or change order of attributes; it needs to be
# before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifer contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
"""Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
"""
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
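# Usage sketch (illustrative only): it mirrors the doctest in render() above
# but exercises a couple of serializer options; the markup argument is a
# placeholder.
def _example_serialize(markup='<p class=greeting>Hi &amp; bye</p>'):
    import html5lib  # local import so the package is fully loaded first
    dom = html5lib.parse(markup)
    walker = html5lib.getTreeWalker('etree')
    serializer = HTMLSerializer(quote_attr_values='always',
                                omit_optional_tags=False)
    return serializer.render(walker(dom))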
|
|
#!/usr/bin/env python
"""Service registry for apitools."""
import collections
import logging
import re
import textwrap
from apitools.base.py import base_api
from apitools.gen import util
# We're a code generator. I don't care.
# pylint:disable=too-many-statements
_MIME_PATTERN_RE = re.compile(r'(?i)[a-z0-9_*-]+/[a-z0-9_*-]+')
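# Illustration (assumed example inputs): the pattern above sanity-checks MIME
# specifications, including wildcards, taken from a method's upload accept
# list, e.g.:
#     bool(_MIME_PATTERN_RE.match('image/*'))         -> True
#     bool(_MIME_PATTERN_RE.match('not a mime type'))  -> False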
class ServiceRegistry(object):
"""Registry for service types."""
def __init__(self, client_info, message_registry, command_registry,
base_url, base_path, names,
root_package_dir, base_files_package,
unelidable_request_methods):
self.__client_info = client_info
self.__package = client_info.package
self.__names = names
self.__service_method_info_map = collections.OrderedDict()
self.__message_registry = message_registry
self.__command_registry = command_registry
self.__base_url = base_url
self.__base_path = base_path
self.__root_package_dir = root_package_dir
self.__base_files_package = base_files_package
self.__unelidable_request_methods = unelidable_request_methods
self.__all_scopes = set(self.__client_info.scopes)
def Validate(self):
self.__message_registry.Validate()
@property
def scopes(self):
return sorted(list(self.__all_scopes))
def __GetServiceClassName(self, service_name):
return self.__names.ClassName(
'%sService' % self.__names.ClassName(service_name))
def __PrintDocstring(self, printer, method_info, method_name, name):
"""Print a docstring for a service method."""
if method_info.description:
description = util.CleanDescription(method_info.description)
first_line, newline, remaining = method_info.description.partition(
'\n')
if not first_line.endswith('.'):
first_line = '%s.' % first_line
description = '%s%s%s' % (first_line, newline, remaining)
else:
description = '%s method for the %s service.' % (method_name, name)
with printer.CommentContext():
printer('"""%s' % description)
printer()
printer('Args:')
printer(' request: (%s) input message', method_info.request_type_name)
printer(' global_params: (StandardQueryParameters, default: None) '
'global arguments')
if method_info.upload_config:
printer(' upload: (Upload, default: None) If present, upload')
printer(' this stream with the request.')
if method_info.supports_download:
printer(
' download: (Download, default: None) If present, download')
printer(' data from the request via this stream.')
printer('Returns:')
printer(' (%s) The response message.', method_info.response_type_name)
printer('"""')
def __WriteSingleService(
self, printer, name, method_info_map, client_class_name):
printer()
class_name = self.__GetServiceClassName(name)
printer('class %s(base_api.BaseApiService):', class_name)
with printer.Indent():
printer('"""Service class for the %s resource."""', name)
printer()
printer('_NAME = %s', repr(name))
# Print the configs for the methods first.
printer()
printer('def __init__(self, client):')
with printer.Indent():
printer('super(%s.%s, self).__init__(client)',
client_class_name, class_name)
printer('self._method_configs = {')
with printer.Indent(indent=' '):
for method_name, method_info in method_info_map.items():
printer("'%s': base_api.ApiMethodInfo(", method_name)
with printer.Indent(indent=' '):
attrs = sorted(
x.name for x in method_info.all_fields())
for attr in attrs:
if attr in ('upload_config', 'description'):
continue
printer(
'%s=%r,', attr, getattr(method_info, attr))
printer('),')
printer('}')
printer()
printer('self._upload_configs = {')
with printer.Indent(indent=' '):
for method_name, method_info in method_info_map.items():
upload_config = method_info.upload_config
if upload_config is not None:
printer(
"'%s': base_api.ApiUploadInfo(", method_name)
with printer.Indent(indent=' '):
attrs = sorted(
x.name for x in upload_config.all_fields())
for attr in attrs:
printer('%s=%r,',
attr, getattr(upload_config, attr))
printer('),')
printer('}')
# Now write each method in turn.
for method_name, method_info in method_info_map.items():
printer()
params = ['self', 'request', 'global_params=None']
if method_info.upload_config:
params.append('upload=None')
if method_info.supports_download:
params.append('download=None')
printer('def %s(%s):', method_name, ', '.join(params))
with printer.Indent():
self.__PrintDocstring(
printer, method_info, method_name, name)
printer("config = self.GetMethodConfig('%s')", method_name)
upload_config = method_info.upload_config
if upload_config is not None:
printer("upload_config = self.GetUploadConfig('%s')",
method_name)
arg_lines = [
'config, request, global_params=global_params']
if method_info.upload_config:
arg_lines.append(
'upload=upload, upload_config=upload_config')
if method_info.supports_download:
arg_lines.append('download=download')
printer('return self._RunMethod(')
with printer.Indent(indent=' '):
for line in arg_lines[:-1]:
printer('%s,', line)
printer('%s)', arg_lines[-1])
def __WriteProtoServiceDeclaration(self, printer, name, method_info_map):
"""Write a single service declaration to a proto file."""
printer()
printer('service %s {', self.__GetServiceClassName(name))
with printer.Indent():
for method_name, method_info in method_info_map.items():
for line in textwrap.wrap(method_info.description,
printer.CalculateWidth() - 3):
printer('// %s', line)
printer('rpc %s (%s) returns (%s);',
method_name,
method_info.request_type_name,
method_info.response_type_name)
printer('}')
def WriteProtoFile(self, printer):
"""Write the services in this registry to out as proto."""
self.Validate()
client_info = self.__client_info
printer('// Generated services for %s version %s.',
client_info.package, client_info.version)
printer()
printer('syntax = "proto2";')
printer('package %s;', self.__package)
printer('import "%s";', client_info.messages_proto_file_name)
printer()
for name, method_info_map in self.__service_method_info_map.items():
self.__WriteProtoServiceDeclaration(printer, name, method_info_map)
def WriteFile(self, printer):
"""Write the services in this registry to out."""
self.Validate()
client_info = self.__client_info
printer('"""Generated client library for %s version %s."""',
client_info.package, client_info.version)
printer('# NOTE: This file is autogenerated and should not be edited '
'by hand.')
printer('from %s import base_api', self.__base_files_package)
import_prefix = ''
printer('%simport %s as messages', import_prefix,
client_info.messages_rule_name)
printer()
printer()
printer('class %s(base_api.BaseApiClient):',
client_info.client_class_name)
with printer.Indent():
printer(
'"""Generated client library for service %s version %s."""',
client_info.package, client_info.version)
printer()
printer('MESSAGES_MODULE = messages')
printer()
client_info_items = client_info._asdict(
).items() # pylint:disable=protected-access
for attr, val in client_info_items:
if attr == 'scopes' and not val:
val = ['https://www.googleapis.com/auth/userinfo.email']
printer('_%s = %r' % (attr.upper(), val))
printer()
printer("def __init__(self, url='', credentials=None,")
with printer.Indent(indent=' '):
printer('get_credentials=True, http=None, model=None,')
printer('log_request=False, log_response=False,')
printer('credentials_args=None, default_global_params=None,')
printer('additional_http_headers=None):')
with printer.Indent():
printer('"""Create a new %s handle."""', client_info.package)
printer('url = url or %r', self.__base_url)
printer(
'super(%s, self).__init__(', client_info.client_class_name)
printer(' url, credentials=credentials,')
printer(' get_credentials=get_credentials, http=http, '
'model=model,')
printer(' log_request=log_request, '
'log_response=log_response,')
printer(' credentials_args=credentials_args,')
printer(' default_global_params=default_global_params,')
printer(' additional_http_headers=additional_http_headers)')
for name in self.__service_method_info_map.keys():
printer('self.%s = self.%s(self)',
name, self.__GetServiceClassName(name))
for name, method_info in self.__service_method_info_map.items():
self.__WriteSingleService(
printer, name, method_info, client_info.client_class_name)
def __RegisterService(self, service_name, method_info_map):
if service_name in self.__service_method_info_map:
raise ValueError(
'Attempt to re-register descriptor %s' % service_name)
self.__service_method_info_map[service_name] = method_info_map
def __CreateRequestType(self, method_description, body_type=None):
"""Create a request type for this method."""
schema = {}
schema['id'] = self.__names.ClassName('%sRequest' % (
self.__names.ClassName(method_description['id'], separator='.'),))
schema['type'] = 'object'
schema['properties'] = collections.OrderedDict()
if 'parameterOrder' not in method_description:
ordered_parameters = list(method_description.get('parameters', []))
else:
ordered_parameters = method_description['parameterOrder'][:]
for k in method_description['parameters']:
if k not in ordered_parameters:
ordered_parameters.append(k)
for parameter_name in ordered_parameters:
field_name = self.__names.CleanName(parameter_name)
field = dict(method_description['parameters'][parameter_name])
if 'type' not in field:
raise ValueError('No type found in parameter %s' % field)
schema['properties'][field_name] = field
if body_type is not None:
body_field_name = self.__GetRequestField(
method_description, body_type)
if body_field_name in schema['properties']:
raise ValueError('Failed to normalize request resource name')
if 'description' not in body_type:
body_type['description'] = (
'A %s resource to be passed as the request body.' % (
self.__GetRequestType(body_type),))
schema['properties'][body_field_name] = body_type
self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
return schema['id']
def __CreateVoidResponseType(self, method_description):
"""Create an empty response type."""
schema = {}
method_name = self.__names.ClassName(
method_description['id'], separator='.')
schema['id'] = self.__names.ClassName('%sResponse' % method_name)
schema['type'] = 'object'
schema['description'] = 'An empty %s response.' % method_name
self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
return schema['id']
def __NeedRequestType(self, method_description, request_type):
"""Determine if this method needs a new request type created."""
if not request_type:
return True
method_id = method_description.get('id', '')
if method_id in self.__unelidable_request_methods:
return True
message = self.__message_registry.LookupDescriptorOrDie(request_type)
if message is None:
return True
field_names = [x.name for x in message.fields]
parameters = method_description.get('parameters', {})
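    # for/else: if every parameter is a path parameter whose cleaned name is
    # already a field on the candidate request message, reuse that message
    # (return False); otherwise fall through and create a new request type.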
for param_name, param_info in parameters.items():
if (param_info.get('location') != 'path' or
self.__names.CleanName(param_name) not in field_names):
break
else:
return False
return True
def __MaxSizeToInt(self, max_size):
"""Convert max_size to an int."""
size_groups = re.match(r'(?P<size>\d+)(?P<unit>.B)?$', max_size)
if size_groups is None:
raise ValueError('Could not parse maxSize')
size, unit = size_groups.group('size', 'unit')
shift = 0
if unit is not None:
unit_dict = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
shift = unit_dict.get(unit.upper())
if shift is None:
raise ValueError('Unknown unit %s' % unit)
return int(size) * (1 << shift)
def __ComputeUploadConfig(self, media_upload_config, method_id):
"""Fill out the upload config for this method."""
config = base_api.ApiUploadInfo()
if 'maxSize' in media_upload_config:
config.max_size = self.__MaxSizeToInt(
media_upload_config['maxSize'])
if 'accept' not in media_upload_config:
logging.warn(
'No accept types found for upload configuration in '
'method %s, using */*', method_id)
config.accept.extend([
str(a) for a in media_upload_config.get('accept', '*/*')])
for accept_pattern in config.accept:
if not _MIME_PATTERN_RE.match(accept_pattern):
logging.warn('Unexpected MIME type: %s', accept_pattern)
protocols = media_upload_config.get('protocols', {})
for protocol in ('simple', 'resumable'):
media = protocols.get(protocol, {})
for attr in ('multipart', 'path'):
if attr in media:
setattr(config, '%s_%s' % (protocol, attr), media[attr])
return config
def __ComputeMethodInfo(self, method_description, request, response,
request_field):
"""Compute the base_api.ApiMethodInfo for this method."""
relative_path = self.__names.NormalizeRelativePath(
''.join((self.__base_path, method_description['path'])))
method_id = method_description['id']
ordered_params = []
for param_name in method_description.get('parameterOrder', []):
param_info = method_description['parameters'][param_name]
if param_info.get('required', False):
ordered_params.append(param_name)
method_info = base_api.ApiMethodInfo(
relative_path=relative_path,
method_id=method_id,
http_method=method_description['httpMethod'],
description=util.CleanDescription(
method_description.get('description', '')),
query_params=[],
path_params=[],
ordered_params=ordered_params,
request_type_name=self.__names.ClassName(request),
response_type_name=self.__names.ClassName(response),
request_field=request_field,
)
if method_description.get('supportsMediaUpload', False):
method_info.upload_config = self.__ComputeUploadConfig(
method_description.get('mediaUpload'), method_id)
method_info.supports_download = method_description.get(
'supportsMediaDownload', False)
self.__all_scopes.update(method_description.get('scopes', ()))
for param, desc in method_description.get('parameters', {}).items():
param = self.__names.CleanName(param)
location = desc['location']
if location == 'query':
method_info.query_params.append(param)
elif location == 'path':
method_info.path_params.append(param)
else:
raise ValueError(
'Unknown parameter location %s for parameter %s' % (
location, param))
method_info.path_params.sort()
method_info.query_params.sort()
return method_info
def __BodyFieldName(self, body_type):
if body_type is None:
return ''
return self.__names.FieldName(body_type['$ref'])
def __GetRequestType(self, body_type):
return self.__names.ClassName(body_type.get('$ref'))
def __GetRequestField(self, method_description, body_type):
"""Determine the request field for this method."""
body_field_name = self.__BodyFieldName(body_type)
if body_field_name in method_description.get('parameters', {}):
body_field_name = self.__names.FieldName(
'%s_resource' % body_field_name)
    # A second name collision here is exceedingly unlikely, but keep
    # renaming until the field name no longer collides.
while body_field_name in method_description.get('parameters', {}):
body_field_name = self.__names.FieldName(
'%s_body' % body_field_name)
return body_field_name
def AddServiceFromResource(self, service_name, methods):
"""Add a new service named service_name with the given methods."""
method_descriptions = methods.get('methods', {})
method_info_map = collections.OrderedDict()
items = sorted(method_descriptions.items())
for method_name, method_description in items:
method_name = self.__names.MethodName(method_name)
# NOTE: According to the discovery document, if the request or
# response is present, it will simply contain a `$ref`.
body_type = method_description.get('request')
if body_type is None:
request_type = None
else:
request_type = self.__GetRequestType(body_type)
if self.__NeedRequestType(method_description, request_type):
request = self.__CreateRequestType(
method_description, body_type=body_type)
request_field = self.__GetRequestField(
method_description, body_type)
else:
request = request_type
request_field = base_api.REQUEST_IS_BODY
if 'response' in method_description:
response = method_description['response']['$ref']
else:
response = self.__CreateVoidResponseType(method_description)
method_info_map[method_name] = self.__ComputeMethodInfo(
method_description, request, response, request_field)
self.__command_registry.AddCommandForMethod(
service_name, method_name, method_info_map[method_name],
request, response)
nested_services = methods.get('resources', {})
services = sorted(nested_services.items())
for subservice_name, submethods in services:
new_service_name = '%s_%s' % (service_name, subservice_name)
self.AddServiceFromResource(new_service_name, submethods)
self.__RegisterService(service_name, method_info_map)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_DATA_TYPES = [dtypes.half, dtypes.float32, dtypes.float64]
# TODO(b/143684500): Eigen to support complex sqrt
if (not test_util.IsBuiltWithNvcc() and not test.is_built_with_rocm()):
_DATA_TYPES += [dtypes.complex64, dtypes.complex128]
_TEST_PARAM_VALUES = [
# learning_rate, rho, momentum, epsilon, centered
[0.05, 0.9, 0.0, 1e-3, True],
[0.05, 0.9, 0.0, 1e-3, False],
[0.1, 0.9, 0.0, 1e-3, True],
[0.01, 0.9, 0.0, 1e-5, True],
[0.01, 0.9, 0.9, 1e-5, True],
]
_TESTPARAMS = [
[data_type] + values
for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
]
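# Each _TESTPARAMS row is [dtype, learning_rate, rho, momentum, epsilon,
# centered]: every dtype crossed with every parameter row above.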
class RMSpropOptimizerTest(test.TestCase):
def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum,
epsilon, centered):
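    # Reference single-step RMSprop update, mirroring the optimizer under test:
    #   rms_t = rho * rms + (1 - rho) * g**2
    #   centered:     denom_t = rms_t - mg_t**2, mg_t = rho * mg + (1 - rho) * g
    #   momentum > 0: mom_t = momentum * mom + lr * g / sqrt(denom_t + epsilon)
    #                 var_t = var - mom_t
    #   otherwise:    var_t = var - lr * g / (sqrt(denom_t) + epsilon)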
rms_t = rms * rho + (1 - rho) * g * g
if centered:
mg_t = mg * rho + (1 - rho) * g
denom_t = rms_t - mg_t * mg_t
else:
mg_t = mg
denom_t = rms_t
if momentum > 0.:
mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))
var_t = var - mom_t
else:
mom_t = mom
var_t = var - lr * g / (np.sqrt(denom_t) + epsilon)
return var_t, mg_t, rms_t, mom_t
def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
lr, rho, momentum, epsilon, centered):
mg_t = copy.deepcopy(mg)
rms_t = copy.deepcopy(rms)
mom_t = copy.deepcopy(mom)
var_t = copy.deepcopy(var)
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue
if centered:
mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue
denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex]
else:
denom_t = rms_t[gindex]
if momentum > 0.:
mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t +
epsilon)
var_t[gindex] = var[gindex] - mom_t[gindex]
else:
mom_t[gindex] = mom[gindex]
var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon)
return var_t, mg_t, rms_t, mom_t
@test_util.run_deprecated_v1
def testDense(self):
for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
with test_util.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np, dtype=dtype)
var1 = resource_variable_ops.ResourceVariable(var1_np, dtype=dtype)
grads0 = constant_op.constant(grads0_np, dtype=dtype)
grads1 = constant_op.constant(grads1_np, dtype=dtype)
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
mg1 = opt.get_slot(var1, "mg")
else:
mg0 = None
mg1 = None
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,
momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,
momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testDenseWithLearningRateDecay(self):
var0_np = np.array([1.0, 2.0])
grads0_np = np.array([0.1, 0.2])
var1_np = np.array([3.0, 4.0])
grads1_np = np.array([0.01, 0.2])
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.01
rho = 0.9
momentum = 0.0
epsilon = 1e-7
centered = False
decay = 0.5
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0])
mg1_np = np.array([0.0, 0.0])
rms0_np = np.array([0.0, 0.0])
rms1_np = np.array([0.0, 0.0])
mom0_np = np.array([0.0, 0.0])
mom1_np = np.array([0.0, 0.0])
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
    # Run 2 steps of RMSprop
for t in range(2):
self.evaluate(update)
lr = learning_rate / (1 + decay * t)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
epsilon, centered)
# Validate updated params
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testDenseWithLearningRateInverseTimeDecay(self):
var0_np = np.array([1.0, 2.0])
grads0_np = np.array([0.1, 0.2])
var1_np = np.array([3.0, 4.0])
grads1_np = np.array([0.01, 0.2])
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.01
rho = 0.9
momentum = 0.0
epsilon = 1e-7
centered = False
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
opt = rmsprop.RMSprop(
learning_rate=lr_schedule,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0])
mg1_np = np.array([0.0, 0.0])
rms0_np = np.array([0.0, 0.0])
rms1_np = np.array([0.0, 0.0])
mom0_np = np.array([0.0, 0.0])
mom1_np = np.array([0.0, 0.0])
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
    # Run 2 steps of RMSprop
for t in range(2):
self.evaluate(update)
lr = learning_rate / (1 + decay * t)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
epsilon, centered)
# Validate updated params
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in _DATA_TYPES:
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
sgd_op = rmsprop.RMSprop(
learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0,
centered=False).minimize(
loss, var_list=[var0])
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[0., 1.]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariableCentered(self):
for dtype in _DATA_TYPES:
if test_util.is_xla_enabled() and dtype.is_complex:
self.skipTest("b/143578550")
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
# loss = lambda: pred * pred # pylint: disable=cell-var-from-loop
sgd_op = rmsprop.RMSprop(
learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0,
centered=True).minimize(
loss, var_list=[var0])
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testSparse(self):
for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
with test_util.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([1]))
grads1_np_indices = np.array([1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([1]))
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
else:
mg0 = None
mg1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,
learning_rate, rho, momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,
learning_rate, rho, momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testCallableParams(self):
with context.eager_mode():
for dtype in _DATA_TYPES:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
rho = lambda: 0.9
momentum = lambda: 0.0
epsilon = 1.0
opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the rms accumulators were 1. So we should see a normal
# update: v -= grad * learning_rate
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))
]), self.evaluate(var1))
# Step 2: the root mean square accumulators contain the previous update.
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0))
]), self.evaluate(var1))
def testConstructRMSpropWithLR(self):
opt = rmsprop.RMSprop(lr=1.0)
opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0)
opt_3 = rmsprop.RMSprop(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = variables.Variable(1.)
v2 = variables.Variable(1.)
opt = rmsprop.RMSprop(1., momentum=0., centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
      # There should be an iterations variable plus one unique slot variable
      # each for v1 and v2.
self.assertEqual(3, len(set({id(v) for v in opt.variables()})))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
      # There should be an iterations variable plus two unique slot variables
      # each for v1 and v2.
self.assertEqual(5, len(set({id(v) for v in opt.variables()})))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
      # There should be an iterations variable plus three unique slot
      # variables each for v1 and v2.
self.assertEqual(7, len(set({id(v) for v in opt.variables()})))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
class SlotColocationTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters([True, False])
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testRunMinimizeOnGPUForCPUVariables(self, use_resource):
with ops.device("/device:CPU:0"):
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0],
dtype=dtypes.float32)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0],
dtype=dtypes.float32)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
var1 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
def loss():
return 5 * var0 + 3 * var1
opt = rmsprop.RMSprop(
learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0)
# Fetch params to validate initial values
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step through optimizer on GPU.
# Slot variables are created the first time optimizer is used on some
# variable. This tests that slot variables will be colocated with the base
# variable.
with ops.device("/device:GPU:0"):
# Note that for eager execution, minimize expects a function instead of a
# Tensor.
opt_op = opt.minimize(loss, [var0, var1])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
# Validate updated params, All variables should have decreased.
self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)),
msg="updated variables: %s" % self.evaluate(var0))
self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)),
msg="updated variables: %s" % self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
|
from .. import VEXObject
# IRStmt hierarchy
class IRStmt(VEXObject):
def __init__(self, c_stmt, irsb):
VEXObject.__init__(self)
self.arch = irsb.arch
#self.c_stmt = c_stmt
self.tag = ints_to_enums[c_stmt.tag]
def pp(self):
        print(self.__str__())
@property
def expressions(self):
expressions = [ ]
        for _, v in self.__dict__.items():
if isinstance(v, IRExpr):
expressions.append(v)
expressions.extend(v.child_expressions)
return expressions
@property
def constants(self):
return sum((e.constants for e in self.expressions), [ ])
@staticmethod
def _translate(c_stmt, irsb):
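        # Dispatch on the statement's tag to build the matching Python
        # wrapper class; a NULL statement pointer translates to None.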
if c_stmt[0] == ffi.NULL:
return None
tag = c_stmt.tag
try:
stmt_class = _tag_to_class[tag]
except KeyError:
raise PyVEXError('Unknown/unsupported IRStmtTag %s\n' % ints_to_enums[tag])
return stmt_class(c_stmt, irsb)
class NoOp(IRStmt):
def __init__(self, c_stmt, irsb): #pylint:disable=unused-argument
IRStmt.__init__(self, c_stmt, irsb)
def __str__(self):
return "IR-NoOp"
class IMark(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.addr = c_stmt.Ist.IMark.addr
self.len = c_stmt.Ist.IMark.len
self.delta = c_stmt.Ist.IMark.delta
def __str__(self):
return "------ IMark(0x%x, %d, %d) ------" % (self.addr, self.len, self.delta)
class AbiHint(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.base = IRExpr._translate(c_stmt.Ist.AbiHint.base, irsb)
self.len = c_stmt.Ist.AbiHint.len
self.nia = IRExpr._translate(c_stmt.Ist.AbiHint.nia, irsb)
def __str__(self):
return "====== AbiHint(0x%s, %d, %s) ======" % (self.base, self.len, self.nia)
class Put(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.data = IRExpr._translate(c_stmt.Ist.Put.data, irsb)
self.offset = c_stmt.Ist.Put.offset
def __str__(self):
return "PUT(%s) = %s" % (self.arch.translate_register_name(self.offset), self.data)
class PutI(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.descr = IRRegArray(c_stmt.Ist.PutI.details.descr)
self.ix = IRExpr._translate(c_stmt.Ist.PutI.details.ix, irsb)
self.data = IRExpr._translate(c_stmt.Ist.PutI.details.data, irsb)
self.bias = c_stmt.Ist.PutI.details.bias
def __str__(self):
return "PutI(%s)[%s,%d] = %s" % (self.descr, self.ix, self.bias, self.data)
class WrTmp(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.data = IRExpr._translate(c_stmt.Ist.WrTmp.data, irsb)
self.tmp = c_stmt.Ist.WrTmp.tmp
def __str__(self):
return "t%d = %s" % (self.tmp, self.data)
class Store(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.addr = IRExpr._translate(c_stmt.Ist.Store.addr, irsb)
self.data = IRExpr._translate(c_stmt.Ist.Store.data, irsb)
self.end = ints_to_enums[c_stmt.Ist.Store.end]
@property
def endness(self):
return self.end
def __str__(self):
return "ST%s(%s) = %s" % (self.endness[-2:].lower(), self.addr, self.data)
class CAS(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.addr = IRExpr._translate(c_stmt.Ist.CAS.details.addr, irsb)
self.dataLo = IRExpr._translate(c_stmt.Ist.CAS.details.dataLo, irsb)
self.dataHi = IRExpr._translate(c_stmt.Ist.CAS.details.dataHi, irsb)
self.expdLo = IRExpr._translate(c_stmt.Ist.CAS.details.expdLo, irsb)
self.expdHi = IRExpr._translate(c_stmt.Ist.CAS.details.expdHi, irsb)
self.oldLo = c_stmt.Ist.CAS.details.oldLo
self.oldHi = c_stmt.Ist.CAS.details.oldHi
self.end = ints_to_enums[c_stmt.Ist.CAS.details.end]
@property
def endness(self):
return self.end
def __str__(self):
return "t(%s,%s) = CAS%s(%s :: (%s,%s)->(%s,%s))" % (self.oldLo, self.oldHi, self.end[-2:].lower(), self.addr, self.expdLo, self.expdHi, self.dataLo, self.dataHi)
class LLSC(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.addr = IRExpr._translate(c_stmt.Ist.LLSC.addr, irsb)
self.storedata = IRExpr._translate(c_stmt.Ist.LLSC.storedata, irsb)
self.result = c_stmt.Ist.LLSC.result
self.end = ints_to_enums[c_stmt.Ist.LLSC.end]
@property
def endness(self):
return self.end
def __str__(self):
if self.storedata is None:
return "result = LD%s-Linked(%s)" % (self.end[-2:].lower(), self.addr)
else:
return "result = ( ST%s-Cond(%s) = %s )" % (self.end[-2:].lower(), self.addr, self.storedata)
class MBE(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.event = ints_to_enums[c_stmt.Ist.MBE.event]
def __str__(self):
return "MBusEvent-" + self.event
class Dirty(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.cee = IRCallee(c_stmt.Ist.Dirty.details.cee)
self.guard = IRExpr._translate(c_stmt.Ist.Dirty.details.guard, irsb)
self.tmp = c_stmt.Ist.Dirty.details.tmp
self.mFx = ints_to_enums[c_stmt.Ist.Dirty.details.mFx]
self.mAddr = IRExpr._translate(c_stmt.Ist.Dirty.details.mAddr, irsb)
self.mSize = c_stmt.Ist.Dirty.details.mSize
self.nFxState = c_stmt.Ist.Dirty.details.nFxState
self.args = [ ]
for i in range(20):
a = c_stmt.Ist.Dirty.details.args[i]
if a == ffi.NULL:
break
self.args.append(IRExpr._translate(a, irsb))
self.args = tuple(self.args)
def __str__(self):
return "t%s = DIRTY %s %s ::: %s(%s)" % (self.tmp, self.guard, "TODO(effects)", self.cee, ','.join(str(a) for a in self.args))
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [ ])
expressions.extend(self.args)
expressions.append(self.guard)
expressions.extend(self.guard.child_expressions)
return expressions
class Exit(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.guard = IRExpr._translate(c_stmt.Ist.Exit.guard, irsb)
self.dst = IRConst._translate(c_stmt.Ist.Exit.dst)
self.offsIP = c_stmt.Ist.Exit.offsIP
self.jk = ints_to_enums[c_stmt.Ist.Exit.jk]
@property
def jumpkind(self):
return self.jk
def __str__(self):
return "if (%s) { PUT(%s) = %s; %s }" % (self.guard, self.arch.translate_register_name(self.offsIP), hex(self.dst.value), self.jumpkind)
@property
def child_expressions(self):
return [self.guard, self.dst] + self.guard.child_expressions
class LoadG(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.addr = IRExpr._translate(c_stmt.Ist.LoadG.details.addr, irsb)
self.alt = IRExpr._translate(c_stmt.Ist.LoadG.details.alt, irsb)
self.guard = IRExpr._translate(c_stmt.Ist.LoadG.details.guard, irsb)
self.dst = c_stmt.Ist.LoadG.details.dst
self.cvt = ints_to_enums[c_stmt.Ist.LoadG.details.cvt]
self.end = ints_to_enums[c_stmt.Ist.LoadG.details.end]
type_in = ffi.new('IRType *')
type_out = ffi.new('IRType *')
pvc.typeOfIRLoadGOp(c_stmt.Ist.LoadG.details.cvt, type_out, type_in)
type_in = ffi.cast('int *', type_in)[0]
type_out = ffi.cast('int *', type_out)[0]
self.cvt_types = (ints_to_enums[type_in], ints_to_enums[type_out])
@property
def endness(self):
return self.end
def __str__(self):
return "t%d = if (%s) %s(LD%s(%s)) else %s" % (self.dst, self.guard, self.cvt, self.end[-2:].lower(), self.addr, self.alt)
class StoreG(IRStmt):
def __init__(self, c_stmt, irsb):
IRStmt.__init__(self, c_stmt, irsb)
self.addr = IRExpr._translate(c_stmt.Ist.StoreG.details.addr, irsb)
self.data = IRExpr._translate(c_stmt.Ist.StoreG.details.data, irsb)
self.guard = IRExpr._translate(c_stmt.Ist.StoreG.details.guard, irsb)
self.end = ints_to_enums[c_stmt.Ist.StoreG.details.end]
@property
def endness(self):
return self.end
def __str__(self):
return "if (%s) ST%s(%s) = %s" % (self.guard, self.end[-2:].lower(), self.addr, self.data)
from ..IRExpr import IRExpr
from ..IRConst import IRConst
from .. import IRRegArray, ints_to_enums, enums_to_ints, IRCallee, ffi, pvc, PyVEXError
_tag_to_class = {
enums_to_ints['Ist_NoOp']: NoOp,
enums_to_ints['Ist_IMark']: IMark,
enums_to_ints['Ist_AbiHint']: AbiHint,
enums_to_ints['Ist_Put']: Put,
enums_to_ints['Ist_PutI']: PutI,
enums_to_ints['Ist_WrTmp']: WrTmp,
enums_to_ints['Ist_Store']: Store,
enums_to_ints['Ist_LoadG']: LoadG,
enums_to_ints['Ist_StoreG']: StoreG,
enums_to_ints['Ist_CAS']: CAS,
enums_to_ints['Ist_LLSC']: LLSC,
enums_to_ints['Ist_Dirty']: Dirty,
enums_to_ints['Ist_MBE']: MBE,
enums_to_ints['Ist_Exit']: Exit,
}
|
|
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Stage the Chromium checkout to update CTS test version."""
import contextlib
import json
import os
import re
import sys
import tempfile
import threading
import urllib
import zipfile
sys.path.append(
os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, 'third_party',
'catapult', 'devil'))
from devil.utils import cmd_helper
sys.path.append(
os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, 'third_party',
'catapult', 'common', 'py_utils'))
from py_utils import tempfile_ext
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
TOOLS_DIR = os.path.join('android_webview', 'tools')
CONFIG_FILE = os.path.join('cts_config', 'webview_cts_gcs_path.json')
CONFIG_PATH = os.path.join(SRC_DIR, TOOLS_DIR, CONFIG_FILE)
CIPD_FILE = os.path.join('cts_archive', 'cipd.yaml')
CIPD_PATH = os.path.join(SRC_DIR, TOOLS_DIR, CIPD_FILE)
DEPS_FILE = 'DEPS'
TEST_SUITES_FILE = os.path.join('testing', 'buildbot', 'test_suites.pyl')
# Android desserts that are no longer receiving CTS updates at
# https://source.android.com/compatibility/cts/downloads
# Please update this list as more versions reach end-of-service.
END_OF_SERVICE_DESSERTS = ['L', 'M']
CTS_DEP_NAME = 'src/android_webview/tools/cts_archive'
CTS_DEP_PACKAGE = 'chromium/android_webview/tools/cts_archive'
CIPD_REFERRERS = [DEPS_FILE, TEST_SUITES_FILE]
_GENERATE_BUILDBOT_JSON = os.path.join('testing', 'buildbot',
'generate_buildbot_json.py')
_ENSURE_FORMAT = """$ParanoidMode CheckIntegrity
@Subdir cipd
{} {}"""
_ENSURE_SUBDIR = 'cipd'
_RE_COMMENT_OR_BLANK = re.compile(r'^ *(#.*)?$')
class CTSConfig(object):
"""Represents a CTS config file."""
def __init__(self, file_path=CONFIG_PATH):
"""Constructs a representation of the CTS config file.
Only read operations are provided by this object. Users should edit the
file manually for any modifications.
Args:
file_path: Path to file.
"""
self._path = os.path.abspath(file_path)
with open(self._path) as f:
self._config = json.load(f)
def get_platforms(self):
return sorted(self._config.keys())
def get_archs(self, platform):
return sorted(self._config[platform]['arch'].keys())
def iter_platform_archs(self):
for p in self.get_platforms():
for a in self.get_archs(p):
yield p, a
def get_cipd_zip(self, platform, arch):
return self._config[platform]['arch'][arch]['filename']
def get_origin(self, platform, arch):
return self._config[platform]['arch'][arch]['_origin']
def get_origin_zip(self, platform, arch):
return os.path.basename(self.get_origin(platform, arch))
def get_apks(self, platform):
return sorted([r['apk'] for r in self._config[platform]['test_runs']])
class CTSCIPDYaml(object):
"""Represents a CTS CIPD yaml file."""
RE_PACKAGE = r'^package:\s*(\S+)\s*$'
RE_DESC = r'^description:\s*(.+)$'
RE_DATA = r'^data:\s*$'
RE_FILE = r'^\s+-\s+file:\s*(.+)$'
# TODO(crbug.com/1049432): Replace with yaml parser
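  # These patterns recognize a minimal cipd.yaml of the form:
  #   package: chromium/android_webview/tools/cts_archive
  #   description: <one-line description>
  #   data:
  #     - file: <path relative to the yaml file>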
@classmethod
def parse(cls, lines):
result = {}
for line in lines:
if len(line) == 0 or line[0] == '#':
continue
package_match = re.match(cls.RE_PACKAGE, line)
if package_match:
result['package'] = package_match.group(1)
continue
desc_match = re.match(cls.RE_DESC, line)
if desc_match:
result['description'] = desc_match.group(1)
continue
if re.match(cls.RE_DATA, line):
result['data'] = []
if 'data' in result:
file_match = re.match(cls.RE_FILE, line)
if file_match:
result['data'].append({'file': file_match.group(1)})
return result
def __init__(self, file_path=CIPD_PATH):
"""Constructs a representation of CTS CIPD yaml file.
Note the file won't be modified unless write is called
with its path.
Args:
file_path: Path to file.
"""
self._path = os.path.abspath(file_path)
self._header = []
# Read header comments
with open(self._path) as f:
for l in f.readlines():
if re.match(_RE_COMMENT_OR_BLANK, l):
self._header.append(l)
else:
break
# Read yaml data
with open(self._path) as f:
self._yaml = CTSCIPDYaml.parse(f.readlines())
def get_file_path(self):
"""Get full file path of yaml file that this was constructed from."""
return self._path
def get_file_basename(self):
"""Get base file name that this was constructed from."""
return os.path.basename(self._path)
def get_package(self):
"""Get package name."""
return self._yaml['package']
def clear_files(self):
"""Clears all files in file (only in local memory, does not modify file)."""
self._yaml['data'] = []
def append_file(self, file_name):
"""Add file_name to list of files."""
self._yaml['data'].append({'file': str(file_name)})
def remove_file(self, file_name):
"""Remove file_name from list of files."""
old_file_names = self.get_files()
new_file_names = [name for name in old_file_names if name != file_name]
self._yaml['data'] = [{'file': name} for name in new_file_names]
def get_files(self):
"""Get list of files in yaml file."""
return [e['file'] for e in self._yaml['data']]
def write(self, file_path):
"""(Over)write file_path with the cipd.yaml representation."""
dir_name = os.path.dirname(file_path)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
with open(file_path, 'w') as f:
f.writelines(self._get_yamls())
def _get_yamls(self):
"""Return the cipd.yaml file contents of this object."""
output = []
output += self._header
output.append('package: {}\n'.format(self._yaml['package']))
output.append('description: {}\n'.format(self._yaml['description']))
output.append('data:\n')
self._yaml['data'].sort()
for d in self._yaml['data']:
output.append(' - file: {}\n'.format(d.get('file')))
return output
def cipd_ensure(package, version, root_dir):
"""Ensures CIPD package is installed at root_dir.
Args:
package: CIPD name of package
version: Package version
root_dir: Directory to install package into
"""
def _createEnsureFile(package, version, file_path):
with open(file_path, 'w') as f:
f.write(_ENSURE_FORMAT.format(package, version))
def _ensure(root, ensure_file):
ret = cmd_helper.RunCmd(
['cipd', 'ensure', '-root', root, '-ensure-file', ensure_file])
if ret:
      raise IOError('Error while running cipd ensure: ' + str(ret))
with tempfile.NamedTemporaryFile() as f:
_createEnsureFile(package, version, f.name)
_ensure(root_dir, f.name)
def cipd_download(cipd, version, download_dir):
"""Downloads CIPD package files.
This is different from cipd ensure in that actual files will exist at
download_dir instead of symlinks.
Args:
cipd: CTSCIPDYaml object
version: Version of package
download_dir: Destination directory
"""
package = cipd.get_package()
download_dir_abs = os.path.abspath(download_dir)
if not os.path.isdir(download_dir_abs):
os.makedirs(download_dir_abs)
with tempfile_ext.NamedTemporaryDirectory() as workDir, chdir(workDir):
cipd_ensure(package, version, '.')
for file_name in cipd.get_files():
src_path = os.path.join(_ENSURE_SUBDIR, file_name)
dest_path = os.path.join(download_dir_abs, file_name)
dest_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
ret = cmd_helper.RunCmd(['cp', '--reflink=never', src_path, dest_path])
if ret:
        raise IOError('Error copying ' + file_name + ' to ' + dest_path)
def filter_cts_file(cts_config, cts_zip_file, dest_dir):
"""Filters out non-webview test apks from downloaded CTS zip file.
Args:
cts_config: CTSConfig object
cts_zip_file: Path to downloaded CTS zip, retaining the original filename
dest_dir: Destination directory to filter to, filename will be unchanged
"""
for p in cts_config.get_platforms():
for a in cts_config.get_archs(p):
o = cts_config.get_origin(p, a)
base_name = os.path.basename(o)
if base_name == os.path.basename(cts_zip_file):
filterzip(cts_zip_file, cts_config.get_apks(p),
os.path.join(dest_dir, base_name))
return
raise ValueError('Could not find platform and arch for: ' + cts_zip_file)
class ChromiumRepoHelper(object):
"""Performs operations on Chromium checkout."""
def __init__(self, root_dir=SRC_DIR):
self._root_dir = os.path.abspath(root_dir)
self._cipd_referrers = [
os.path.join(self._root_dir, p) for p in CIPD_REFERRERS
]
@property
def cipd_referrers(self):
return self._cipd_referrers
@property
def cts_cipd_package(self):
return CTS_DEP_PACKAGE
def get_cipd_dependency_rev(self):
"""Return CTS CIPD revision in the checkout's DEPS file."""
deps_file = os.path.join(self._root_dir, DEPS_FILE)
# Use the gclient command instead of gclient_eval since the latter is not
# intended for direct use outside of depot_tools.
cmd = [
'gclient', 'getdep', '--revision',
'%s:%s' % (CTS_DEP_NAME, CTS_DEP_PACKAGE), '--deps-file', deps_file
]
env = os.environ
# Disable auto-update of depot tools since update_depot_tools may not be
# available (for example, on the presubmit bot), and it's probably best not
# to perform surprise updates anyways.
env.update({'DEPOT_TOOLS_UPDATE': '0'})
status, output, err = cmd_helper.GetCmdStatusOutputAndError(cmd, env=env)
if status != 0:
raise Exception('Command "%s" failed: %s' % (' '.join(cmd), err))
return output.strip()
def update_cts_cipd_rev(self, new_version):
"""Update references to CTS CIPD revision in checkout.
Args:
new_version: New version to use
"""
old_version = self.get_cipd_dependency_rev()
for path in self.cipd_referrers:
replace_cipd_revision(path, old_version, new_version)
def git_status(self, path):
"""Returns canonical git status of file.
Args:
path: Path to file.
Returns:
Output of git status --porcelain.
"""
with chdir(self._root_dir):
output = cmd_helper.GetCmdOutput(['git', 'status', '--porcelain', path])
return output
def update_testing_json(self):
"""Performs generate_buildbot_json.py.
Raises:
IOError: If generation failed.
"""
with chdir(self._root_dir):
ret = cmd_helper.RunCmd(['python', _GENERATE_BUILDBOT_JSON])
if ret:
        raise IOError('Error while running generate_buildbot_json.py')
def rebase(self, *rel_path_parts):
"""Construct absolute path from parts relative to root_dir.
Args:
rel_path_parts: Parts of the root relative path.
Returns:
The absolute path.
"""
return os.path.join(self._root_dir, *rel_path_parts)
def replace_cipd_revision(file_path, old_revision, new_revision):
"""Replaces cipd revision strings in file.
Args:
file_path: Path to file.
old_revision: Old cipd revision to be replaced.
new_revision: New cipd revision to use as replacement.
Returns:
Number of replaced occurrences.
Raises:
IOError: If no occurrences were found.
"""
with open(file_path) as f:
contents = f.read()
num = contents.count(old_revision)
if not num:
raise IOError('Did not find old CIPD revision {} in {}'.format(
old_revision, file_path))
newcontents = contents.replace(old_revision, new_revision)
with open(file_path, 'w') as f:
f.write(newcontents)
return num
@contextlib.contextmanager
def chdir(dirPath):
"""Context manager that changes working directory."""
cwd = os.getcwd()
os.chdir(dirPath)
try:
yield
finally:
os.chdir(cwd)
def filterzip(inputPath, pathList, outputPath):
"""Copy a subset of files from input archive into output archive.
Args:
inputPath: Input archive path
pathList: List of file names from input archive to copy
outputPath: Output archive path
"""
with zipfile.ZipFile(os.path.abspath(inputPath), 'r') as inputZip,\
zipfile.ZipFile(os.path.abspath(outputPath), 'w') as outputZip,\
tempfile_ext.NamedTemporaryDirectory() as workDir,\
chdir(workDir):
for p in pathList:
inputZip.extract(p)
outputZip.write(p)
def download(url, destination):
"""Asynchronously download url to path specified by destination.
Args:
url: Url location of file.
destination: Path where file should be saved to.
If destination parent directories do not exist, they will be created.
Returns the download thread which can then be joined by the caller to
wait for download completion.
"""
dest_dir = os.path.dirname(destination)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
t = threading.Thread(target=urllib.urlretrieve, args=(url, destination))
t.start()
return t
def update_cipd_package(cipd_yaml_path):
"""Updates the CIPD package specified by cipd_yaml_path.
Args:
cipd_yaml_path: Path of cipd yaml specification file
"""
cipd_yaml_path_abs = os.path.abspath(cipd_yaml_path)
with chdir(os.path.dirname(cipd_yaml_path_abs)),\
tempfile.NamedTemporaryFile() as jsonOut:
ret = cmd_helper.RunCmd([
'cipd', 'create', '-pkg-def', cipd_yaml_path_abs, '-json-output',
jsonOut.name
])
if ret:
raise IOError('Error during cipd create.')
return json.load(jsonOut)['result']['instance_id']
|
|
#!/usr/bin/env python
"""
head_tracker.py - Version 1.1 2013-12-20
Move the head to track a target published on the /roi topic.
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2010 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
from sensor_msgs.msg import JointState, RegionOfInterest, CameraInfo
from dynamixel_controllers.srv import *
from std_msgs.msg import Float64
from math import radians
import thread
class HeadTracker():
def __init__(self):
rospy.init_node("head_tracker")
rospy.on_shutdown(self.shutdown)
rate = rospy.get_param("~rate", 20)
r = rospy.Rate(rate)
tick = 1.0 / rate
        # How often (in Hz) do we push speed updates to the servos?
        # Pushing updates too rapidly can make the servos behave erratically.
speed_update_rate = rospy.get_param("~speed_update_rate", 10)
speed_update_interval = 1.0 / speed_update_rate
# How big a change do we need in speed before we push an update
# to the servos?
self.speed_update_threshold = rospy.get_param("~speed_update_threshold", 0.01)
# What are the names of the pan and tilt joints in the list of dynamixels?
self.head_pan_joint = rospy.get_param('~head_pan_joint', 'head_pan_joint')
self.head_tilt_joint = rospy.get_param('~head_tilt_joint', 'head_tilt_joint')
self.joints = [self.head_pan_joint, self.head_tilt_joint]
# Joint speeds are given in radians per second
self.default_joint_speed = rospy.get_param('~default_joint_speed', 0.3)
self.max_joint_speed = rospy.get_param('~max_joint_speed', 0.5)
# How far ahead or behind the target (in radians) should we aim for?
self.lead_target_angle = rospy.get_param('~lead_target_angle', 1.0)
# The pan/tilt thresholds indicate what percentage of the image window
# the ROI needs to be off-center before we make a movement
self.pan_threshold = rospy.get_param("~pan_threshold", 0.05)
self.tilt_threshold = rospy.get_param("~tilt_threshold", 0.05)
        # The gain_pan and gain_tilt parameters determine how responsive the
# servo movements are. If these are set too high, oscillation can result.
self.gain_pan = rospy.get_param("~gain_pan", 1.0)
self.gain_tilt = rospy.get_param("~gain_tilt", 1.0)
# Set limits on the pan and tilt angles
self.max_pan = rospy.get_param("~max_pan", radians(145))
self.min_pan = rospy.get_param("~min_pan", radians(-145))
self.max_tilt = rospy.get_param("~max_tilt", radians(90))
self.min_tilt = rospy.get_param("~min_tilt", radians(-90))
        # How long (in seconds) are we willing to wait for a target before re-centering the servos?
self.recenter_timeout = rospy.get_param('~recenter_timeout', 5)
# Monitor the joint states of the pan and tilt servos
self.joint_state = JointState()
rospy.Subscriber('joint_states', JointState, self.update_joint_state)
# Wait until we actually have joint state values
while self.joint_state == JointState():
rospy.sleep(1)
# Initialize the servo services and publishers
self.init_servos()
# Center the pan and tilt servos at the start
self.center_head_servos()
# Set a flag to indicate when the target has been lost
self.target_visible = False
# Set a timer to determine how long a target is no longer visible
target_lost_timer = 0.0
# Set a timer to track when we do a speed update
speed_update_timer = 0.0
# Initialize the pan and tilt speeds to zero
pan_speed = tilt_speed = 0.0
# Get a lock for updating the self.move_cmd values
self.lock = thread.allocate_lock()
# Wait for messages on the three topics we need to monitor
rospy.loginfo("Waiting for roi and camera_info topics.")
rospy.wait_for_message('camera_info', CameraInfo)
rospy.wait_for_message('joint_states', JointState)
rospy.wait_for_message('roi', RegionOfInterest)
# Subscribe to camera_info topics and set the callback
self.image_width = self.image_height = 0
rospy.Subscriber('camera_info', CameraInfo, self.get_camera_info)
# Wait until we actually have the camera data
while self.image_width == 0 or self.image_height == 0:
rospy.sleep(1)
# Subscribe to roi topics and set the callback
rospy.Subscriber('roi', RegionOfInterest, self.set_joint_cmd)
rospy.loginfo("Ready to track target.")
while not rospy.is_shutdown():
# Acquire the lock
self.lock.acquire()
try:
# If we have lost the target, stop the servos
if not self.target_visible:
self.pan_speed = 0.0
self.tilt_speed = 0.0
# Keep track of how long the target is lost
target_lost_timer += tick
else:
self.target_visible = False
target_lost_timer = 0.0
# If the target is lost long enough, re-center the servos
if target_lost_timer > self.recenter_timeout:
rospy.loginfo("Cannot find target.")
self.center_head_servos()
target_lost_timer = 0.0
else:
# Update the servo speeds at the appropriate interval
if speed_update_timer > speed_update_interval:
if abs(self.last_pan_speed - self.pan_speed) > self.speed_update_threshold:
self.set_servo_speed(self.head_pan_joint, self.pan_speed)
self.last_pan_speed = self.pan_speed
if abs(self.last_tilt_speed - self.tilt_speed) > self.speed_update_threshold:
self.set_servo_speed(self.head_tilt_joint, self.tilt_speed)
self.last_tilt_speed = self.tilt_speed
speed_update_timer = 0.0
# Update the pan position
if self.last_pan_position != self.pan_position:
self.set_servo_position(self.head_pan_joint, self.pan_position)
self.last_pan_position = self.pan_position
# Update the tilt position
if self.last_tilt_position != self.tilt_position:
self.set_servo_position(self.head_tilt_joint, self.tilt_position)
self.last_tilt_position = self.tilt_position
speed_update_timer += tick
finally:
# Release the lock
self.lock.release()
r.sleep()
def set_joint_cmd(self, msg):
# Acquire the lock
self.lock.acquire()
try:
            # If we receive an ROI message with 0 width or height, the target is not visible
if msg.width == 0 or msg.height == 0:
self.target_visible = False
return
            # If the ROI stops updating, this next statement will not happen
self.target_visible = True
# Compute the displacement of the ROI from the center of the image
target_offset_x = msg.x_offset + msg.width / 2 - self.image_width / 2
target_offset_y = msg.y_offset + msg.height / 2 - self.image_height / 2
try:
percent_offset_x = float(target_offset_x) / (float(self.image_width) / 2.0)
percent_offset_y = float(target_offset_y) / (float(self.image_height) / 2.0)
            except ZeroDivisionError:
percent_offset_x = 0
percent_offset_y = 0
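            # percent_offset_* expresses the pixel offset as a fraction of
            # half the image size, so values lie roughly in [-1, 1].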
# Set the target position ahead or behind the current position
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
# Pan the camera only if the x target offset exceeds the threshold
if abs(percent_offset_x) > self.pan_threshold:
# Set the pan speed proportional to the target offset
self.pan_speed = min(self.max_joint_speed, max(0, self.gain_pan * abs(percent_offset_x)))
if target_offset_x > 0:
self.pan_position = max(self.min_pan, current_pan - self.lead_target_angle)
else:
self.pan_position = min(self.max_pan, current_pan + self.lead_target_angle)
else:
self.pan_speed = 0
self.pan_position = current_pan
# Set the target position ahead or behind the current position
current_tilt = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
# Tilt the camera only if the y target offset exceeds the threshold
if abs(percent_offset_y) > self.tilt_threshold:
# Set the tilt speed proportional to the target offset
self.tilt_speed = min(self.max_joint_speed, max(0, self.gain_tilt * abs(percent_offset_y)))
if target_offset_y < 0:
self.tilt_position = max(self.min_tilt, current_tilt - self.lead_target_angle)
else:
self.tilt_position = min(self.max_tilt, current_tilt + self.lead_target_angle)
else:
self.tilt_speed = 0
self.tilt_position = current_tilt
finally:
# Release the lock
self.lock.release()
def center_head_servos(self):
rospy.loginfo("Centering servos.")
self.servo_speed[self.head_pan_joint](self.default_joint_speed)
self.servo_speed[self.head_tilt_joint](self.default_joint_speed)
current_tilt = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
while abs(current_tilt) > 0.05 or abs(current_pan) > 0.05:
self.servo_position[self.head_pan_joint].publish(0)
self.servo_position[self.head_tilt_joint].publish(0)
rospy.sleep(0.1)
current_tilt = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
self.servo_speed[self.head_pan_joint](0.0)
self.servo_speed[self.head_tilt_joint](0.0)
def init_servos(self):
# Create dictionaries to hold the speed, position and torque controllers
self.servo_speed = dict()
self.servo_position = dict()
self.torque_enable = dict()
# Connect to the set_speed services and define a position publisher for each servo
rospy.loginfo("Waiting for joint controllers services...")
for joint in sorted(self.joints):
# The set_speed services
set_speed_service = '/' + joint + '/set_speed'
rospy.wait_for_service(set_speed_service)
self.servo_speed[joint] = rospy.ServiceProxy(set_speed_service, SetSpeed, persistent=True)
# Initialize the servo speed to the default_joint_speed
self.servo_speed[joint](self.default_joint_speed)
# The position controllers
self.servo_position[joint] = rospy.Publisher('/' + joint + '/command', Float64)
# A service to enable/disable servo torque
torque_enable = '/' + joint + '/torque_enable'
rospy.wait_for_service(torque_enable)
self.torque_enable[joint] = rospy.ServiceProxy(torque_enable, TorqueEnable)
self.torque_enable[joint](False)
self.pan_position = 0
self.tilt_position = 0
self.pan_speed = 0
self.tilt_speed = 0
self.last_pan_position = 0
self.last_tilt_position = 0
self.last_tilt_speed = 0
self.last_pan_speed = 0
def set_servo_speed(self, servo, speed):
# Guard against a speed of exactly zero which means
# "move as fast as you can" to a Dynamixel servo.
if speed == 0:
speed = 0.01
self.servo_speed[servo](speed)
def set_servo_position(self, servo, position):
self.servo_position[servo].publish(position)
def update_joint_state(self, msg):
self.joint_state = msg
def get_camera_info(self, msg):
self.image_width = msg.width
self.image_height = msg.height
def shutdown(self):
rospy.loginfo("Shutting down head tracking node.")
self.center_head_servos()
# Relax all servos to give them a rest.
rospy.loginfo("Relaxing pan and tilt servos.")
for servo in self.joints:
self.torque_enable[servo](False)
if __name__ == '__main__':
try:
HeadTracker()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("Head tracking node terminated.")
|
|
#!/usr/bin/env python
DOCUMENTATION = '''
---
module: dockerimp
short_description: improved docker container management module
description:
- Manage docker containers with ansible
options:
name:
description:
- Set the name of the container
required: false
default: null
aliases: ["id"]
state:
description:
- Set the state of the container
required: true
default: null
        choices: [
            "present", "running", "running_latest", "stopped",
            "absent", "restarted", "image_present",
            "image_latest", "image_absent"
        ]
aliases: []
image:
description:
- Set the image for the container
required: false
default: null
aliases: []
env:
description:
- Set the container environment variables
required: false
default: null
aliases: []
volumes:
description:
            - Mount volumes for the container
required: false
default: null
aliases: []
ports:
description:
            - Map ports for the container
required: false
default: null
aliases: []
command:
description:
            - Set the command for the container
required: false
default: null
aliases: []
expose:
description:
- Expose ports
required: false
default: null
aliases: []
links:
description:
- Link containers
required: false
default: null
aliases: []
client_url:
description:
            - Docker client base URL
required: false
default: "unix://var/run/docker.sock"
aliases: []
insecure_registry:
description:
            - Trust insecure registries
required: false
default: false
aliases: []
'''
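# Illustrative usage only (not part of the original module); the task names,
# image names, and host paths below are hypothetical.
EXAMPLES = '''
- name: Run an nginx container with a published port and a read-only volume
  dockerimp:
    name: web
    image: nginx:latest
    state: running
    ports: "8080:80"
    volumes: "/srv/www:/usr/share/nginx/html:ro"
    env: "ENVIRONMENT=production"

- name: Make sure an image is present on the host
  dockerimp:
    image: busybox:latest
    state: image_present
'''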
import sys
import copy
try:
import docker.client
except ImportError as e:
print("Failed to import module {0}".format(e))
sys.exit(1)
class ContainerManagerException(Exception):
pass
class ContainerManager():
def __init__(self, module):
self.module = module
self.client = docker.Client(base_url = module.params.get('client_url'))
self.changed = False
self.check_mode = module.check_mode
self.changes_made = []
self.params = self.fix_parameters()
def fix_parameters(self):
params = copy.deepcopy(self.module.params)
if params.get('volumes'):
try:
if type(params['volumes']) is str:
volumes = params['volumes'].split(",")
elif type(params['volumes']) is list:
volumes = params['volumes']
else:
raise ContainerManagerException({'Invalid argument': params['volumes']})
mount_points = [x.split(":")[1] for x in volumes]
binds = {}
for i in volumes:
j = i.split(":")
len_j = len(j)
if len_j != 2 and len_j != 3:
raise ContainerManagerException({'Invalid argument': params['volumes']})
ro = False
if len_j == 3:
if j[2] == "ro":
ro = True
elif j[2] == "rw":
ro = False
else:
raise ContainerManagerException({'Invalid argument': params['volumes']})
binds[j[0]] = {'bind': j[1], 'ro': ro}
params['binds'] = binds
params['volumes'] = mount_points
except IndexError as e:
raise ContainerManagerException({'Invalid argument': params['volumes']})
if params.get('image'):
# add 'latest' tag to the image name if no tag is already provided
image = params['image']
image_split = image.split("/")[-1].split(":")
if len(image_split) == 1:
params['image'] = "{0}:latest".format(params['image'])
if params.get('ports'):
try:
if type(params['ports']) is str:
port_params = params['ports'].split(",")
elif type(params['ports']) is list:
port_params = params['ports']
else:
raise ContainerManagerException({'Invalid argument': params['ports']})
ports = []
for i in port_params:
values = i.split(":")
len_values = len(values)
if len_values != 2 and len_values != 3:
raise ContainerManagerException({'Invalid argument': params['ports']})
port_and_prot = values[-1].split("/")
len_port_and_prot = len(port_and_prot)
if len_port_and_prot > 2:
raise ContainerManagerException({'Invalid argument': params['ports']})
p = (port_and_prot[0], port_and_prot[1]) if len_port_and_prot == 2 else port_and_prot[0]
ports.append(p)
port_bindings = {}
for i in port_params:
values = i.split(":")
len_values = len(values)
if len_values == 2:
host_port = values[0]
prot_and_port = values[1]
bind_ip = None
elif len_values == 3:
host_port = values[1]
prot_and_port = values[2]
bind_ip = values[0]
else:
raise ContainerManagerException({'Invalid argument': params['ports']})
prot_and_port = prot_and_port.split("/")
len_prot_and_port = len(prot_and_port)
if len_prot_and_port == 2:
key = "{0}/{1}".format(prot_and_port[0], prot_and_port[1])
elif len_prot_and_port == 1:
key = prot_and_port[0]
else:
raise ContainerManagerException({'Invalid argument': params['ports']})
if bind_ip:
val = (bind_ip, host_port) if host_port else (bind_ip,)
else:
val = host_port or None
port_bindings[key] = val
params['ports'] = ports
params['port_bindings'] = port_bindings
except IndexError as e:
raise ContainerManagerException({'Invalid argument': params['ports'], 'error': e})
if params.get('env'):
if type(params['env']) is str:
envs = params['env'].split(",")
elif type(params['env']) is list:
envs = params['env']
elif type(params['env']) is dict:
envs = ["{0}={1}".format(x, params['env'][x]) for x in params['env']]
else:
raise ContainerManagerException({'Invalid argument': params['env']})
# Add special ANSIBLE_MANAGED_ENVS variable so we can track which
# variables are managed by ansible
envs.append("ANSIBLE_MANAGED_ENVS={0}".format(":".join([x.split("=")[0] for x in envs])))
params['environment'] = envs
return params
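    # Illustrative examples of the rewriting above (values are hypothetical):
    #   volumes "/srv/data:/data:ro" -> volumes=['/data'],
    #       binds={'/srv/data': {'bind': '/data', 'ro': True}}
    #   ports "8080:80"              -> ports=['80'], port_bindings={'80': '8080'}
    #   env "FOO=bar"                -> environment=['FOO=bar',
    #       'ANSIBLE_MANAGED_ENVS=FOO']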
def ensure_present(self):
required_params = ("name", "image")
self.check_required_parameters(required_params)
container = self.find_container(self.params['name'])
self.__ensure_present(container)
def ensure_running(self):
required_params = ("name", "image")
self.check_required_parameters(required_params)
container = self.find_container(self.params['name'])
container = self.__ensure_present(container)
if not container['State']['Running']:
container = self.start_container(container)
def ensure_running_latest(self):
required_params = ("name", "image")
self.check_required_parameters(required_params)
container = self.find_container(self.params['name'])
image = self.find_image(self.params['image'])
if not container:
container = self.__ensure_present(container)
elif not self.is_running_latest_image(container, image):
self.remove_container(container)
container = self.__ensure_present()
elif not self.ensure_same(container):
self.ensure_absent()
container = self.__ensure_present()
if not container['State']['Running']:
self.start_container(container)
def ensure_stopped(self):
required_params = ("name",)
self.check_required_parameters(required_params)
container = self.find_container(self.params['name'])
if not container:
raise ContainerManagerException("Container not found")
if container['State']['Running']:
self.stop_container(container)
def ensure_absent(self):
required_params = ("name",)
self.check_required_parameters(required_params)
container = self.find_container(self.params['name'])
self.remove_container(container)
def restart(self):
required_params = ("name",)
self.check_required_parameters(required_params)
container = self.find_container(self.params.get('name'))
if not container:
raise ContainerManagerException("Container not found")
if not container['State']['Running']:
raise ContainerManagerException("Container not running")
self.restart_container(container)
def ensure_image_present(self):
required_params = ("image",)
self.check_required_parameters(required_params)
self.__ensure_image_present(self.params['image'])
def ensure_image_latest(self):
required_params = ("image",)
self.check_required_parameters(required_params)
self.__ensure_image_latest(self.params['image'])
def ensure_image_absent(self):
required_params = ("image",)
self.check_required_parameters(required_params)
name = self.params['image']
if self.find_image(name):
self.client.remove_image(name)
def __ensure_present(self, container = None):
if not container:
self.__ensure_image_present(self.params['image'])
container = self.create_container()
elif not self.ensure_same(container):
self.ensure_absent()
container = self.__ensure_present()
return container
def __ensure_image_present(self, name):
image = self.find_image(name)
if not image:
self.pull_image(name)
def __ensure_image_latest(self, name):
self.pull_image(name)
return self.find_image(name)
def check_required_parameters(self, required):
for i in required:
if not self.params.get(i):
state = self.params['state']
error_msg = "{0} required for {1} satate".format(i, state)
raise ContainerManagerException(error_msg)
def find_container(self, name):
containers = self.client.containers(all = True)
c = [x for x in containers if
((x['Names'] or [""])[0] == "/{0}".format(name)) or
(len(name) > 9 and x['Id'].startswith(name))]
if len(c) > 1:
error_msg = "Found more than one container with name or id"
raise ContainerManagerException({'Unexpected error': error_msg})
if c:
container = self.get_info(c[0])
return container
return None
def find_image(self, name):
        # The client.images method does not raise an error if the image is not
        # found; it just returns an empty list. client.inspect_image does raise
        # an error, so it is probably cleaner to check this way than to catch one.
image_name = name.split(":")
        # the image name may contain a registry port, so rejoin everything except the last item, which is the tag
images = self.client.images(name = ":".join(image_name[:-1]))
image_len = len(images)
if image_len == 0:
return None
else:
for i in images:
if name in i['RepoTags']:
return self.client.inspect_image(name)
else:
return None
def is_running_latest_image(self, container, image):
if not image:
return False
if image['Id'] == container['Image']:
return True
else:
return False
def get_info(self, container):
return self.client.inspect_container(container)
def get_image_info(self, image):
return self.client.inspect_image(image)
def pull_image(self, name):
insecure_registry = self.params['insecure_registry']
old = self.find_image(name)
self.client.pull(name, insecure_registry = insecure_registry)
new = self.find_image(name)
if not new:
error_msg = "Cannot find {0}".format(name)
raise ContainerManagerException({'Image not found': error_msg})
elif new['Id'] != (old or {}).get('Id'):
self.write_log('PULLED', new)
def create_container(self):
params = self.params
key_filter = (
'image', 'command', 'hostname', 'user',
'detach', 'stdin_open', 'tty', 'mem_limit',
'ports', 'environment', 'dns', 'volumes',
'volumes_from', 'network_disabled', 'name',
'entrypoint', 'cpu_shares', 'working_dir',
'memswap_limit'
)
filtered = { x: params[x] for x in key_filter if x in params }
c = self.client.create_container(**filtered)
container = self.get_info(c)
self.write_log('CREATED', container)
return container
def start_container(self, container):
params = self.params
key_filter = (
'binds', 'port_bindings', 'lxc_conf',
'publish_all_ports', 'links', 'privileged',
'dns', 'dns_search', 'volumes_from', 'network_mode',
'restart_policy', 'cap_add', 'cap_drop'
)
filtered = { x: params[x] for x in key_filter if x in params }
self.client.start(container, **filtered)
container = self.get_info(container)
self.write_log('STARTED', container)
return container
def stop_container(self, container):
self.client.stop(container)
container = self.get_info(container)
self.write_log('STOPPED', container)
return container
def remove_container(self, container):
if container['State']['Running']:
container = self.stop_container(container)
self.client.remove_container(container)
c = self.find_container(container['Id'])
if c:
raise ContainerManagerException("Could not remove the container")
self.write_log('REMOVED', container)
def restart_container(self, container):
self.client.restart(container)
container = self.get_info(container)
self.write_log('RESTARTED', container)
def ensure_same(self, container):
params = self.params
require_restart = False
# Ensure running the right image
if container['Config']['Image'] != params['image']:
require_restart = True
# Ensure running latest image if the parameter is provided
same = True
if params.get('latest_image'):
self.client.pull(params['image'])
            if not self.is_running_latest_image(container, self.find_image(params['image'])):
same = False
require_restart = True
# Ensure environment vars are up to date
for i in container['Config']['Env']:
if "ANSIBLE_MANAGED_ENVS" in i:
ansible_managed_envs = i.split("=")[1].split(":")
# Add the magic ANSIBLE_MANAGED_ENVS key value here
# so that the two lists are easily comparable with
# set() below
ansible_managed_envs.append("ANSIBLE_MANAGED_ENVS")
has_ansible_managed_envs = True
break
else:
has_ansible_managed_envs = False
has_env_params = params.get('environment') != None
if has_env_params or has_ansible_managed_envs:
if has_env_params and has_ansible_managed_envs:
env_params = params['environment']
# Check same variables are set
if set(ansible_managed_envs) != set([x.split("=")[0] for x in env_params]):
require_restart = True
# Check that the values are right
else:
for env in env_params:
if env not in container['Config']['Env']:
require_restart = True
break
else:
require_restart = True
# Ensure volume mountings are right
container_binds = container['HostConfig']['Binds']
bind_params = params.get('binds')
if container_binds or bind_params:
if container_binds and bind_params:
_bind_params = [
":".join([
x, bind_params[x]['bind'], "ro" if bind_params[x]['ro'] else "rw"
]) for x in bind_params
]
if set(_bind_params) != set(container_binds):
require_restart = True
else:
require_restart = True
# Ensure command is right
if params.get('command'):
if params['command'] != container['Command']:
require_restart = True
        return not require_restart
def generate_message(self):
if not self.has_changes():
msg = "Up to date. No changes made"
else:
msg = self.changes_made
return msg
def write_log(self, action, info):
key_filter = (
'Name', 'Id', 'Image',
)
filtered = { x: info[x] for x in key_filter if x in info }
self.changes_made.append({action: filtered})
def has_changes(self):
if self.changes_made:
return True
return False
def main():
arguments = {
'state': {
'required': True,
            'choices': [
                "present", "running", "running_latest",
                "stopped", "absent", "restarted",
                "image_present", "image_latest", "image_absent",
            ]
},
'name': { 'default': None, 'aliases': ["id"] },
'image': { 'default': None },
'env': { 'default': None },
'volumes': { 'default': None },
'ports': { 'default': None },
'command': { 'default': None },
'expose': { 'default': None },
        'links': { 'default': None },
        'client_url': { 'default': "unix://var/run/docker.sock" },
        'insecure_registry': { 'default': False, 'choices': BOOLEANS },
}
#module = AnsibleModule(argument_spec = arguments, supports_check_mode = True)
module = AnsibleModule(argument_spec = arguments)
try:
manager = ContainerManager(module)
state = module.params.get('state')
if state == "present":
manager.ensure_present()
elif state == "running":
manager.ensure_running()
elif state == "running_latest":
manager.ensure_running_latest()
elif state == "stopped":
manager.ensure_stopped()
elif state == "absent":
manager.ensure_absent()
elif state == "restarted":
manager.restart()
elif state == "image_present":
manager.ensure_image_present()
elif state == "image_latest":
manager.ensure_image_latest()
elif state == "image_absent":
manager.ensure_image_absent()
module.exit_json(changed = manager.has_changes(), msg = manager.generate_message())
except ContainerManagerException as e:
module.fail_json(msg = str(e))
except docker.errors.APIError as e:
module.fail_json(msg = str(e))
except docker.errors.DockerException as e:
module.fail_json(msg = str(e))
from ansible.module_utils.basic import *
main()
|
|
"""Helpers for config validation using voluptuous."""
from datetime import (
date as date_sys,
datetime as datetime_sys,
time as time_sys,
timedelta,
)
from enum import Enum
import inspect
import logging
from numbers import Number
import os
import re
from socket import _GLOBAL_DEFAULT_TIMEOUT # type: ignore # private, not in typeshed
from typing import (
Any,
Callable,
Dict,
Hashable,
List,
Optional,
Pattern,
Type,
TypeVar,
Union,
cast,
)
from urllib.parse import urlparse
from uuid import UUID
from pkg_resources import parse_version
import voluptuous as vol
import voluptuous_serialize
from homeassistant.const import (
ATTR_AREA_ID,
ATTR_ENTITY_ID,
CONF_ABOVE,
CONF_ALIAS,
CONF_ATTRIBUTE,
CONF_BELOW,
CONF_CHOOSE,
CONF_CONDITION,
CONF_CONDITIONS,
CONF_CONTINUE_ON_TIMEOUT,
CONF_COUNT,
CONF_DEFAULT,
CONF_DELAY,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_ENTITY_NAMESPACE,
CONF_EVENT,
CONF_EVENT_DATA,
CONF_EVENT_DATA_TEMPLATE,
CONF_FOR,
CONF_PLATFORM,
CONF_REPEAT,
CONF_SCAN_INTERVAL,
CONF_SCENE,
CONF_SEQUENCE,
CONF_SERVICE,
CONF_SERVICE_TEMPLATE,
CONF_STATE,
CONF_TIMEOUT,
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_UNIT_SYSTEM_METRIC,
CONF_UNTIL,
CONF_VALUE_TEMPLATE,
CONF_VARIABLES,
CONF_WAIT_FOR_TRIGGER,
CONF_WAIT_TEMPLATE,
CONF_WHILE,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
WEEKDAYS,
__version__,
)
from homeassistant.core import split_entity_id, valid_entity_id
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import (
script_variables as script_variables_helper,
template as template_helper,
)
from homeassistant.helpers.logging import KeywordStyleAdapter
from homeassistant.util import sanitize_path, slugify as util_slugify
import homeassistant.util.dt as dt_util
# pylint: disable=invalid-name
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM', 'HH:MM:SS' or 'HH:MM:SS.F'"
# Home Assistant types
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
positive_float = vol.All(vol.Coerce(float), vol.Range(min=0))
latitude = vol.All(
vol.Coerce(float), vol.Range(min=-90, max=90), msg="invalid latitude"
)
longitude = vol.All(
vol.Coerce(float), vol.Range(min=-180, max=180), msg="invalid longitude"
)
gps = vol.ExactSequence([latitude, longitude])
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
port = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
# typing typevar
T = TypeVar("T")
def path(value: Any) -> str:
"""Validate it's a safe path."""
if not isinstance(value, str):
raise vol.Invalid("Expected a string")
if sanitize_path(value) != value:
raise vol.Invalid("Invalid path")
return value
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys: str) -> Callable:
"""Validate that at least one key exists."""
def validate(obj: Dict) -> Dict:
"""Test keys exist in dict."""
if not isinstance(obj, dict):
raise vol.Invalid("expected dictionary")
for k in obj:
if k in keys:
return obj
raise vol.Invalid("must contain at least one of {}.".format(", ".join(keys)))
return validate
def has_at_most_one_key(*keys: str) -> Callable[[Dict], Dict]:
"""Validate that zero keys exist or one key exists."""
def validate(obj: Dict) -> Dict:
"""Test zero keys exist or one key exists in dict."""
if not isinstance(obj, dict):
raise vol.Invalid("expected dictionary")
if len(set(keys) & set(obj)) > 1:
raise vol.Invalid("must contain at most one of {}.".format(", ".join(keys)))
return obj
return validate
def boolean(value: Any) -> bool:
"""Validate and coerce a boolean value."""
if isinstance(value, bool):
return value
if isinstance(value, str):
value = value.lower().strip()
if value in ("1", "true", "yes", "on", "enable"):
return True
if value in ("0", "false", "no", "off", "disable"):
return False
elif isinstance(value, Number):
# type ignore: https://github.com/python/mypy/issues/3186
return value != 0 # type: ignore
raise vol.Invalid(f"invalid boolean value {value}")
_WS = re.compile("\\s*")
def whitespace(value: Any) -> str:
"""Validate result contains only whitespace."""
if isinstance(value, str) and _WS.fullmatch(value):
return value
raise vol.Invalid(f"contains non-whitespace: {value}")
def isdevice(value: Any) -> str:
"""Validate that value is a real device."""
try:
os.stat(value)
return str(value)
except OSError as err:
raise vol.Invalid(f"No device at {value} found") from err
def matches_regex(regex: str) -> Callable[[Any], str]:
"""Validate that the value is a string that matches a regex."""
compiled = re.compile(regex)
def validator(value: Any) -> str:
"""Validate that value matches the given regex."""
if not isinstance(value, str):
raise vol.Invalid(f"not a string value: {value}")
if not compiled.match(value):
raise vol.Invalid(
f"value {value} does not match regular expression {compiled.pattern}"
)
return value
return validator
def is_regex(value: Any) -> Pattern[Any]:
"""Validate that a string is a valid regular expression."""
try:
r = re.compile(value)
return r
except TypeError as err:
raise vol.Invalid(
f"value {value} is of the wrong type for a regular expression"
) from err
except re.error as err:
raise vol.Invalid(f"value {value} is not a valid regular expression") from err
def isfile(value: Any) -> str:
"""Validate that the value is an existing file."""
if value is None:
raise vol.Invalid("None is not file")
file_in = os.path.expanduser(str(value))
if not os.path.isfile(file_in):
raise vol.Invalid("not a file")
if not os.access(file_in, os.R_OK):
raise vol.Invalid("file not readable")
return file_in
def isdir(value: Any) -> str:
"""Validate that the value is an existing dir."""
if value is None:
raise vol.Invalid("not a directory")
dir_in = os.path.expanduser(str(value))
if not os.path.isdir(dir_in):
raise vol.Invalid("not a directory")
if not os.access(dir_in, os.R_OK):
raise vol.Invalid("directory not readable")
return dir_in
def ensure_list(value: Union[T, List[T], None]) -> List[T]:
"""Wrap value in list if it is not one."""
if value is None:
return []
return value if isinstance(value, list) else [value]
def entity_id(value: Any) -> str:
"""Validate Entity ID."""
str_value = string(value).lower()
if valid_entity_id(str_value):
return str_value
raise vol.Invalid(f"Entity ID {value} is an invalid entity id")
def entity_ids(value: Union[str, List]) -> List[str]:
"""Validate Entity IDs."""
if value is None:
raise vol.Invalid("Entity IDs can not be None")
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(",")]
return [entity_id(ent_id) for ent_id in value]
comp_entity_ids = vol.Any(
vol.All(vol.Lower, vol.Any(ENTITY_MATCH_ALL, ENTITY_MATCH_NONE)), entity_ids
)
def entity_domain(domain: str) -> Callable[[Any], str]:
"""Validate that entity belong to domain."""
def validate(value: Any) -> str:
"""Test if entity domain is domain."""
ent_domain = entities_domain(domain)
return ent_domain(value)[0]
return validate
def entities_domain(domain: str) -> Callable[[Union[str, List]], List[str]]:
"""Validate that entities belong to domain."""
def validate(values: Union[str, List]) -> List[str]:
"""Test if entity domain is domain."""
values = entity_ids(values)
for ent_id in values:
if split_entity_id(ent_id)[0] != domain:
raise vol.Invalid(
f"Entity ID '{ent_id}' does not belong to domain '{domain}'"
)
return values
return validate
def enum(enumClass: Type[Enum]) -> vol.All:
"""Create validator for specified enum."""
return vol.All(vol.In(enumClass.__members__), enumClass.__getitem__)
def icon(value: Any) -> str:
"""Validate icon."""
str_value = str(value)
if ":" in str_value:
return str_value
raise vol.Invalid('Icons should be specified in the form "prefix:name"')
time_period_dict = vol.All(
dict,
vol.Schema(
{
"days": vol.Coerce(float),
"hours": vol.Coerce(float),
"minutes": vol.Coerce(float),
"seconds": vol.Coerce(float),
"milliseconds": vol.Coerce(float),
}
),
has_at_least_one_key("days", "hours", "minutes", "seconds", "milliseconds"),
lambda value: timedelta(**value),
)
def time(value: Any) -> time_sys:
"""Validate and transform a time."""
if isinstance(value, time_sys):
return value
try:
time_val = dt_util.parse_time(value)
except TypeError as err:
raise vol.Invalid("Not a parseable type") from err
if time_val is None:
raise vol.Invalid(f"Invalid time specified: {value}")
return time_val
def date(value: Any) -> date_sys:
"""Validate and transform a date."""
if isinstance(value, date_sys):
return value
try:
date_val = dt_util.parse_date(value)
except TypeError as err:
raise vol.Invalid("Not a parseable type") from err
if date_val is None:
raise vol.Invalid("Could not parse date")
return date_val
def time_period_str(value: str) -> timedelta:
"""Validate and transform time offset."""
if isinstance(value, int): # type: ignore
raise vol.Invalid("Make sure you wrap time values in quotes")
if not isinstance(value, str):
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
negative_offset = False
if value.startswith("-"):
negative_offset = True
value = value[1:]
elif value.startswith("+"):
value = value[1:]
parsed = value.split(":")
if len(parsed) not in (2, 3):
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
try:
hour = int(parsed[0])
minute = int(parsed[1])
try:
second = float(parsed[2])
except IndexError:
second = 0
except ValueError as err:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value)) from err
offset = timedelta(hours=hour, minutes=minute, seconds=second)
if negative_offset:
offset *= -1
return offset
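# For example (illustrative): time_period_str("01:30") returns
# timedelta(hours=1, minutes=30); a leading "-" negates the offset and an
# optional third field adds seconds, e.g. "00:00:05.5".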
def time_period_seconds(value: Union[float, str]) -> timedelta:
"""Validate and transform seconds to a time offset."""
try:
return timedelta(seconds=float(value))
except (ValueError, TypeError) as err:
raise vol.Invalid(f"Expected seconds, got {value}") from err
time_period = vol.Any(time_period_str, time_period_seconds, timedelta, time_period_dict)
def match_all(value: T) -> T:
"""Validate that matches all values."""
return value
def positive_timedelta(value: timedelta) -> timedelta:
"""Validate timedelta is positive."""
if value < timedelta(0):
raise vol.Invalid("Time period should be positive")
return value
positive_time_period_dict = vol.All(time_period_dict, positive_timedelta)
positive_time_period = vol.All(time_period, positive_timedelta)
def remove_falsy(value: List[T]) -> List[T]:
"""Remove falsy values from a list."""
return [v for v in value if v]
def service(value: Any) -> str:
"""Validate service."""
    # Services use the same format as entities, so we can use the same helper.
str_value = string(value).lower()
if valid_entity_id(str_value):
return str_value
raise vol.Invalid(f"Service {value} does not match format <domain>.<name>")
def slug(value: Any) -> str:
"""Validate value is a valid slug."""
if value is None:
raise vol.Invalid("Slug should not be None")
str_value = str(value)
slg = util_slugify(str_value)
if str_value == slg:
return str_value
raise vol.Invalid(f"invalid slug {value} (try {slg})")
def schema_with_slug_keys(
value_schema: Union[T, Callable], *, slug_validator: Callable[[Any], str] = slug
) -> Callable:
"""Ensure dicts have slugs as keys.
Replacement of vol.Schema({cv.slug: value_schema}) to prevent misleading
"Extra keys" errors from voluptuous.
"""
schema = vol.Schema({str: value_schema})
def verify(value: Dict) -> Dict:
"""Validate all keys are slugs and then the value_schema."""
if not isinstance(value, dict):
raise vol.Invalid("expected dictionary")
for key in value.keys():
slug_validator(key)
return cast(Dict, schema(value))
return verify
def slugify(value: Any) -> str:
"""Coerce a value to a slug."""
if value is None:
raise vol.Invalid("Slug should not be None")
slg = util_slugify(str(value))
if slg:
return slg
raise vol.Invalid(f"Unable to slugify {value}")
def string(value: Any) -> str:
"""Coerce value to string, except for None."""
if value is None:
raise vol.Invalid("string value is None")
if isinstance(value, template_helper.ResultWrapper):
value = value.render_result
elif isinstance(value, (list, dict)):
raise vol.Invalid("value should be a string")
return str(value)
def string_with_no_html(value: Any) -> str:
"""Validate that the value is a string without HTML."""
value = string(value)
regex = re.compile(r"<[a-z][\s\S]*>")
if regex.search(value):
raise vol.Invalid("the string should not contain HTML")
return str(value)
def temperature_unit(value: Any) -> str:
"""Validate and transform temperature unit."""
value = str(value).upper()
if value == "C":
return TEMP_CELSIUS
if value == "F":
return TEMP_FAHRENHEIT
raise vol.Invalid("invalid temperature unit (expected C or F)")
unit_system = vol.All(
vol.Lower, vol.Any(CONF_UNIT_SYSTEM_METRIC, CONF_UNIT_SYSTEM_IMPERIAL)
)
def template(value: Optional[Any]) -> template_helper.Template:
"""Validate a jinja2 template."""
if value is None:
raise vol.Invalid("template value is None")
if isinstance(value, (list, dict, template_helper.Template)):
raise vol.Invalid("template value should be a string")
template_value = template_helper.Template(str(value)) # type: ignore
try:
template_value.ensure_valid() # type: ignore[no-untyped-call]
return template_value
except TemplateError as ex:
raise vol.Invalid(f"invalid template ({ex})") from ex
def dynamic_template(value: Optional[Any]) -> template_helper.Template:
"""Validate a dynamic (non static) jinja2 template."""
if value is None:
raise vol.Invalid("template value is None")
if isinstance(value, (list, dict, template_helper.Template)):
raise vol.Invalid("template value should be a string")
if not template_helper.is_template_string(str(value)):
raise vol.Invalid("template value does not contain a dynmamic template")
template_value = template_helper.Template(str(value)) # type: ignore
try:
template_value.ensure_valid() # type: ignore[no-untyped-call]
return template_value
except TemplateError as ex:
raise vol.Invalid(f"invalid template ({ex})") from ex
def template_complex(value: Any) -> Any:
"""Validate a complex jinja2 template."""
if isinstance(value, list):
return_list = value.copy()
for idx, element in enumerate(return_list):
return_list[idx] = template_complex(element)
return return_list
if isinstance(value, dict):
return {
template_complex(key): template_complex(element)
for key, element in value.items()
}
if isinstance(value, str) and template_helper.is_template_string(value):
return template(value)
return value
positive_time_period_template = vol.Any(
positive_time_period, template, template_complex
)
def datetime(value: Any) -> datetime_sys:
"""Validate datetime."""
if isinstance(value, datetime_sys):
return value
try:
date_val = dt_util.parse_datetime(value)
except TypeError:
date_val = None
if date_val is None:
raise vol.Invalid(f"Invalid datetime specified: {value}")
return date_val
def time_zone(value: str) -> str:
"""Validate timezone."""
if dt_util.get_time_zone(value) is not None:
return value
raise vol.Invalid(
"Invalid time zone passed in. Valid options can be found here: "
"http://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
)
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
def socket_timeout(value: Optional[Any]) -> object:
"""Validate timeout float > 0.0.
None coerced to socket._GLOBAL_DEFAULT_TIMEOUT bare object.
"""
if value is None:
return _GLOBAL_DEFAULT_TIMEOUT
try:
float_value = float(value)
if float_value > 0.0:
return float_value
raise vol.Invalid("Invalid socket timeout value. float > 0.0 required.")
except Exception as err:
raise vol.Invalid(f"Invalid socket timeout: {err}")
# pylint: disable=no-value-for-parameter
def url(value: Any) -> str:
"""Validate an URL."""
url_in = str(value)
if urlparse(url_in).scheme in ["http", "https"]:
return cast(str, vol.Schema(vol.Url())(url_in))
raise vol.Invalid("invalid url")
def x10_address(value: str) -> str:
"""Validate an x10 address."""
regex = re.compile(r"([A-Pa-p]{1})(?:[2-9]|1[0-6]?)$")
if not regex.match(value):
raise vol.Invalid("Invalid X10 Address")
return str(value).lower()
def uuid4_hex(value: Any) -> str:
"""Validate a v4 UUID in hex format."""
try:
result = UUID(value, version=4)
except (ValueError, AttributeError, TypeError) as error:
raise vol.Invalid("Invalid Version4 UUID", error_message=str(error))
if result.hex != value.lower():
# UUID() will create a uuid4 if input is invalid
raise vol.Invalid("Invalid Version4 UUID")
return result.hex
def ensure_list_csv(value: Any) -> List:
"""Ensure that input is a list or make one from comma-separated string."""
if isinstance(value, str):
return [member.strip() for member in value.split(",")]
return ensure_list(value)
class multi_select:
"""Multi select validator returning list of selected values."""
def __init__(self, options: dict) -> None:
"""Initialize multi select."""
self.options = options
def __call__(self, selected: list) -> list:
"""Validate input."""
if not isinstance(selected, list):
raise vol.Invalid("Not a list")
for value in selected:
if value not in self.options:
raise vol.Invalid(f"{value} is not a valid option")
return selected
def deprecated(
key: str,
replacement_key: Optional[str] = None,
invalidation_version: Optional[str] = None,
default: Optional[Any] = None,
) -> Callable[[Dict], Dict]:
"""
Log key as deprecated and provide a replacement (if exists).
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning if neither key nor replacement_key are provided
- Adds replacement_key with default value in this case
- Once the invalidation_version is crossed, raises vol.Invalid if key
is detected
"""
module = inspect.getmodule(inspect.stack()[1][0])
if module is not None:
module_name = module.__name__
else:
# If Python is unable to access the sources files, the call stack frame
# will be missing information, so let's guard.
# https://github.com/home-assistant/core/issues/24982
module_name = __name__
if replacement_key and invalidation_version:
warning = (
"The '{key}' option is deprecated,"
" please replace it with '{replacement_key}'."
" This option {invalidation_status} invalid in version"
" {invalidation_version}"
)
elif replacement_key:
warning = (
"The '{key}' option is deprecated,"
" please replace it with '{replacement_key}'"
)
elif invalidation_version:
warning = (
"The '{key}' option is deprecated,"
" please remove it from your configuration."
" This option {invalidation_status} invalid in version"
" {invalidation_version}"
)
else:
warning = (
"The '{key}' option is deprecated,"
" please remove it from your configuration"
)
def check_for_invalid_version() -> None:
"""Raise error if current version has reached invalidation."""
if not invalidation_version:
return
if parse_version(__version__) >= parse_version(invalidation_version):
raise vol.Invalid(
warning.format(
key=key,
replacement_key=replacement_key,
invalidation_status="became",
invalidation_version=invalidation_version,
)
)
def validator(config: Dict) -> Dict:
"""Check if key is in config and log warning."""
if key in config:
check_for_invalid_version()
KeywordStyleAdapter(logging.getLogger(module_name)).warning(
warning,
key=key,
replacement_key=replacement_key,
invalidation_status="will become",
invalidation_version=invalidation_version,
)
value = config[key]
if replacement_key:
config.pop(key)
else:
value = default
keys = [key]
if replacement_key:
keys.append(replacement_key)
if value is not None and (
replacement_key not in config or default == config.get(replacement_key)
):
config[replacement_key] = value
return has_at_most_one_key(*keys)(config)
return validator
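# Illustrative sketch, not part of the original helpers: how deprecated() is
# typically composed with a schema. The option names here are hypothetical.
_EXAMPLE_DEPRECATED_SCHEMA = vol.All(
    # Warn about "old_option" and move its value to "new_option" before the
    # schema itself runs.
    deprecated("old_option", replacement_key="new_option"),
    vol.Schema({vol.Optional("new_option"): string}, extra=vol.ALLOW_EXTRA),
)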
def key_value_schemas(
key: str, value_schemas: Dict[str, vol.Schema]
) -> Callable[[Any], Dict[str, Any]]:
"""Create a validator that validates based on a value for specific key.
This gives better error messages.
"""
def key_value_validator(value: Any) -> Dict[str, Any]:
if not isinstance(value, dict):
raise vol.Invalid("Expected a dictionary")
key_value = value.get(key)
if key_value not in value_schemas:
raise vol.Invalid(
f"Unexpected value for {key}: '{key_value}'. Expected {', '.join(value_schemas)}"
)
return cast(Dict[str, Any], value_schemas[key_value](value))
return key_value_validator
# Validator helpers
def key_dependency(
key: Hashable, dependency: Hashable
) -> Callable[[Dict[Hashable, Any]], Dict[Hashable, Any]]:
"""Validate that all dependencies exist for key."""
def validator(value: Dict[Hashable, Any]) -> Dict[Hashable, Any]:
"""Test dependencies."""
if not isinstance(value, dict):
raise vol.Invalid("key dependencies require a dict")
if key in value and dependency not in value:
raise vol.Invalid(
f'dependency violation - key "{key}" requires '
f'key "{dependency}" to exist'
)
return value
return validator
def custom_serializer(schema: Any) -> Any:
"""Serialize additional types for voluptuous_serialize."""
if schema is positive_time_period_dict:
return {"type": "positive_time_period_dict"}
if schema is string:
return {"type": "string"}
if schema is boolean:
return {"type": "boolean"}
if isinstance(schema, multi_select):
return {"type": "multi_select", "options": schema.options}
return voluptuous_serialize.UNSUPPORTED
# Schemas
PLATFORM_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): string,
vol.Optional(CONF_ENTITY_NAMESPACE): string,
vol.Optional(CONF_SCAN_INTERVAL): time_period,
}
)
PLATFORM_SCHEMA_BASE = PLATFORM_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA)
ENTITY_SERVICE_FIELDS = (ATTR_ENTITY_ID, ATTR_AREA_ID)
def make_entity_service_schema(
schema: dict, *, extra: int = vol.PREVENT_EXTRA
) -> vol.All:
"""Create an entity service schema."""
return vol.All(
vol.Schema(
{
**schema,
vol.Optional(ATTR_ENTITY_ID): comp_entity_ids,
vol.Optional(ATTR_AREA_ID): vol.Any(
ENTITY_MATCH_NONE, vol.All(ensure_list, [str])
),
},
extra=extra,
),
has_at_least_one_key(*ENTITY_SERVICE_FIELDS),
)
SCRIPT_VARIABLES_SCHEMA = vol.All(
vol.Schema({str: template_complex}),
# pylint: disable=unnecessary-lambda
lambda val: script_variables_helper.ScriptVariables(val),
)
def script_action(value: Any) -> dict:
"""Validate a script action."""
if not isinstance(value, dict):
raise vol.Invalid("expected dictionary")
return ACTION_TYPE_SCHEMAS[determine_script_action(value)](value)
SCRIPT_SCHEMA = vol.All(ensure_list, [script_action])
EVENT_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ALIAS): string,
vol.Required(CONF_EVENT): string,
vol.Optional(CONF_EVENT_DATA): vol.All(dict, template_complex),
vol.Optional(CONF_EVENT_DATA_TEMPLATE): vol.All(dict, template_complex),
}
)
SERVICE_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_ALIAS): string,
vol.Exclusive(CONF_SERVICE, "service name"): vol.Any(
service, dynamic_template
),
vol.Exclusive(CONF_SERVICE_TEMPLATE, "service name"): vol.Any(
service, dynamic_template
),
vol.Optional("data"): vol.All(dict, template_complex),
vol.Optional("data_template"): vol.All(dict, template_complex),
vol.Optional(CONF_ENTITY_ID): comp_entity_ids,
}
),
has_at_least_one_key(CONF_SERVICE, CONF_SERVICE_TEMPLATE),
)
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_CONDITION): "numeric_state",
vol.Required(CONF_ENTITY_ID): entity_ids,
vol.Optional(CONF_ATTRIBUTE): str,
CONF_BELOW: vol.Any(
vol.Coerce(float), vol.All(str, entity_domain("input_number"))
),
CONF_ABOVE: vol.Any(
vol.Coerce(float), vol.All(str, entity_domain("input_number"))
),
vol.Optional(CONF_VALUE_TEMPLATE): template,
}
),
has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
STATE_CONDITION_BASE_SCHEMA = {
vol.Required(CONF_CONDITION): "state",
vol.Required(CONF_ENTITY_ID): entity_ids,
vol.Optional(CONF_ATTRIBUTE): str,
vol.Optional(CONF_FOR): positive_time_period,
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional("from"): str,
}
STATE_CONDITION_STATE_SCHEMA = vol.Schema(
{
**STATE_CONDITION_BASE_SCHEMA,
vol.Required(CONF_STATE): vol.Any(str, [str]),
}
)
STATE_CONDITION_ATTRIBUTE_SCHEMA = vol.Schema(
{
**STATE_CONDITION_BASE_SCHEMA,
vol.Required(CONF_STATE): match_all,
}
)
def STATE_CONDITION_SCHEMA(value: Any) -> dict: # pylint: disable=invalid-name
"""Validate a state condition."""
if not isinstance(value, dict):
raise vol.Invalid("Expected a dictionary")
if CONF_ATTRIBUTE in value:
validated: dict = STATE_CONDITION_ATTRIBUTE_SCHEMA(value)
else:
validated = STATE_CONDITION_STATE_SCHEMA(value)
return key_dependency("for", "state")(validated)
SUN_CONDITION_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_CONDITION): "sun",
vol.Optional("before"): sun_event,
vol.Optional("before_offset"): time_period,
vol.Optional("after"): vol.All(
vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE)
),
vol.Optional("after_offset"): time_period,
}
),
has_at_least_one_key("before", "after"),
)
TEMPLATE_CONDITION_SCHEMA = vol.Schema(
{
vol.Required(CONF_CONDITION): "template",
vol.Required(CONF_VALUE_TEMPLATE): template,
}
)
TIME_CONDITION_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_CONDITION): "time",
"before": vol.Any(time, vol.All(str, entity_domain("input_datetime"))),
"after": vol.Any(time, vol.All(str, entity_domain("input_datetime"))),
"weekday": weekdays,
}
),
has_at_least_one_key("before", "after", "weekday"),
)
ZONE_CONDITION_SCHEMA = vol.Schema(
{
vol.Required(CONF_CONDITION): "zone",
vol.Required(CONF_ENTITY_ID): entity_ids,
"zone": entity_ids,
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional("event"): vol.Any("enter", "leave"),
}
)
AND_CONDITION_SCHEMA = vol.Schema(
{
vol.Required(CONF_CONDITION): "and",
vol.Required(CONF_CONDITIONS): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
OR_CONDITION_SCHEMA = vol.Schema(
{
vol.Required(CONF_CONDITION): "or",
vol.Required(CONF_CONDITIONS): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
NOT_CONDITION_SCHEMA = vol.Schema(
{
vol.Required(CONF_CONDITION): "not",
vol.Required(CONF_CONDITIONS): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
),
}
)
DEVICE_CONDITION_BASE_SCHEMA = vol.Schema(
{
vol.Required(CONF_CONDITION): "device",
vol.Required(CONF_DEVICE_ID): str,
vol.Required(CONF_DOMAIN): str,
}
)
DEVICE_CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA)
CONDITION_SCHEMA: vol.Schema = vol.Schema(
vol.Any(
key_value_schemas(
CONF_CONDITION,
{
"numeric_state": NUMERIC_STATE_CONDITION_SCHEMA,
"state": STATE_CONDITION_SCHEMA,
"sun": SUN_CONDITION_SCHEMA,
"template": TEMPLATE_CONDITION_SCHEMA,
"time": TIME_CONDITION_SCHEMA,
"zone": ZONE_CONDITION_SCHEMA,
"and": AND_CONDITION_SCHEMA,
"or": OR_CONDITION_SCHEMA,
"not": NOT_CONDITION_SCHEMA,
"device": DEVICE_CONDITION_SCHEMA,
},
),
dynamic_template,
)
)
TRIGGER_SCHEMA = vol.All(
ensure_list, [vol.Schema({vol.Required(CONF_PLATFORM): str}, extra=vol.ALLOW_EXTRA)]
)
_SCRIPT_DELAY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ALIAS): string,
vol.Required(CONF_DELAY): positive_time_period_template,
}
)
_SCRIPT_WAIT_TEMPLATE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ALIAS): string,
vol.Required(CONF_WAIT_TEMPLATE): template,
vol.Optional(CONF_TIMEOUT): positive_time_period_template,
vol.Optional(CONF_CONTINUE_ON_TIMEOUT): boolean,
}
)
DEVICE_ACTION_BASE_SCHEMA = vol.Schema(
{vol.Required(CONF_DEVICE_ID): string, vol.Required(CONF_DOMAIN): str}
)
DEVICE_ACTION_SCHEMA = DEVICE_ACTION_BASE_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA)
_SCRIPT_SCENE_SCHEMA = vol.Schema({vol.Required(CONF_SCENE): entity_domain("scene")})
_SCRIPT_REPEAT_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ALIAS): string,
vol.Required(CONF_REPEAT): vol.All(
{
vol.Exclusive(CONF_COUNT, "repeat"): vol.Any(vol.Coerce(int), template),
vol.Exclusive(CONF_WHILE, "repeat"): vol.All(
ensure_list, [CONDITION_SCHEMA]
),
vol.Exclusive(CONF_UNTIL, "repeat"): vol.All(
ensure_list, [CONDITION_SCHEMA]
),
vol.Required(CONF_SEQUENCE): SCRIPT_SCHEMA,
},
has_at_least_one_key(CONF_COUNT, CONF_WHILE, CONF_UNTIL),
),
}
)
_SCRIPT_CHOOSE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ALIAS): string,
vol.Required(CONF_CHOOSE): vol.All(
ensure_list,
[
{
vol.Required(CONF_CONDITIONS): vol.All(
ensure_list, [CONDITION_SCHEMA]
),
vol.Required(CONF_SEQUENCE): SCRIPT_SCHEMA,
}
],
),
vol.Optional(CONF_DEFAULT): SCRIPT_SCHEMA,
}
)
_SCRIPT_WAIT_FOR_TRIGGER_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ALIAS): string,
vol.Required(CONF_WAIT_FOR_TRIGGER): TRIGGER_SCHEMA,
vol.Optional(CONF_TIMEOUT): positive_time_period_template,
vol.Optional(CONF_CONTINUE_ON_TIMEOUT): boolean,
}
)
_SCRIPT_SET_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ALIAS): string,
vol.Required(CONF_VARIABLES): SCRIPT_VARIABLES_SCHEMA,
}
)
SCRIPT_ACTION_DELAY = "delay"
SCRIPT_ACTION_WAIT_TEMPLATE = "wait_template"
SCRIPT_ACTION_CHECK_CONDITION = "condition"
SCRIPT_ACTION_FIRE_EVENT = "event"
SCRIPT_ACTION_CALL_SERVICE = "call_service"
SCRIPT_ACTION_DEVICE_AUTOMATION = "device"
SCRIPT_ACTION_ACTIVATE_SCENE = "scene"
SCRIPT_ACTION_REPEAT = "repeat"
SCRIPT_ACTION_CHOOSE = "choose"
SCRIPT_ACTION_WAIT_FOR_TRIGGER = "wait_for_trigger"
SCRIPT_ACTION_VARIABLES = "variables"
def determine_script_action(action: dict) -> str:
"""Determine action type."""
if CONF_DELAY in action:
return SCRIPT_ACTION_DELAY
if CONF_WAIT_TEMPLATE in action:
return SCRIPT_ACTION_WAIT_TEMPLATE
if CONF_CONDITION in action:
return SCRIPT_ACTION_CHECK_CONDITION
if CONF_EVENT in action:
return SCRIPT_ACTION_FIRE_EVENT
if CONF_DEVICE_ID in action:
return SCRIPT_ACTION_DEVICE_AUTOMATION
if CONF_SCENE in action:
return SCRIPT_ACTION_ACTIVATE_SCENE
if CONF_REPEAT in action:
return SCRIPT_ACTION_REPEAT
if CONF_CHOOSE in action:
return SCRIPT_ACTION_CHOOSE
if CONF_WAIT_FOR_TRIGGER in action:
return SCRIPT_ACTION_WAIT_FOR_TRIGGER
if CONF_VARIABLES in action:
return SCRIPT_ACTION_VARIABLES
return SCRIPT_ACTION_CALL_SERVICE
ACTION_TYPE_SCHEMAS: Dict[str, Callable[[Any], dict]] = {
SCRIPT_ACTION_CALL_SERVICE: SERVICE_SCHEMA,
SCRIPT_ACTION_DELAY: _SCRIPT_DELAY_SCHEMA,
SCRIPT_ACTION_WAIT_TEMPLATE: _SCRIPT_WAIT_TEMPLATE_SCHEMA,
SCRIPT_ACTION_FIRE_EVENT: EVENT_SCHEMA,
SCRIPT_ACTION_CHECK_CONDITION: CONDITION_SCHEMA,
SCRIPT_ACTION_DEVICE_AUTOMATION: DEVICE_ACTION_SCHEMA,
SCRIPT_ACTION_ACTIVATE_SCENE: _SCRIPT_SCENE_SCHEMA,
SCRIPT_ACTION_REPEAT: _SCRIPT_REPEAT_SCHEMA,
SCRIPT_ACTION_CHOOSE: _SCRIPT_CHOOSE_SCHEMA,
SCRIPT_ACTION_WAIT_FOR_TRIGGER: _SCRIPT_WAIT_FOR_TRIGGER_SCHEMA,
SCRIPT_ACTION_VARIABLES: _SCRIPT_SET_SCHEMA,
}
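# Illustrative sketch (not part of the original helpers): dispatching a single
# hypothetical action dict through the schemas above.
#
#     script_action({"delay": "00:00:05"})
#     # -> {'delay': datetime.timedelta(seconds=5)}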
|
|
r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1() # doctest: +SKIP
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4() # doctest: +SKIP
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
import os
__author__ = 'Ka-Ping Yee <[email protected]>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
int_ = int # The built-in int type
bytes_ = bytes # The built-in bytes type
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
five possible forms: a similar string of hexadecimal digits, or a tuple
of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a string
of 16 bytes (with all the integer fields in big-endian order) as an
argument named 'bytes', or a string of 16 bytes (with the first three
fields in little-endian order) as an argument named 'bytes_le', or a
single 128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string (containing the six
integer fields in big-endian byte order)
bytes_le the UUID as a 16-byte string (with time_low, time_mid,
and time_hi_version in little-endian byte order)
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
int=None, version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
in little-endian order as the 'bytes_le' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
'\x12\x34\x56\x78\x12\x34\x56\x78')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
be given. The 'version' argument is optional; if given, the resulting
UUID will have its variant and version set according to RFC 4122,
overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
"""
if [hex, bytes, bytes_le, fields, int].count(None) != 4:
raise TypeError('one of the hex, bytes, bytes_le, fields, '
'or int arguments must be given')
if hex is not None:
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
raise ValueError('badly formed hexadecimal UUID string')
int = int_(hex, 16)
if bytes_le is not None:
if len(bytes_le) != 16:
raise ValueError('bytes_le is not a 16-char string')
bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] +
bytes_le[8-1:6-1:-1] + bytes_le[8:])
if bytes is not None:
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
assert isinstance(bytes, bytes_), repr(bytes)
int = int_.from_bytes(bytes, byteorder='big')
if fields is not None:
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node) = fields
if not 0 <= time_low < 1<<32:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 1<<16:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 1<<16:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 1<<8:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 1<<8:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 1<<48:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low
int = ((time_low << 96) | (time_mid << 80) |
(time_hi_version << 64) | (clock_seq << 48) | node)
if int is not None:
if not 0 <= int < 1<<128:
raise ValueError('int is out of range (need a 128-bit value)')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
# Set the variant to RFC 4122.
int &= ~(0xc000 << 48)
int |= 0x8000 << 48
# Set the version number.
int &= ~(0xf000 << 64)
int |= version << 76
self.__dict__['int'] = int
def __eq__(self, other):
if isinstance(other, UUID):
return self.int == other.int
return NotImplemented
# Q. What's the value of being able to sort UUIDs?
# A. Use them as keys in a B-Tree or similar mapping.
def __lt__(self, other):
if isinstance(other, UUID):
return self.int < other.int
return NotImplemented
def __gt__(self, other):
if isinstance(other, UUID):
return self.int > other.int
return NotImplemented
def __le__(self, other):
if isinstance(other, UUID):
return self.int <= other.int
return NotImplemented
def __ge__(self, other):
if isinstance(other, UUID):
return self.int >= other.int
return NotImplemented
def __hash__(self):
return hash(self.int)
def __int__(self):
return self.int
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def __setattr__(self, name, value):
raise TypeError('UUID objects are immutable')
def __str__(self):
hex = '%032x' % self.int
return '%s-%s-%s-%s-%s' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
@property
def bytes(self):
return self.int.to_bytes(16, 'big')
@property
def bytes_le(self):
bytes = self.bytes
return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] +
bytes[8:])
@property
def fields(self):
return (self.time_low, self.time_mid, self.time_hi_version,
self.clock_seq_hi_variant, self.clock_seq_low, self.node)
@property
def time_low(self):
return self.int >> 96
@property
def time_mid(self):
return (self.int >> 80) & 0xffff
@property
def time_hi_version(self):
return (self.int >> 64) & 0xffff
@property
def clock_seq_hi_variant(self):
return (self.int >> 56) & 0xff
@property
def clock_seq_low(self):
return (self.int >> 48) & 0xff
@property
def time(self):
return (((self.time_hi_version & 0x0fff) << 48) |
(self.time_mid << 32) | self.time_low)
@property
def clock_seq(self):
return (((self.clock_seq_hi_variant & 0x3f) << 8) |
self.clock_seq_low)
@property
def node(self):
return self.int & 0xffffffffffff
@property
def hex(self):
return '%032x' % self.int
@property
def urn(self):
return 'urn:uuid:' + str(self)
@property
def variant(self):
if not self.int & (0x8000 << 48):
return RESERVED_NCS
elif not self.int & (0x4000 << 48):
return RFC_4122
elif not self.int & (0x2000 << 48):
return RESERVED_MICROSOFT
else:
return RESERVED_FUTURE
@property
def version(self):
# The version bits are only meaningful for RFC 4122 UUIDs.
if self.variant == RFC_4122:
return int((self.int >> 76) & 0xf)
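# A minimal illustrative sketch (assuming only the UUID class and the
# RFC_4122 constant above; not used by the module itself): constructing a
# UUID from all-zero fields with an explicit version forces the RFC 4122
# variant bits and stores the version in the top four bits of
# time_hi_version, which is exactly what the variant and version properties
# read back.
def _example_variant_and_version_bits():
    u = UUID(fields=(0, 0, 0, 0, 0, 0), version=4)
    assert u.variant == RFC_4122
    assert u.version == 4
    return u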
def _popen(command, *args):
import os, shutil, subprocess
executable = shutil.which(command)
if executable is None:
path = os.pathsep.join(('/sbin', '/usr/sbin'))
executable = shutil.which(command, path=path)
if executable is None:
return None
# LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output
# on stderr (Note: we don't have an example where the words we search
# for are actually localized, but in theory some system could do so.)
env = dict(os.environ)
env['LC_ALL'] = 'C'
proc = subprocess.Popen((executable,) + args,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
return proc
def _find_mac(command, args, hw_identifiers, get_index):
try:
proc = _popen(command, *args.split())
if not proc:
return
with proc:
for line in proc.stdout:
words = line.lower().rstrip().split()
for i in range(len(words)):
if words[i] in hw_identifiers:
try:
word = words[get_index(i)]
mac = int(word.replace(b':', b''), 16)
if mac:
return mac
except (ValueError, IndexError):
# Virtual interfaces, such as those provided by
# VPNs, do not have a colon-delimited MAC address
# as expected, but a 16-byte HWAddr separated by
# dashes. These should be ignored in favor of a
# real MAC address
pass
except OSError:
pass
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
for args in ('', '-a', '-av'):
mac = _find_mac('ifconfig', args, [b'hwaddr', b'ether'], lambda i: i+1)
if mac:
return mac
def _ip_getnode():
"""Get the hardware address on Unix by running ip."""
# This works on Linux with iproute2.
mac = _find_mac('ip', 'link list', [b'link/ether'], lambda i: i+1)
if mac:
return mac
def _arp_getnode():
"""Get the hardware address on Unix by running arp."""
import os, socket
try:
ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
return None
# Try getting the MAC addr from arp based on our IP address (Solaris).
return _find_mac('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1)
def _lanscan_getnode():
"""Get the hardware address on Unix by running lanscan."""
# This might work on HP-UX.
return _find_mac('lanscan', '-ai', [b'lan0'], lambda i: 0)
def _netstat_getnode():
"""Get the hardware address on Unix by running netstat."""
# This might work on AIX, Tru64 UNIX and presumably on IRIX.
try:
proc = _popen('netstat', '-ia')
if not proc:
return
with proc:
words = proc.stdout.readline().rstrip().split()
try:
i = words.index(b'Address')
except ValueError:
return
for line in proc.stdout:
try:
words = line.rstrip().split()
word = words[i]
if len(word) == 17 and word.count(b':') == 5:
mac = int(word.replace(b':', b''), 16)
if mac:
return mac
except (ValueError, IndexError):
pass
except OSError:
pass
def _ipconfig_getnode():
"""Get the hardware address on Windows by running ipconfig.exe."""
import os, re
dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
except OSError:
continue
with pipe:
for line in pipe:
value = line.split(':')[-1].strip().lower()
if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
"""Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details."""
import win32wnet, netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = status.adapter_address[:6]
if len(bytes) != 6:
continue
return int.from_bytes(bytes, 'big')
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
# XXX This makes the module non-thread-safe!
_uuid_generate_time = _UuidCreate = None
try:
import ctypes, ctypes.util
import sys
# The uuid_generate_* routines are provided by libuuid on at least
# Linux and FreeBSD, and provided by libc on Mac OS X.
_libnames = ['uuid']
if not sys.platform.startswith('win'):
_libnames.append('c')
for libname in _libnames:
try:
lib = ctypes.CDLL(ctypes.util.find_library(libname))
except Exception:
continue
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
break
del _libnames
# The uuid_generate_* functions are broken on MacOS X 10.5, as noted
    # in issue #8621: the function generates the same sequence of values
# in the parent process and all children created using fork (unless
# those children use exec as well).
#
# Assume that the uuid_generate functions are broken from 10.5 onward,
# the test can be adjusted when a later version is fixed.
if sys.platform == 'darwin':
if int(os.uname().release.split('.')[0]) >= 9:
_uuid_generate_time = None
# On Windows prior to 2000, UuidCreate gives a UUID containing the
# hardware address. On Windows 2000 and later, UuidCreate makes a
# random UUID and UuidCreateSequential gives a UUID containing the
# hardware address. These routines are provided by the RPC runtime.
# NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
# 6 bytes returned by UuidCreateSequential are fixed, they don't appear
# to bear any relationship to the MAC address of any network device
# on the box.
try:
lib = ctypes.windll.rpcrt4
except:
lib = None
_UuidCreate = getattr(lib, 'UuidCreateSequential',
getattr(lib, 'UuidCreate', None))
except:
pass
def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=bytes_(_buffer.raw)).node
def _windll_getnode():
"""Get the hardware address on Windows using ctypes."""
_buffer = ctypes.create_string_buffer(16)
if _UuidCreate(_buffer) == 0:
return UUID(bytes=bytes_(_buffer.raw)).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.getrandbits(48) | 0x010000000000
_node = None
def getnode():
"""Get the hardware address as a 48-bit positive integer.
The first time this runs, it may launch a separate program, which could
be quite slow. If all attempts to obtain the hardware address fail, we
choose a random 48-bit number with its eighth bit set to 1 as recommended
in RFC 4122.
"""
global _node
if _node is not None:
return _node
import sys
if sys.platform == 'win32':
getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
else:
getters = [_unixdll_getnode, _ifconfig_getnode, _ip_getnode,
_arp_getnode, _lanscan_getnode, _netstat_getnode]
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if _node is not None:
return _node
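# Illustrative helper (a sketch, not used by the module itself): when
# getnode() falls back on _random_getnode(), the returned value has the
# multicast bit of the first octet set (0x010000000000), a bit that is never
# set in a properly assigned unicast MAC address, so callers can detect the
# random fallback if they need to.
def _example_node_is_randomly_generated(node):
    return bool(node & 0x010000000000)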
_last_timestamp = None
def uuid1(node=None, clock_seq=None):
"""Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen."""
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
if _uuid_generate_time and node is clock_seq is None:
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=bytes_(_buffer.raw))
global _last_timestamp
import time
nanoseconds = int(time.time() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
timestamp = int(nanoseconds/100) + 0x01b21dd213814000
if _last_timestamp is not None and timestamp <= _last_timestamp:
timestamp = _last_timestamp + 1
_last_timestamp = timestamp
if clock_seq is None:
import random
clock_seq = random.getrandbits(14) # instead of stable storage
time_low = timestamp & 0xffffffff
time_mid = (timestamp >> 32) & 0xffff
time_hi_version = (timestamp >> 48) & 0x0fff
clock_seq_low = clock_seq & 0xff
clock_seq_hi_variant = (clock_seq >> 8) & 0x3f
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
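# Worked example (a sketch layered on uuid1() above): the 60-bit time field
# of a version-1 UUID counts 100-nanosecond intervals since 1582-10-15, so
# subtracting the same 0x01b21dd213814000 offset used above converts it back
# to seconds since the Unix epoch.
def _example_uuid1_unix_seconds(u):
    return (u.time - 0x01b21dd213814000) / 1e7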
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
from hashlib import md5
hash = md5(namespace.bytes + bytes(name, "utf-8")).digest()
return UUID(bytes=hash[:16], version=3)
def uuid4():
"""Generate a random UUID."""
return UUID(bytes=os.urandom(16), version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
from hashlib import sha1
hash = sha1(namespace.bytes + bytes(name, "utf-8")).digest()
return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
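# Illustrative usage (a sketch layered on the functions above): name-based
# UUIDs are deterministic, so hashing the same namespace/name pair twice
# yields equal values, which is what makes uuid3()/uuid5() suitable for
# stable identifiers derived from names.
def _example_name_based_uuids_are_stable():
    a = uuid5(NAMESPACE_DNS, 'example.org')
    b = uuid5(NAMESPACE_DNS, 'example.org')
    assert a == b and a.version == 5
    return a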
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for MongoDB, but it is also intended to
serve as an example for adding other backends: to target another system,
implement a class with the same interface and replace the method bodies
with API calls for the desired backend.
"""
import logging
import pymongo
from gridfs import GridFS
from mongo_connector import errors, constants
from mongo_connector.util import exception_wrapper
from mongo_connector.doc_managers.doc_manager_base import DocManagerBase
wrap_exceptions = exception_wrapper({
pymongo.errors.ConnectionFailure: errors.ConnectionFailed,
pymongo.errors.OperationFailure: errors.OperationFailed})
LOG = logging.getLogger(__name__)
class DocManager(DocManagerBase):
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
    Documents are stored as id/doc pairs (rather than bare docs) so that
    multiple updates to the same document leave only the most up-to-date
    version, rather than several slightly different copies.
We are using MongoDB native fields for _id and ns, but we also store
them as fields in the document, due to compatibility issues.
"""
def __init__(self, url, **kwargs):
""" Verify URL and establish a connection.
"""
try:
self.mongo = pymongo.MongoClient(
url, **kwargs.get('clientOptions', {}))
except pymongo.errors.InvalidURI:
raise errors.ConnectionFailed("Invalid URI for MongoDB")
except pymongo.errors.ConnectionFailure:
raise errors.ConnectionFailed("Failed to connect to MongoDB")
self.namespace_set = kwargs.get("namespace_set")
self.chunk_size = kwargs.get('chunk_size', constants.DEFAULT_MAX_BULK)
    def _db_and_collection(self, namespace):
        # Split only on the first '.', since collection names may themselves
        # contain dots (e.g. "db.my.coll" -> ("db", "my.coll")).
        return namespace.split('.', 1)
@wrap_exceptions
def _namespaces(self):
"""Provides the list of namespaces being replicated to MongoDB
"""
if self.namespace_set:
return self.namespace_set
user_namespaces = []
db_list = self.mongo.database_names()
for database in db_list:
if database == "config" or database == "local":
continue
coll_list = self.mongo[database].collection_names()
for coll in coll_list:
if coll.startswith("system"):
continue
namespace = "%s.%s" % (database, coll)
user_namespaces.append(namespace)
return user_namespaces
def stop(self):
"""Stops any running threads
"""
LOG.info(
"Mongo DocManager Stopped: If you will not target this system "
"again with mongo-connector then you may drop the database "
"__mongo_connector, which holds metadata for Mongo Connector."
)
@wrap_exceptions
def handle_command(self, doc, namespace, timestamp):
db, _ = self._db_and_collection(namespace)
if doc.get('dropDatabase'):
for new_db in self.command_helper.map_db(db):
self.mongo.drop_database(new_db)
if doc.get('renameCollection'):
a = self.command_helper.map_namespace(doc['renameCollection'])
b = self.command_helper.map_namespace(doc['to'])
if a and b:
self.mongo.admin.command(
"renameCollection", a, to=b)
if doc.get('create'):
new_db, coll = self.command_helper.map_collection(
db, doc['create'])
if new_db:
self.mongo[new_db].create_collection(coll)
if doc.get('drop'):
new_db, coll = self.command_helper.map_collection(
db, doc['drop'])
if new_db:
self.mongo[new_db].drop_collection(coll)
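    # Illustrative command shapes (assumed typical oplog command entries)
    # for handle_command() above: {'dropDatabase': 1},
    # {'renameCollection': 'db.old', 'to': 'db.new'},
    # {'create': 'collname'} and {'drop': 'collname'}.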
@wrap_exceptions
def update(self, document_id, update_spec, namespace, timestamp):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
db, coll = self._db_and_collection(namespace)
self.mongo["__mongo_connector"][namespace].save({
'_id': document_id,
"_ts": timestamp,
"ns": namespace
})
updated = self.mongo[db][coll].find_and_modify(
{'_id': document_id},
update_spec,
new=True
)
return updated
@wrap_exceptions
def upsert(self, doc, namespace, timestamp):
"""Update or insert a document into Mongo
"""
database, coll = self._db_and_collection(namespace)
self.mongo["__mongo_connector"][namespace].save({
'_id': doc['_id'],
"_ts": timestamp,
"ns": namespace
})
self.mongo[database][coll].save(doc)
@wrap_exceptions
def bulk_upsert(self, docs, namespace, timestamp):
def iterate_chunks():
dbname, collname = self._db_and_collection(namespace)
collection = self.mongo[dbname][collname]
meta_collection = self.mongo['__mongo_connector'][namespace]
more_chunks = True
while more_chunks:
bulk = collection.initialize_ordered_bulk_op()
bulk_meta = meta_collection.initialize_ordered_bulk_op()
for i in range(self.chunk_size):
try:
doc = next(docs)
selector = {'_id': doc['_id']}
bulk.find(selector).upsert().replace_one(doc)
bulk_meta.find(selector).upsert().replace_one({
'_id': doc['_id'],
'ns': namespace,
'_ts': timestamp
})
                    except StopIteration:
                        more_chunks = False
                        if i > 0:
                            # Flush the partially filled final chunk.
                            yield bulk, bulk_meta
                        break
                if more_chunks:
                    # A full chunk of self.chunk_size docs was queued; flush
                    # it and start the next one.
                    yield bulk, bulk_meta
for bulk_op, meta_bulk_op in iterate_chunks():
try:
bulk_op.execute()
meta_bulk_op.execute()
except pymongo.errors.DuplicateKeyError as e:
                LOG.warning('Continuing after DuplicateKeyError: %s', e)
@wrap_exceptions
def remove(self, document_id, namespace, timestamp):
"""Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The documents has ns and _ts fields.
"""
database, coll = self._db_and_collection(namespace)
doc2 = self.mongo['__mongo_connector'][namespace].find_and_modify(
{'_id': document_id}, remove=True)
if (doc2 and doc2.get('gridfs_id')):
GridFS(self.mongo[database], coll).delete(doc2['gridfs_id'])
else:
self.mongo[database][coll].remove({'_id': document_id})
@wrap_exceptions
def insert_file(self, f, namespace, timestamp):
database, coll = self._db_and_collection(namespace)
        file_id = GridFS(self.mongo[database], coll).put(
            f, filename=f.filename)
        self.mongo["__mongo_connector"][namespace].save({
            '_id': f._id,
            '_ts': timestamp,
            'ns': namespace,
            'gridfs_id': file_id
})
@wrap_exceptions
def search(self, start_ts, end_ts):
"""Called to query Mongo for documents in a time range.
"""
for namespace in self._namespaces():
database, coll = self._db_and_collection(namespace)
for ts_ns_doc in self.mongo["__mongo_connector"][namespace].find(
{'_ts': {'$lte': end_ts,
'$gte': start_ts}}
):
yield ts_ns_doc
def commit(self):
""" Performs a commit
"""
return
@wrap_exceptions
def get_last_doc(self):
"""Returns the last document stored in Mongo.
"""
def docs_by_ts():
for namespace in self._namespaces():
database, coll = self._db_and_collection(namespace)
mc_coll = self.mongo["__mongo_connector"][namespace]
for ts_ns_doc in mc_coll.find(limit=1).sort('_ts', -1):
yield ts_ns_doc
return max(docs_by_ts(), key=lambda x: x["_ts"])
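# A minimal sketch of what the module docstring describes: to target a
# different backend, implement the same interface and replace the method
# bodies with that backend's API calls. Everything below is illustrative
# only and is not part of mongo-connector.
class ExampleDocManager(DocManagerBase):
    """Skeleton document manager for a hypothetical backend."""
    def __init__(self, url, **kwargs):
        # Establish a connection to the backend here.
        self.url = url
    def upsert(self, doc, namespace, timestamp):
        # Insert or replace doc, keyed by doc['_id'], in namespace.
        raise NotImplementedError
    def remove(self, document_id, namespace, timestamp):
        # Delete the document with _id == document_id from namespace.
        raise NotImplementedError
    def search(self, start_ts, end_ts):
        # Yield documents whose _ts falls within [start_ts, end_ts]
        # (used during rollback).
        raise NotImplementedError
    def commit(self):
        # Flush any buffered writes; a no-op for write-through backends.
        return
    def get_last_doc(self):
        # Return the document with the greatest _ts written so far.
        raise NotImplementedError
    def stop(self):
        # Release connections and stop any background threads.
        return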