#!/usr/bin/env python
# @author Binu Jasim
# @created on 1 Mar 2017
import webapp2
from google.appengine.ext import ndb
from google.appengine.api import users
import logging
import datetime
import json
import os
import jinja2
from account import *
from model import *
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
try:
return (jinja_env.get_template(template)).render(params)
except:
# TODO - Be careful about blog/blog-error.html
return (jinja_env.get_template('site-error.html')).render()
def render(self, template, **html_add_ins):
self.write(self.render_str(template, **html_add_ins))
class WriteHandler(Handler):
def get(self):
# make sure the user is logged in
user = users.get_current_user()
if user:
user_id = user.user_id()
nickname = user.nickname()
email = user.email()
logout = users.create_logout_url('/')
# Proceed only if an approved user
if (is_approved_user(email)):
# register user if not already registered
status = Account.my_get_or_insert(user_id, nickname = nickname, email = email)
self.render('write.html',
user_name = user.nickname(),
logout_url = logout)
else:
self.write("Sorry! You don't have permission to write articles! Contact Admin")
else:
self.redirect(users.create_login_url(self.request.url))
def post(self):
# Add a new article
user = users.get_current_user()
if user is None:
# redirect doesn't work in ajax
self.redirect(users.create_login_url(self.request.url))
else:
user = users.get_current_user()
user_ent_key = ndb.Key(Account, user.user_id())
link = str(self.request.get('link'))
kind = str(self.request.get('kind'))
if not check_availability(link, kind):
# if not available
self.response.out.write(json.dumps({"result":False}))
else:
# if link available
                ndb_date = datetime.date.today()
article = Article(parent=user_ent_key, dateCreated=ndb_date, lastEdited=ndb_date, kind=kind, link=link)
article_key = article.put()
article_id = article_key.id()
self.response.out.write(json.dumps({"result":True, "id":article_id}))
class WriteAjaxHandler(Handler):
# To retrieve the list of all articles
def get(self):
# make sure the user is logged in
user = users.get_current_user()
if user is None:
# Redirect actually doesn't work in ajax - still... leave it
self.redirect(users.create_login_url(self.request.url))
else:
user_id = user.user_id()
user_ent_key = ndb.Key(Account, user_id)
qry = Article.query(ancestor=user_ent_key).order(-Article.lastEdited)
qry_result = qry.fetch()
            # The dates have to be made JSON serializable
response_data = []
for entry in qry_result:
date = entry.lastEdited
                last_date = date.strftime('%d-%m-%Y')  # formatted as DD-MM-YYYY
temp = {"id":entry.key.id(), "lastEdited":last_date, "title":entry.title, "description":entry.description, "link":entry.link, "kind":entry.kind}
response_data.append(temp)
self.response.out.write(json.dumps(response_data))
# True means available
def check_availability(link, kind):
qry = Article.query(ndb.AND(Article.link==link, Article.kind==kind))
qry_result = qry.fetch()
return False if qry_result else True
class WriteCheckAvailability(Handler):
# To check whether a link is available
def get(self):
# make sure the user is logged in
user = users.get_current_user()
if user is None:
# Redirect actually doesn't work in ajax - still... leave it
self.redirect(users.create_login_url(self.request.url))
else:
# user is not important. Check availability across all users
#user_id = user.user_id()
#user_ent_key = ndb.Key(Account, user_id)
link = str(self.request.get('link'))
kind = str(self.request.get('kind'))
self.response.out.write(json.dumps({"result": check_availability(link, kind)}))
# To render the writedown page where we write markdown
class WriteDownHandler(Handler):
# To render page with db query to the content
def get(self):
id_article = str(self.request.get('id'))
# if provided an id, make sure user is logged in
# otherwise just render the page without title, desc and save options
user = users.get_current_user()
if user is None and id_article:
# Redirect actually doesn't work in ajax - still... leave it
self.redirect(users.create_login_url(self.request.url))
else:
# if no id is provided, just return a bare writedown for trials
if not id_article:
self.render('writedown.html')
else:
user_id = user.user_id()
logout = users.create_logout_url('/')
                # Note: the Account key id is a string (user_id) while the Article key id is an integer
article_key = ndb.Key('Account', user_id, 'Article', int(id_article))
article = article_key.get()
if article is None:
self.render('blog-error.html', message="Sorry! The article doesn't exist")
else:
self.render('writedown.html', user_name = user.nickname(), content=article.content, title=article.title, description=article.description, logout_url=logout)
# To save an article
def post(self):
# Save an article
user = users.get_current_user()
if user is None:
# redirect doesn't work in ajax
self.response.out.write(json.dumps({"result":False}))
else:
# logging.error('\n***************************\nYes. Reached the Backend\n****************************\n')
user = users.get_current_user()
user_id = user.user_id()
article_id = int(self.request.get('id'))
title = self.request.get('title').encode('utf-8')
description = self.request.get('description').encode('utf-8')
content = self.request.get('content').encode('utf-8')
# logging.error(content)
            ndb_date = datetime.date.today()
            # Create the key of our article with the retrieved id
article_key = ndb.Key('Account', user_id, 'Article', article_id)
article = article_key.get()
# update the article
if (article):
article.title = title
article.description = description
article.content = content
article.lastEdited = ndb_date
# save the changes
article.put()
self.response.out.write(json.dumps({"result":True}))
else:
# Failed because provided id doesn't exist
self.response.out.write(json.dumps({"result":False}))
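# A minimal routing sketch (not part of the original file; the URL paths below
# are assumptions for illustration only) showing how these handlers could be
# wired into a webapp2 WSGI application:
#
#   app = webapp2.WSGIApplication([
#       ('/write', WriteHandler),
#       ('/write/articles', WriteAjaxHandler),
#       ('/write/check', WriteCheckAvailability),
#       ('/writedown', WriteDownHandler),
#   ], debug=False)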
|
|
# -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
from urlparse import urlparse
from framework.auth.core import Auth
from osf.models import NodeLog
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
)
from tests.utils import assert_logs
node_url_for = lambda n_id: '/{}nodes/{}/'.format(API_BASE, n_id)
class TestNodeLinkDetail(ApiTestCase):
def setUp(self):
super(TestNodeLinkDetail, self).setUp()
self.user = AuthUserFactory()
self.private_project = ProjectFactory(creator=self.user, is_public=False)
self.pointer_project = ProjectFactory(creator=self.user, is_public=False)
self.pointer = self.private_project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
self.private_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.private_project._id, self.pointer._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(creator=self.user, is_public=True)
self.public_pointer_project = ProjectFactory(is_public=True)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.public_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
def test_returns_embedded_public_node_pointer_detail_logged_out(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
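    # For reference, a sketch (inferred from the assertions in these tests, not
    # an exact payload) of the JSON:API document returned by the node_links
    # detail endpoint:
    #   {"data": {"id": "<node_link_id>",
    #             "embeds": {"target_node": {"data": {"id": "<target_node_id>"}}},
    #             "links": {"self": ".../nodes/<node_id>/node_links/<node_link_id>/"}}}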
def test_returns_public_node_pointer_detail_logged_in(self):
res = self.app.get(self.public_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_returns_private_node_pointer_detail_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 200)
target_node = res.json['data']['embeds']['target_node']
assert_in('errors', target_node)
assert_equal(target_node['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_returns_private_node_pointer_detail_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.pointer_project._id)
def test_returns_private_node_pointer_detail_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 200)
target_node = res.json['data']['embeds']['target_node']
assert_in('errors', target_node)
assert_equal(target_node['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_self_link_points_to_node_link_detail_url(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = res.json['data']['links']['self']
assert_in(self.public_url, url)
def test_node_links_bad_version(self):
url = '{}?version=2.1'.format(self.public_url)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'This feature is deprecated as of version 2.1')
class TestDeleteNodeLink(ApiTestCase):
def setUp(self):
super(TestDeleteNodeLink, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=False)
self.pointer_project = ProjectFactory(creator=self.user, is_public=True)
self.pointer = self.project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
self.private_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.project._id, self.pointer._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.public_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
def test_delete_node_link_no_permissions_for_target_node(self):
pointer_project = ProjectFactory(creator=self.user_two, is_public=False)
pointer = self.public_project.add_pointer(pointer_project, auth=Auth(self.user), save=True)
assert_in(pointer.child, self.public_project.nodes)
url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, pointer._id)
res = self.app.delete_json_api(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 204)
self.public_project.reload()
assert_not_in(pointer, self.public_project.nodes)
def test_cannot_delete_if_registration(self):
registration = RegistrationFactory(project=self.public_project)
url = '/{}registrations/{}/node_links/'.format(
API_BASE,
registration._id,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
pointer_id = res.json['data'][0]['id']
url = '/{}registrations/{}/node_links/{}/'.format(
API_BASE,
registration._id,
pointer_id,
)
res = self.app.delete(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 405)
def test_deletes_public_node_pointer_logged_out(self):
res = self.app.delete(self.public_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0].keys())
def test_deletes_public_node_pointer_fails_if_bad_auth(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete(self.public_url, auth=self.user_two.auth, expect_errors=True)
        # This could arguably be a 405, but we don't need to go crazy with status codes
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
self.public_project.reload()
assert_equal(node_count_before, len(self.public_project.nodes_pointer))
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
def test_deletes_public_node_pointer_succeeds_as_owner(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete(self.public_url, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 204)
assert_equal(node_count_before - 1, len(self.public_project.nodes_pointer))
def test_deletes_private_node_pointer_logged_out(self):
res = self.app.delete(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_deletes_private_node_pointer_logged_in_contributor(self):
res = self.app.delete(self.private_url, auth=self.user.auth)
self.project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
assert_equal(len(self.project.nodes_pointer), 0)
def test_deletes_private_node_pointer_logged_in_non_contributor(self):
res = self.app.delete(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
def test_return_deleted_public_node_pointer(self):
res = self.app.delete(self.public_url, auth=self.user.auth)
self.public_project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
        # Check that the deleted pointer cannot be returned
res = self.app.get(self.public_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_return_deleted_private_node_pointer(self):
res = self.app.delete(self.private_url, auth=self.user.auth)
self.project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
        # Check that the deleted pointer cannot be returned
res = self.app.get(self.private_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
# Regression test for https://openscience.atlassian.net/browse/OSF-4322
def test_delete_link_that_is_not_linked_to_correct_node(self):
project = ProjectFactory(creator=self.user)
# The node link belongs to a different project
res = self.app.delete(
'/{}nodes/{}/node_links/{}/'.format(API_BASE, project._id, self.public_pointer._id),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 404)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], 'Not found.')
|
|
# This Python module is part of the PyRate software package.
#
# Copyright 2022 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains bindings for the GDAL library
"""
# pylint: disable=too-many-arguments,R0914
from typing import Union, List, Tuple
from osgeo import gdal, gdalconst
from osgeo.gdal import Dataset
import numpy as np
import numexpr as ne
from pyrate.core import shared, ifgconstants as ifc
from pyrate.core.logger import pyratelogger as log
gdal.SetCacheMax(2**15)
GDAL_WARP_MEMORY_LIMIT = 2**10
LOW_FLOAT32 = np.finfo(np.float32).min*1e-10
all_mlooked_types = [ifc.MLOOKED_COH_MASKED_IFG, ifc.MULTILOOKED, ifc.MULTILOOKED_COH,
ifc.MLOOKED_DEM]
def coherence_masking(input_gdal_dataset: Dataset, coh_file_path: str,
coh_thr: float) -> None:
"""
Perform coherence masking on raster in-place.
Based on gdal_calc formula provided by Nahidul:
gdal_calc.py -A 20151127-20151209_VV_8rlks_flat_eqa.cc.tif
-B 20151127-20151209_VV_8rlks_eqa.unw.tif
--outfile=test_v1.tif --calc="B*(A>=0.8)-999*(A<0.8)"
--NoDataValue=-999
"""
coherence_ds = gdal.Open(coh_file_path, gdalconst.GA_ReadOnly)
coherence_band = coherence_ds.GetRasterBand(1)
src_band = input_gdal_dataset.GetRasterBand(1)
ndv = np.nan
coherence = coherence_band.ReadAsArray()
src = src_band.ReadAsArray()
var = {"coh": coherence, "src": src, "t": coh_thr, "ndv": ndv}
formula = "where(coh>=t, src, ndv)"
res = ne.evaluate(formula, local_dict=var)
src_band.WriteArray(res)
# update metadata
input_gdal_dataset.GetRasterBand(1).SetNoDataValue(ndv)
input_gdal_dataset.FlushCache() # write on the disc
log.info(f"Masking ifg using file {coh_file_path} and coherence threshold: {coh_thr}")
def world_to_pixel(geo_transform, x, y):
"""
Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate
the pixel location of a geospatial coordinate;
see: http://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html
:param list geo_transform: Affine transformation coefficients
:param float x: longitude coordinate
:param float y: latitude coordinate
:return: col: pixel column number
:rtype: int
:return: line: pixel line number
:rtype: int
"""
ul_x = geo_transform[0]
ul_y = geo_transform[3]
xres = geo_transform[1]
yres = geo_transform[5]
col = int(np.round((x - ul_x) / xres))
    line = int(np.round((ul_y - y) / abs(yres)))  # yres is negative for north-up images
return col, line
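# A small worked example (values assumed purely for illustration): for a
# north-up geotransform (150.0, 0.05, 0.0, -34.0, 0.0, -0.05), i.e. 0.05-degree
# pixels with the upper-left corner at lon=150.0, lat=-34.0, the point
# (x=150.12, y=-34.21) maps to
#   col  = round((150.12 - 150.0) / 0.05) = round(2.4) = 2
#   line = round((-34.0 - (-34.21)) / 0.05) = round(4.2) = 4
# so world_to_pixel(geo_transform, 150.12, -34.21) returns (2, 4).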
def resample_nearest_neighbour(input_tif, extents, new_res, output_file):
"""
Nearest neighbor resampling and cropping of an image.
:param str input_tif: input geotiff file path
:param list extents: new extents for cropping
:param list[float] new_res: new resolution for resampling
:param str output_file: output geotiff file path
:return: dst: resampled image
:rtype: ndarray
"""
dst, resampled_proj, src, _ = _crop_resample_setup(extents, input_tif,
new_res, output_file)
# Do the work
gdal.ReprojectImage(src, dst, '', resampled_proj,
gdalconst.GRA_NearestNeighbour)
return dst.ReadAsArray()
def _crop_resample_setup(extents, input_tif, new_res, output_file,
dst_driver_type='GTiff', out_bands=2):
"""
Convenience function for crop/resample setup
"""
# Source
src_ds = gdal.Open(input_tif, gdalconst.GA_ReadOnly)
src_proj = src_ds.GetProjection()
# source metadata to be copied into the output
meta_data = src_ds.GetMetadata()
# get the image extents
min_x, min_y, max_x, max_y = extents
geo_transform = src_ds.GetGeoTransform() # tuple of 6 numbers
# Create a new geotransform for the image
gt2 = list(geo_transform)
gt2[0] = min_x
gt2[3] = max_y
# We want a section of source that matches this:
resampled_proj = src_proj
if new_res[0]: # if new_res is not None, it can't be zero either
resampled_geotrans = gt2[:1] + [new_res[0]] + gt2[2:-1] + [new_res[1]]
else:
resampled_geotrans = gt2
px_height, px_width = _gdalwarp_width_and_height(max_x, max_y, min_x,
min_y, resampled_geotrans)
# Output / destination
dst = gdal.GetDriverByName(dst_driver_type).Create(
output_file, px_width, px_height, out_bands, gdalconst.GDT_Float32)
dst.SetGeoTransform(resampled_geotrans)
dst.SetProjection(resampled_proj)
for k, v in meta_data.items():
dst.SetMetadataItem(k, v)
return dst, resampled_proj, src_ds, src_proj
def _gdalwarp_width_and_height(max_x, max_y, min_x, min_y, geo_trans):
"""
Modify pixel height and width
"""
# modified image extents
ul_x, ul_y = world_to_pixel(geo_trans, min_x, max_y)
lr_x, lr_y = world_to_pixel(geo_trans, max_x, min_y)
# Calculate the pixel size of the new image
px_width = int(lr_x - ul_x)
px_height = int(lr_y - ul_y)
return px_height, px_width # this is the same as `gdalwarp`
def crop_resample_average(
input_tif, extents: Union[List, Tuple], new_res, output_file, thresh, hdr,
out_driver_type='GTiff', match_pyrate=False, coherence_path=None,
coherence_thresh=None):
"""
Crop, resample, and average a geotiff image.
    :param str input_tif: Path to input geotiff to resample/crop
    :param tuple extents: Cropping extents (xfirst, yfirst, xlast, ylast)
    :param list new_res: [xres, yres] resolution of output image
    :param str output_file: Path to output resampled/cropped geotiff
    :param float thresh: NaN fraction threshold
    :param dict hdr: Dictionary of interferogram metadata
    :param str out_driver_type: The output driver; `MEM` or `GTiff` (optional)
    :param bool match_pyrate: Match Legacy output (optional)
    :param str coherence_path: Path to coherence file used for masking (optional)
    :param float coherence_thresh: Coherence threshold used for masking (optional)
    :return: resampled_average: output cropped and resampled image
    :rtype: ndarray
    :return: out_ds: destination gdal dataset object
    :rtype: gdal.Dataset
"""
dst_ds, _, _, _ = _crop_resample_setup(extents, input_tif, new_res, output_file,
out_bands=2, dst_driver_type='MEM')
# make a temporary copy of the dst_ds for PyRate style prepifg
if (match_pyrate and new_res[0]):
tmp_ds = gdal.GetDriverByName('MEM').CreateCopy('', dst_ds)
else:
tmp_ds = None
src_ds, src_ds_mem = _setup_source(input_tif)
if coherence_path and coherence_thresh:
coherence_masking(src_ds_mem, coherence_path, coherence_thresh)
elif coherence_path and not coherence_thresh:
raise ValueError("Coherence file provided without a coherence "
"threshold. Please ensure you provide 'cohthresh' "
"in your config if coherence masking is enabled.")
resampled_average, src_ds_mem = gdal_average(dst_ds, src_ds, src_ds_mem, thresh)
src_ds = None
src_dtype = src_ds_mem.GetRasterBand(1).DataType
src_gt = src_ds_mem.GetGeoTransform()
# required to match Legacy output
if tmp_ds:
_alignment(input_tif, new_res, resampled_average, src_ds_mem, src_gt, tmp_ds)
# grab metadata from existing geotiff
gt = dst_ds.GetGeoTransform()
wkt = dst_ds.GetProjection()
# insert metadata from the header
md = shared.collate_metadata(hdr)
# update metadata for output
# TODO: Metadata should be updated immediately as a prepifg/process step is applied
# move this into the respective steps
for k, v in md.items():
if k == ifc.DATA_TYPE:
# update data type metadata
if (v == ifc.ORIG) and (coherence_path is not None):
md.update({ifc.DATA_TYPE: ifc.MLOOKED_COH_MASKED_IFG})
elif (v == ifc.ORIG) and (coherence_path is None):
md.update({ifc.DATA_TYPE: ifc.MULTILOOKED})
elif v == ifc.COH:
md.update({ifc.DATA_TYPE: ifc.MULTILOOKED_COH})
elif v == ifc.DEM:
md.update({ifc.DATA_TYPE: ifc.MLOOKED_DEM})
else:
raise TypeError(f'Data Type metadata {v} not recognised')
_add_looks_and_crop_from_header(hdr, md, coherence_thresh)
# In-memory GDAL driver doesn't support compression so turn it off.
creation_opts = ['compress=packbits'] if out_driver_type != 'MEM' else []
out_ds = shared.gdal_dataset(output_file, dst_ds.RasterXSize, dst_ds.RasterYSize,
driver=out_driver_type, bands=1, dtype=src_dtype, metadata=md,
crs=wkt, geotransform=gt, creation_opts=creation_opts)
if out_driver_type != 'MEM':
log.info(f"Writing geotiff: {output_file}")
shared.write_geotiff(resampled_average, out_ds, np.nan)
else:
out_ds.GetRasterBand(1).WriteArray(resampled_average)
return resampled_average, out_ds
def _add_looks_and_crop_from_header(hdr, md, coh_thr):
"""
    Function to add prepifg options to geotiff metadata.
"""
# insert prepifg mlook and crop params as metadata
if any(m in md.values() for m in all_mlooked_types):
if ifc.IFG_LKSX in hdr:
md[ifc.IFG_LKSX] = hdr[ifc.IFG_LKSX]
if ifc.IFG_LKSY in hdr:
md[ifc.IFG_LKSY] = hdr[ifc.IFG_LKSY]
if ifc.IFG_CROP in hdr:
md[ifc.IFG_CROP] = hdr[ifc.IFG_CROP]
# add coherence threshold to metadata, if used for masking ifgs
if md[ifc.DATA_TYPE] == ifc.MLOOKED_COH_MASKED_IFG:
md[ifc.COH_THRESH] = coh_thr
def _alignment(input_tif, new_res, resampled_average, src_ds_mem,
src_gt, tmp_ds):
"""
    Correction step to match the Python multi-look/crop output to that of the
    Legacy data. Modifies the resampled_average array in place.
"""
src_ds = gdal.Open(input_tif)
data = src_ds.GetRasterBand(1).ReadAsArray()
xlooks = ylooks = int(new_res[0] / src_gt[1])
xres, yres = _get_resampled_data_size(xlooks, ylooks, data)
nrows, ncols = resampled_average.shape
# Legacy nearest neighbor resampling for the last
# [yres:nrows, xres:ncols] cells without nan_conversion
# turn off nan-conversion
src_ds_mem.GetRasterBand(1).SetNoDataValue(LOW_FLOAT32)
    # nearest neighbor resampling
gdal.ReprojectImage(src_ds_mem, tmp_ds, '', '', gdal.GRA_NearestNeighbour)
# only take the [yres:nrows, xres:ncols] slice
if nrows > yres or ncols > xres:
resampled_nearest_neighbor = tmp_ds.GetRasterBand(1).ReadAsArray()
resampled_average[yres - nrows:, xres - ncols:] = \
resampled_nearest_neighbor[yres - nrows:, xres - ncols:]
def gdal_average(dst_ds, src_ds, src_ds_mem, thresh):
"""
Perform subsampling of an image by averaging values
:param gdal.Dataset dst_ds: Destination gdal dataset object
:param str input_tif: Input geotif
:param float thresh: NaN fraction threshold
:return resampled_average: resampled image data
:rtype: ndarray
:return src_ds_mem: Modified in memory src_ds with nan_fraction in Band2. The nan_fraction
is computed efficiently here in gdal in the same step as the that of
the resampled average (band 1). This results is huge memory and
computational efficiency
:rtype: gdal.Dataset
"""
src_gt = src_ds.GetGeoTransform()
src_ds_mem.SetGeoTransform(src_gt)
data = src_ds_mem.GetRasterBand(1).ReadAsArray()
# update nan_matrix
# if data==nan, then 1, else 0
nan_matrix = np.isnan(data) # all nans due to phase data + coh masking if used
src_ds_mem.GetRasterBand(2).WriteArray(nan_matrix)
gdal.ReprojectImage(src_ds_mem, dst_ds, '', '', gdal.GRA_Average)
# dst_ds band2 average is our nan_fraction matrix
nan_frac = dst_ds.GetRasterBand(2).ReadAsArray()
resampled_average = dst_ds.GetRasterBand(1).ReadAsArray()
resampled_average[nan_frac >= thresh] = np.nan
return resampled_average, src_ds_mem
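# Illustrative note (an interpretation of the thresholding above, not extra
# functionality): band 2 holds a 0/1 NaN mask, so after GRA_Average resampling
# its value in each output cell is the fraction of NaN input pixels that fed
# that cell. For example, if a 4x4 source block contains 10 NaN pixels, the
# cell's nan_frac is 10/16 = 0.625; with thresh = 0.5 the averaged output
# pixel is then set to NaN.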
def _setup_source(input_tif):
"""convenience setup function for gdal_average"""
src_ds = gdal.Open(input_tif)
data = src_ds.GetRasterBand(1).ReadAsArray()
src_dtype = src_ds.GetRasterBand(1).DataType
mem_driver = gdal.GetDriverByName('MEM')
src_ds_mem = mem_driver.Create('', src_ds.RasterXSize, src_ds.RasterYSize, 2, src_dtype)
if isinstance(shared.dem_or_ifg(data_path=input_tif), shared.Ifg):
data[np.isclose(data, 0, atol=1e-6)] = np.nan # nan conversion of phase data
src_ds_mem.GetRasterBand(1).WriteArray(data)
src_ds_mem.GetRasterBand(1).SetNoDataValue(np.nan)
src_ds_mem.GetRasterBand(2).SetNoDataValue(np.nan)
src_ds_mem.SetGeoTransform(src_ds.GetGeoTransform())
return src_ds, src_ds_mem
def _get_resampled_data_size(xscale, yscale, data):
"""convenience function mimicking the Legacy output size"""
xscale = int(xscale)
yscale = int(yscale)
ysize, xsize = data.shape
xres, yres = int(xsize / xscale), int(ysize / yscale)
return xres, yres
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware ESX platform.
**Related Flags**
:vmwareapi_host_ip: IPAddress of VMware ESX server.
:vmwareapi_host_username: Username for connection to VMware ESX Server.
:vmwareapi_host_password: Password for connection to VMware ESX Server.
:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks
(default: 5.0).
:vmwareapi_api_retry_count: The API retry count in case of failure such as
network failures (socket errors etc.)
(default: 10).
"""
import time
from eventlet import event
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.virt import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi.vmops import VMWareVMOps
LOG = logging.getLogger("nova.virt.vmwareapi_conn")
FLAGS = flags.FLAGS
flags.DEFINE_string('vmwareapi_host_ip',
                    None,
                    'URL for connection to VMWare ESX host. '
                    'Required if connection_type is vmwareapi.')
flags.DEFINE_string('vmwareapi_host_username',
                    None,
                    'Username for connection to VMWare ESX host. '
                    'Used only if connection_type is vmwareapi.')
flags.DEFINE_string('vmwareapi_host_password',
                    None,
                    'Password for connection to VMWare ESX host. '
                    'Used only if connection_type is vmwareapi.')
flags.DEFINE_float('vmwareapi_task_poll_interval',
                   5.0,
                   'The interval (in seconds) used for polling of remote '
                   'tasks. Used only if connection_type is vmwareapi.')
flags.DEFINE_float('vmwareapi_api_retry_count',
                   10,
                   'The number of times we retry on failures, '
                   'e.g., socket error, etc. '
                   'Used only if connection_type is vmwareapi.')
flags.DEFINE_string('vmwareapi_vlan_interface',
'vmnic0',
'Physical ethernet adapter name for vlan networking')
TIME_BETWEEN_API_CALL_RETRIES = 2.0
class Failure(Exception):
"""Base Exception class for handling task failures."""
def __init__(self, details):
self.details = details
def __str__(self):
return str(self.details)
def get_connection(_):
"""Sets up the ESX host connection."""
host_ip = FLAGS.vmwareapi_host_ip
host_username = FLAGS.vmwareapi_host_username
host_password = FLAGS.vmwareapi_host_password
api_retry_count = FLAGS.vmwareapi_api_retry_count
if not host_ip or host_username is None or host_password is None:
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
"and vmwareapi_host_password to use"
"connection_type=vmwareapi"))
return VMWareESXConnection(host_ip, host_username, host_password,
api_retry_count)
class VMWareESXConnection(driver.ComputeDriver):
"""The ESX host connection object."""
def __init__(self, host_ip, host_username, host_password,
api_retry_count, scheme="https"):
super(VMWareESXConnection, self).__init__()
session = VMWareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
self._vmops = VMWareVMOps(session)
def init_host(self, host):
"""Do the initialization that needs to be done."""
# FIXME(sateesh): implement this
pass
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, network_info,
block_device_mapping=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, network_info)
def snapshot(self, context, instance, name):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
def destroy(self, instance, network_info, cleanup=True):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info)
def pause(self, instance, callback):
"""Pause VM instance."""
self._vmops.pause(instance, callback)
def unpause(self, instance, callback):
"""Unpause paused VM instance."""
self._vmops.unpause(instance, callback)
def suspend(self, instance, callback):
"""Suspend the specified instance."""
self._vmops.suspend(instance, callback)
def resume(self, instance, callback):
"""Resume the suspended VM instance."""
self._vmops.resume(instance, callback)
def get_info(self, instance_id):
"""Return info about the VM instance."""
return self._vmops.get_info(instance_id)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_info(instance)
def get_console_output(self, instance):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_ajax_console(self, instance):
"""Return link to instance's ajax console."""
return self._vmops.get_ajax_console(instance)
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach volume storage to VM instance."""
pass
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance."""
pass
def get_console_pool_info(self, console_type):
"""Get info about the host on which the VM resides."""
return {'address': FLAGS.vmwareapi_host_ip,
'username': FLAGS.vmwareapi_host_username,
'password': FLAGS.vmwareapi_host_password}
def update_available_resource(self, ctxt, host):
"""This method is supported only by libvirt."""
return
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
pass
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
def plug_vifs(self, instance, network_info):
"""Plugs in VIFs to networks."""
self._vmops.plug_vifs(instance, network_info)
class VMWareAPISession(object):
"""
Sets up a session with the ESX host and handles all
the calls made to the host.
"""
def __init__(self, host_ip, host_username, host_password,
api_retry_count, scheme="https"):
self._host_ip = host_ip
self._host_username = host_username
self._host_password = host_password
self.api_retry_count = api_retry_count
self._scheme = scheme
self._session_id = None
self.vim = None
self._create_session()
def _get_vim_object(self):
"""Create the VIM Object instance."""
return vim.Vim(protocol=self._scheme, host=self._host_ip)
def _create_session(self):
"""Creates a session with the ESX host."""
while True:
try:
# Login and setup the session with the ESX host for making
# API calls
self.vim = self._get_vim_object()
session = self.vim.Login(
self.vim.get_service_content().sessionManager,
userName=self._host_username,
password=self._host_password)
                # Terminate the earlier session, if possible, to conserve
                # sessions, since there is a limit on the number of
                # sessions we can have.
if self._session_id:
try:
self.vim.TerminateSession(
self.vim.get_service_content().sessionManager,
sessionId=[self._session_id])
except Exception, excep:
# This exception is something we can live with. It is
# just an extra caution on our side. The session may
# have been cleared. We could have made a call to
# SessionIsActive, but that is an overhead because we
# anyway would have to call TerminateSession.
LOG.debug(excep)
self._session_id = session.key
return
except Exception, excep:
LOG.critical(_("In vmwareapi:_create_session, "
"got this exception: %s") % excep)
raise exception.Error(excep)
def __del__(self):
"""Logs-out the session."""
# Logout to avoid un-necessary increase in session count at the
# ESX host
try:
self.vim.Logout(self.vim.get_service_content().sessionManager)
except Exception, excep:
# It is just cautionary on our part to do a logout in del just
# to ensure that the session is not left active.
LOG.debug(excep)
def _is_vim_object(self, module):
"""Check if the module is a VIM Object instance."""
return isinstance(module, vim.Vim)
def _call_method(self, module, method, *args, **kwargs):
"""
Calls a method within the module specified with
args provided.
"""
args = list(args)
retry_count = 0
exc = None
last_fault_list = []
while True:
try:
if not self._is_vim_object(module):
# If it is not the first try, then get the latest
# vim object
if retry_count > 0:
args = args[1:]
args = [self.vim] + args
retry_count += 1
temp_module = module
for method_elem in method.split("."):
temp_module = getattr(temp_module, method_elem)
return temp_module(*args, **kwargs)
except error_util.VimFaultException, excep:
# If it is a Session Fault Exception, it may point
# to a session gone bad. So we try re-creating a session
# and then proceeding ahead with the call.
exc = excep
if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
                    # An idle session returns an empty
                    # RetrievePropertiesResponse, but so does a legitimate
                    # query with an empty answer (e.g. when there are no VMs
                    # on the host), so the two cases cannot be distinguished
                    # directly. If the previous response was also empty and,
                    # after creating a new session, we still get an empty
                    # response, then we can be sure the response really is
                    # meant to be empty.
if error_util.FAULT_NOT_AUTHENTICATED in last_fault_list:
return []
last_fault_list = excep.fault_list
self._create_session()
else:
                    # Do not retry: the API call went through and the
                    # failure is the caller's fault (e.g. an InvalidArgument
                    # fault), so the caller should handle these errors.
break
except error_util.SessionOverLoadException, excep:
# For exceptions which may come because of session overload,
# we retry
exc = excep
except Exception, excep:
# If it is a proper exception, say not having furnished
# proper data in the SOAP call or the retry limit having
# exceeded, we raise the exception
exc = excep
break
# If retry count has been reached then break and
# raise the exception
if retry_count > self.api_retry_count:
break
time.sleep(TIME_BETWEEN_API_CALL_RETRIES)
LOG.critical(_("In vmwareapi:_call_method, "
"got this exception: %s") % exc)
raise
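    # Usage sketch (illustrative; it mirrors the call made in _poll_task
    # below): callers pass a module, or a Vim instance, plus a dotted method
    # name, and the session prepends its vim object when a plain module is
    # given, e.g.
    #   session._call_method(vim_util, "get_dynamic_property",
    #                        task_ref, "Task", "info")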
def _get_vim(self):
"""Gets the VIM object reference."""
if self.vim is None:
self._create_session()
return self.vim
def _wait_for_task(self, instance_id, task_ref):
"""
        Wait for the given task to complete and return its result.
The task is polled until it completes.
"""
done = event.Event()
loop = utils.LoopingCall(self._poll_task, instance_id, task_ref,
done)
loop.start(FLAGS.vmwareapi_task_poll_interval, now=True)
ret_val = done.wait()
loop.stop()
return ret_val
def _poll_task(self, instance_id, task_ref, done):
"""
        Poll the given task and signal the `done` event when the task
        finishes or fails.
"""
try:
task_info = self._call_method(vim_util, "get_dynamic_property",
task_ref, "Task", "info")
task_name = task_info.name
action = dict(
instance_id=int(instance_id),
action=task_name[0:255],
error=None)
if task_info.state in ['queued', 'running']:
return
elif task_info.state == 'success':
LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
"status: success") % locals())
done.send("success")
else:
error_info = str(task_info.error.localizedMessage)
action["error"] = error_info
LOG.warn(_("Task [%(task_name)s] %(task_ref)s "
"status: error %(error_info)s") % locals())
done.send_exception(exception.Error(error_info))
db.instance_action_create(context.get_admin_context(), action)
except Exception, excep:
LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep)
done.send_exception(excep)
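# A minimal usage sketch (not part of the original module; the flag values are
# assumptions): once the vmwareapi flags are set, the compute service obtains
# the driver through get_connection() and uses it like any other virt driver.
#
#   FLAGS.vmwareapi_host_ip = '10.0.0.1'
#   FLAGS.vmwareapi_host_username = 'root'
#   FLAGS.vmwareapi_host_password = 'secret'
#   conn = get_connection(None)
#   conn.list_instances()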
|
|
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
default_opts = [
cfg.StrOpt("scheduler_topic",
default="scheduler",
deprecated_for_removal=True,
deprecated_since="15.0.0",
deprecated_reason="""
There is no need to let users choose the RPC topic for all services - there
is little gain from this. Furthermore, it makes it really easy to break Nova
by using this option.
""",
help="""
Scheduler message queue topic.
This is the message queue topic that the scheduler 'listens' on. It is used
when the scheduler service is started up to configure the queue, and whenever
an RPC call to the scheduler is made. There is almost never any reason to ever
change this value.
Possible values:
* A valid AMQP topic name
"""),
]
scheduler_group = cfg.OptGroup(name="scheduler",
title="Scheduler configuration")
scheduler_opts = [
cfg.StrOpt("host_manager",
default="host_manager",
choices=("host_manager", "ironic_host_manager"),
deprecated_name="scheduler_host_manager",
deprecated_group="DEFAULT",
help="""
The scheduler host manager to use.
The host manager manages the in-memory picture of the hosts that the scheduler
uses. The options values are chosen from the entry points under the namespace
'nova.scheduler.host_manager' in 'setup.cfg'.
"""),
cfg.StrOpt("driver",
default="filter_scheduler",
choices=("filter_scheduler", "caching_scheduler",
"chance_scheduler", "fake_scheduler"),
deprecated_name="scheduler_driver",
deprecated_group="DEFAULT",
help="""
The class of the driver used by the scheduler.
The options are chosen from the entry points under the namespace
'nova.scheduler.driver' in 'setup.cfg'.
Possible values:
* A string, where the string corresponds to the class name of a scheduler
driver. There are a number of options available:
** 'caching_scheduler', which aggressively caches the system state for better
individual scheduler performance at the risk of more retries when running
multiple schedulers
** 'chance_scheduler', which simply picks a host at random
** 'fake_scheduler', which is used for testing
** A custom scheduler driver. In this case, you will be responsible for
creating and maintaining the entry point in your 'setup.cfg' file
"""),
cfg.IntOpt("periodic_task_interval",
default=60,
deprecated_name="scheduler_driver_task_period",
deprecated_group="DEFAULT",
help="""
Periodic task interval.
This value controls how often (in seconds) to run periodic tasks in the
scheduler. The specific tasks that are run for each period are determined by
the particular scheduler being used.
If this is larger than the nova-service 'service_down_time' setting, Nova may
report the scheduler service as down. This is because the scheduler driver is
responsible for sending a heartbeat and it will only do that as often as this
option allows. As each scheduler can work a little differently than the others,
be sure to test this with your selected scheduler.
Possible values:
* An integer, where the integer corresponds to periodic task interval in
seconds. 0 uses the default interval (60 seconds). A negative value disables
periodic tasks.
Related options:
* ``nova-service service_down_time``
"""),
cfg.IntOpt("max_attempts",
default=3,
min=1,
deprecated_name="scheduler_max_attempts",
deprecated_group="DEFAULT",
help="""
Maximum number of schedule attempts for a chosen host.
This is the maximum number of attempts that will be made to schedule an
instance before it is assumed that the failures aren't due to normal occasional
race conflicts, but rather some other problem. When this is reached a
MaxRetriesExceeded exception is raised, and the instance is set to an error
state.
Possible values:
* A positive integer, where the integer corresponds to the max number of
attempts that can be made when scheduling an instance.
""")]
filter_scheduler_group = cfg.OptGroup(name="filter_scheduler",
title="Filter scheduler options")
filter_scheduler_opts = [
cfg.IntOpt("host_subset_size",
default=1,
min=1,
deprecated_name="scheduler_host_subset_size",
deprecated_group="DEFAULT",
help="""
Size of subset of best hosts selected by scheduler.
New instances will be scheduled on a host chosen randomly from a subset of the
N best hosts, where N is the value set by this option.
Setting this to a value greater than 1 will reduce the chance that multiple
scheduler processes handling similar requests will select the same host,
creating a potential race condition. By selecting a host randomly from the N
hosts that best fit the request, the chance of a conflict is reduced. However,
the higher you set this value, the less optimal the chosen host may be for a
given request.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* An integer, where the integer corresponds to the size of a host subset. Any
integer is valid, although any value less than 1 will be treated as 1
"""),
cfg.IntOpt("max_io_ops_per_host",
default=8,
deprecated_group="DEFAULT",
help="""
The number of instances that can be actively performing IO on a host.
Instances performing IO includes those in the following states: build, resize,
snapshot, migrate, rescue, unshelve.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'io_ops_filter' filter is enabled.
Possible values:
* An integer, where the integer corresponds to the max number of instances
that can be actively performing IO on any given host.
"""),
# TODO(sfinucan): Add 'min' parameter
cfg.IntOpt("max_instances_per_host",
default=50,
deprecated_group="DEFAULT",
help="""
Maximum number of instances that can be active on a host.
If you need to limit the number of instances on any given host, set this option
to the maximum number of instances you want to allow. The num_instances_filter
will reject any host that has at least as many instances as this option's
value.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'num_instances_filter' filter is enabled.
Possible values:
* An integer, where the integer corresponds to the max instances that can be
scheduled on a host.
"""),
cfg.BoolOpt("track_instance_changes",
default=True,
deprecated_name="scheduler_tracks_instance_changes",
deprecated_group="DEFAULT",
help="""
Enable querying of individual hosts for instance information.
The scheduler may need information about the instances on a host in order to
evaluate its filters and weighers. The most common need for this information is
for the (anti-)affinity filters, which need to choose a host based on the
instances already running on a host.
If the configured filters and weighers do not need this information, disabling
this option will improve performance. It may also be disabled when the tracking
overhead proves too heavy, although this will cause classes requiring host
usage data to query the database on each request instead.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
"""),
cfg.MultiStrOpt("available_filters",
default=["nova.scheduler.filters.all_filters"],
deprecated_name="scheduler_available_filters",
deprecated_group="DEFAULT",
help="""
Filters that the scheduler can use.
An unordered list of the filter classes the nova scheduler may apply. Only the
filters specified in the 'scheduler_enabled_filters' option will be used, but
any filter appearing in that option must also be included in this list.
By default, this is set to all filters that are included with nova.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more strings, where each string corresponds to the name of
a filter that may be used for selecting a host
Related options:
* scheduler_enabled_filters
"""),
cfg.ListOpt("enabled_filters",
default=[
"RetryFilter",
"AvailabilityZoneFilter",
"RamFilter",
"DiskFilter",
"ComputeFilter",
"ComputeCapabilitiesFilter",
"ImagePropertiesFilter",
"ServerGroupAntiAffinityFilter",
"ServerGroupAffinityFilter",
],
deprecated_name="scheduler_default_filters",
deprecated_group="DEFAULT",
help="""
Filters that the scheduler will use.
An ordered list of filter class names that will be used for filtering
hosts. Ignore the word 'default' in this option's deprecated name
('scheduler_default_filters'): these filters will *always* be applied, and they
will be applied in the order they are listed, so place your most restrictive
filters first to make the filtering process more efficient.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more strings, where each string corresponds to the name of
a filter to be used for selecting a host
Related options:
* All of the filters in this option *must* be present in the
'scheduler_available_filters' option, or a SchedulerHostFilterNotFound
exception will be raised.
"""),
cfg.ListOpt("baremetal_enabled_filters",
default=[
"RetryFilter",
"AvailabilityZoneFilter",
"ComputeFilter",
"ComputeCapabilitiesFilter",
"ImagePropertiesFilter",
"ExactRamFilter",
"ExactDiskFilter",
"ExactCoreFilter",
],
deprecated_name="baremetal_scheduler_default_filters",
deprecated_group="DEFAULT",
help="""
Filters used for filtering baremetal hosts.
Filters are applied in order, so place your most restrictive filters first to
make the filtering process more efficient.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more strings, where each string corresponds to the name of
a filter to be used for selecting a baremetal host
Related options:
* If the 'scheduler_use_baremetal_filters' option is False, this option has
no effect.
"""),
cfg.BoolOpt("use_baremetal_filters",
deprecated_name="scheduler_use_baremetal_filters",
deprecated_group="DEFAULT",
default=False,
help="""
Enable baremetal filters.
Set this to True to tell the nova scheduler that it should use the filters
specified in the 'baremetal_scheduler_enabled_filters' option. If you are not
scheduling baremetal nodes, leave this at the default setting of False.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Related options:
* If this option is set to True, then the filters specified in the
'baremetal_scheduler_enabled_filters' are used instead of the filters
specified in 'scheduler_enabled_filters'.
"""),
cfg.ListOpt("weight_classes",
default=["nova.scheduler.weights.all_weighers"],
deprecated_name="scheduler_weight_classes",
deprecated_group="DEFAULT",
help="""
Weighers that the scheduler will use.
Only hosts which pass the filters are weighed. The weight for any host starts
at 0, and the weighers order these hosts by adding to or subtracting from the
weight assigned by the previous weigher. Weights may become negative. An
instance will be scheduled to one of the N most-weighted hosts, where N is
'scheduler_host_subset_size'.
By default, this is set to all weighers that are included with Nova.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more strings, where each string corresponds to the name of
a weigher that will be used for selecting a host
"""),
cfg.FloatOpt("ram_weight_multiplier",
default=1.0,
deprecated_group="DEFAULT",
help="""
RAM weight multiplier ratio.
This option determines how hosts with more or less available RAM are weighed. A
positive value will result in the scheduler preferring hosts with more
available RAM, and a negative number will result in the scheduler preferring
hosts with less available RAM. Another way to look at it is that positive
values for this option will tend to spread instances across many hosts, while
negative values will tend to fill up (stack) hosts as much as possible before
scheduling to a less-used host. The absolute value, whether positive or
negative, controls how strong the RAM weigher is relative to other weighers.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'ram' weigher is enabled.
Possible values:
* An integer or float value, where the value corresponds to the multiplier
ratio for this weigher.
"""),
cfg.FloatOpt("disk_weight_multiplier",
default=1.0,
deprecated_group="DEFAULT",
help="""
Disk weight multiplier ratio.
Multiplier used for weighing free disk space. Negative numbers mean to
stack vs spread.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'disk' weigher is enabled.
Possible values:
* An integer or float value, where the value corresponds to the multiplier
ratio for this weigher.
"""),
cfg.FloatOpt("io_ops_weight_multiplier",
default=-1.0,
deprecated_group="DEFAULT",
help="""
IO operations weight multiplier ratio.
This option determines how hosts with differing workloads are weighed. Negative
values, such as the default, will result in the scheduler preferring hosts with
lighter workloads whereas positive values will prefer hosts with heavier
workloads. Another way to look at it is that positive values for this option
will tend to schedule instances onto hosts that are already busy, while
negative values will tend to distribute the workload across more hosts. The
absolute value, whether positive or negative, controls how strong the io_ops
weigher is relative to other weighers.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'io_ops' weigher is enabled.
Possible values:
* An integer or float value, where the value corresponds to the multiplier
ratio for this weigher.
"""),
cfg.FloatOpt("soft_affinity_weight_multiplier",
default=1.0,
deprecated_group="DEFAULT",
help="""
Multiplier used for weighing hosts for group soft-affinity.
Possible values:
* An integer or float value, where the value corresponds to weight multiplier
  for hosts with group soft affinity. Only positive values are meaningful, as
negative values would make this behave as a soft anti-affinity weigher.
"""),
cfg.FloatOpt(
"soft_anti_affinity_weight_multiplier",
default=1.0,
deprecated_group="DEFAULT",
help="""
Multiplier used for weighing hosts for group soft-anti-affinity.
Possible values:
* An integer or float value, where the value corresponds to weight multiplier
  for hosts with group soft anti-affinity. Only positive values are
meaningful, as negative values would make this behave as a soft affinity
weigher.
"""),
# TODO(mikal): replace this option with something involving host aggregates
cfg.ListOpt("isolated_images",
default=[],
deprecated_group="DEFAULT",
help="""
List of UUIDs for images that can only be run on certain hosts.
If there is a need to restrict some images to only run on certain designated
hosts, list those image UUIDs here.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
Possible values:
* A list of UUID strings, where each string corresponds to the UUID of an
image
Related options:
* scheduler/isolated_hosts
* scheduler/restrict_isolated_hosts_to_isolated_images
"""),
cfg.ListOpt("isolated_hosts",
default=[],
deprecated_group="DEFAULT",
help="""
List of hosts that can only run certain images.
If there is a need to restrict some images to only run on certain designated
hosts, list those host names here.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
Possible values:
* A list of strings, where each string corresponds to the name of a host
Related options:
* scheduler/isolated_images
* scheduler/restrict_isolated_hosts_to_isolated_images
"""),
cfg.BoolOpt(
"restrict_isolated_hosts_to_isolated_images",
default=True,
deprecated_group="DEFAULT",
help="""
Prevent non-isolated images from being built on isolated hosts.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'IsolatedHostsFilter' filter is enabled. Even
then, this option doesn't affect the behavior of requests for isolated images,
which will *always* be restricted to isolated hosts.
Related options:
* scheduler/isolated_images
* scheduler/isolated_hosts
"""),
cfg.StrOpt(
"aggregate_image_properties_isolation_namespace",
deprecated_group="DEFAULT",
help="""
Image property namespace for use in the host aggregate.
Images and hosts can be configured so that certain images can only be scheduled
to hosts in a particular aggregate. This is done with metadata values set on
the host aggregate that are identified by beginning with the value of this
option. If the host is part of an aggregate with such a metadata key, the image
in the request spec must have the value of that metadata in its properties in
order for the scheduler to consider the host as acceptable.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'aggregate_image_properties_isolation' filter is
enabled.
Possible values:
* A string, where the string corresponds to an image property namespace
Related options:
* aggregate_image_properties_isolation_separator
"""),
cfg.StrOpt(
"aggregate_image_properties_isolation_separator",
default=".",
deprecated_group="DEFAULT",
help="""
Separator character(s) for image property namespace and name.
When using the aggregate_image_properties_isolation filter, the relevant
metadata keys are prefixed with the namespace defined in the
aggregate_image_properties_isolation_namespace configuration option plus a
separator. This option defines the separator to be used.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'aggregate_image_properties_isolation' filter
is enabled.
Possible values:
* A string, where the string corresponds to an image property namespace
separator character
Related options:
* aggregate_image_properties_isolation_namespace
""")]
trust_group = cfg.OptGroup(name="trusted_computing",
title="Trust parameters",
help="""
Configuration options for enabling Trusted Platform Module.
""")
trusted_opts = [
cfg.StrOpt("attestation_server",
help="""
The host to use as the attestation server.
Cloud computing pools can involve thousands of compute nodes located at
different geographical locations, making it difficult for cloud providers to
identify a node's trustworthiness. When using the Trusted filter, users can
request that their VMs only be placed on nodes that have been verified by the
attestation server specified in this option.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* A string representing the host name or IP address of the attestation server,
or an empty string.
Related options:
* attestation_server_ca_file
* attestation_port
* attestation_api_url
* attestation_auth_blob
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_server_ca_file",
help="""
The absolute path to the certificate to use for authentication when connecting
to the attestation server. See the `attestation_server` help text for more
information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* A string representing the path to the authentication certificate for the
attestation server, or an empty string.
Related options:
* attestation_server
* attestation_port
* attestation_api_url
* attestation_auth_blob
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
cfg.PortOpt("attestation_port",
default=8443,
help="""
The port to use when connecting to the attestation server. See the
`attestation_server` help text for more information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_api_url
* attestation_auth_blob
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_api_url",
default="/OpenAttestationWebServices/V1.0",
help="""
The URL on the attestation server to use. See the `attestation_server` help
text for more information about host verification.
This value must be just the path portion of the full URL, as it will be joined
to the host specified in the attestation_server option.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* A valid URL string of the attestation server, or an empty string.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_port
* attestation_auth_blob
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_auth_blob",
secret=True,
help="""
Attestation servers require a specific blob that is used to authenticate. The
content and format of the blob are determined by the particular attestation
server being used. There is no default value; you must supply the value as
specified by your attestation service. See the `attestation_server` help text
for more information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* A string containing the specific blob required by the attestation server, or
an empty string.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_port
* attestation_api_url
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
# TODO(stephenfin): Add min parameter
cfg.IntOpt("attestation_auth_timeout",
default=60,
help="""
This value controls how long a successful attestation is cached. Once this
period has elapsed, a new attestation request will be made. See the
`attestation_server` help text for more information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* An integer value, corresponding to the timeout interval for attestations in
seconds. Any integer is valid, although setting this to zero or negative
values can greatly impact performance when using an attestation service.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_port
* attestation_api_url
* attestation_auth_blob
* attestation_insecure_ssl
"""),
cfg.BoolOpt("attestation_insecure_ssl",
default=False,
help="""
When set to True, SSL certificate verification is skipped for the
attestation service. See the `attestation_server` help text for more
information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_port
* attestation_api_url
* attestation_auth_blob
* attestation_auth_timeout
"""),
]
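# Illustrative only: a sketch of the [trusted_computing] options defined above
# wired to a hypothetical attestation service; the host name, CA path and auth
# blob are placeholders.
#
#   [trusted_computing]
#   attestation_server = attestation.example.com
#   attestation_server_ca_file = /etc/nova/attestation-ca.pem
#   attestation_port = 8443
#   attestation_api_url = /OpenAttestationWebServices/V1.0
#   attestation_auth_blob = <site-specific-secret>
#   attestation_auth_timeout = 60
#   attestation_insecure_ssl = false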
metrics_group = cfg.OptGroup(name="metrics",
title="Metrics parameters",
help="""
Configuration options for metrics
Options under this group allow you to adjust how the values assigned to
metrics are calculated.
""")
metrics_weight_opts = [
cfg.FloatOpt("weight_multiplier",
default=1.0,
help="""
When using metrics to weight the suitability of a host, you can use this option
to change how the calculated weight influences the weight assigned to a host as
follows:
* >1.0: increases the effect of the metric on overall weight
* 1.0: no change to the calculated weight
* >0.0,<1.0: reduces the effect of the metric on overall weight
* 0.0: the metric value is ignored, and the value of the
'weight_of_unavailable' option is returned instead
* >-1.0,<0.0: the effect is reduced and reversed
* -1.0: the effect is reversed
* <-1.0: the effect is increased proportionally and reversed
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* An integer or float value, where the value corresponds to the multiplier
ratio for this weigher.
Related options:
* weight_of_unavailable
"""),
cfg.ListOpt("weight_setting",
default=[],
help="""
This setting specifies the metrics to be weighed and the relative ratios for
each metric. This should be a single string value, consisting of a series of
one or more 'name=ratio' pairs, separated by commas, where 'name' is the name
of the metric to be weighed, and 'ratio' is the relative weight for that
metric.
Note that if the ratio is set to 0, the metric value is ignored, and instead
the weight will be set to the value of the 'weight_of_unavailable' option.
As an example, let's consider the case where this option is set to:
``name1=1.0, name2=-1.3``
The final weight will be:
``(name1.value * 1.0) + (name2.value * -1.3)``
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more key/value pairs separated by commas, where the key is
a string representing the name of a metric and the value is a numeric weight
for that metric. If any value is set to 0, the value is ignored and the
weight will be set to the value of the 'weight_of_unavailable' option.
Related options:
* weight_of_unavailable
"""),
cfg.BoolOpt("required",
default=True,
help="""
This setting determines how any unavailable metrics are treated. If this option
is set to True, any hosts for which a metric is unavailable will raise an
exception, so it is recommended to also use the MetricFilter to filter out
those hosts before weighing.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* True or False, where False ensures any metric being unavailable for a host
will set the host weight to 'weight_of_unavailable'.
Related options:
* weight_of_unavailable
"""),
cfg.FloatOpt("weight_of_unavailable",
default=float(-10000.0),
help="""
When any of the following conditions are met, this value will be used in place
of any actual metric value:
* One of the metrics named in 'weight_setting' is not available for a host,
and the value of 'required' is False
* The ratio specified for a metric in 'weight_setting' is 0
* The 'weight_multiplier' option is set to 0
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* An integer or float value, where the value is used in place of any actual
metric value whenever one of the conditions above is met.
Related options:
* weight_setting
* required
* weight_multiplier
"""),
]
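# Illustrative only: a sketch of the [metrics] options defined above, using
# hypothetical metric names. With this configuration the metrics weigher would
# compute roughly (cpu.frequency * 1.0) + (memory.used * -1.3) for each host,
# fall back to weight_of_unavailable when a metric is missing (required is
# false), and then apply weight_multiplier to the combined value.
#
#   [metrics]
#   weight_multiplier = 1.0
#   weight_setting = cpu.frequency=1.0, memory.used=-1.3
#   required = false
#   weight_of_unavailable = -10000.0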
def register_opts(conf):
conf.register_opts(default_opts)
conf.register_group(scheduler_group)
conf.register_opts(scheduler_opts, group=scheduler_group)
conf.register_group(filter_scheduler_group)
conf.register_opts(filter_scheduler_opts, group=filter_scheduler_group)
conf.register_group(trust_group)
conf.register_opts(trusted_opts, group=trust_group)
conf.register_group(metrics_group)
conf.register_opts(metrics_weight_opts, group=metrics_group)
def list_opts():
return {scheduler_group: scheduler_opts,
filter_scheduler_group: filter_scheduler_opts,
trust_group: trusted_opts,
metrics_group: metrics_weight_opts}
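# Minimal usage sketch (not part of this module; assumes the usual oslo.config
# wiring and a hypothetical config file path):
#
#   from oslo_config import cfg as oslo_cfg
#   CONF = oslo_cfg.CONF
#   register_opts(CONF)
#   CONF(['--config-file', '/etc/nova/nova.conf'])
#   print(CONF.trusted_computing.attestation_port)
#
# register_opts() attaches every group defined above to the ConfigOpts object,
# while list_opts() returns the mapping typically consumed by the oslo
# sample-config and documentation generators.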
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source httplib connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be .close() and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
import os
import time
import functools
import inspect
import itertools
from urllib import quote
from eventlet import spawn_n, GreenPile
from eventlet.queue import Queue, Empty, Full
from eventlet.timeout import Timeout
from swift.common.wsgi import make_pre_authed_env
from swift.common.utils import normalize_timestamp, config_true_value, \
public, split_path, list_from_csv, GreenthreadSafeIterator, \
quorum_size
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ChunkReadTimeout, ConnectionTimeout
from swift.common.http import is_informational, is_success, is_redirection, \
is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \
HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED
from swift.common.swob import Request, Response, HeaderKeyDict
def update_headers(response, headers):
"""
Helper function to update headers in the response.
:param response: swob.Response object
:param headers: dictionary headers
"""
if hasattr(headers, 'items'):
headers = headers.items()
for name, value in headers:
if name == 'etag':
response.headers[name] = value.replace('"', '')
elif name not in ('date', 'content-length', 'content-type',
'connection', 'x-put-timestamp', 'x-delete-after'):
response.headers[name] = value
def source_key(resp):
"""
Provide the timestamp of the swift http response as a floating
point value. Used as a sort key.
:param resp: httplib response object
"""
return float(resp.getheader('x-put-timestamp') or
resp.getheader('x-timestamp') or 0)
def delay_denial(func):
"""
Decorator to declare which methods should have any swift.authorize call
delayed. This is so the method can load the Request object up with
additional information that may be needed by the authorization system.
:param func: function for which authorization will be delayed
"""
func.delay_denial = True
@functools.wraps(func)
def wrapped(*a, **kw):
return func(*a, **kw)
return wrapped
def get_account_memcache_key(account):
cache_key, env_key = _get_cache_key(account, None)
return cache_key
def get_container_memcache_key(account, container):
if not container:
raise ValueError("container not provided")
cache_key, env_key = _get_cache_key(account, container)
return cache_key
def headers_to_account_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of account info based on response headers.
"""
headers = dict((k.lower(), v) for k, v in dict(headers).iteritems())
return {
'status': status_int,
# 'container_count' anomaly:
# Previous code sometimes expects an int sometimes a string
# Current code aligns to str and None, yet translates to int in
# deprecated functions as needed
'container_count': headers.get('x-account-container-count'),
'total_object_count': headers.get('x-account-object-count'),
'bytes': headers.get('x-account-bytes-used'),
'meta': dict((key[15:], value)
for key, value in headers.iteritems()
if key.startswith('x-account-meta-'))
}
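# For illustration only (hypothetical header values): a backend HEAD response
# carrying
#   X-Account-Container-Count: 3
#   X-Account-Object-Count: 12
#   X-Account-Bytes-Used: 4096
#   X-Account-Meta-Temp-URL-Key: secret
# maps to
#   {'status': 200, 'container_count': '3', 'total_object_count': '12',
#    'bytes': '4096', 'meta': {'temp-url-key': 'secret'}}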
def headers_to_container_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of container info based on response headers.
"""
headers = dict((k.lower(), v) for k, v in dict(headers).iteritems())
return {
'status': status_int,
'read_acl': headers.get('x-container-read'),
'write_acl': headers.get('x-container-write'),
'sync_key': headers.get('x-container-sync-key'),
'object_count': headers.get('x-container-object-count'),
'bytes': headers.get('x-container-bytes-used'),
'versions': headers.get('x-versions-location'),
'cors': {
'allow_origin': headers.get(
'x-container-meta-access-control-allow-origin'),
'expose_headers': headers.get(
'x-container-meta-access-control-expose-headers'),
'max_age': headers.get(
'x-container-meta-access-control-max-age')
},
'meta': dict((key[17:], value)
for key, value in headers.iteritems()
if key.startswith('x-container-meta-'))
}
def cors_validation(func):
"""
Decorator to check if the request is a CORS request and if so, if it's
valid.
:param func: function to check
"""
@functools.wraps(func)
def wrapped(*a, **kw):
controller = a[0]
req = a[1]
# The logic here was interpreted from
# http://www.w3.org/TR/cors/#resource-requests
# Is this a CORS request?
req_origin = req.headers.get('Origin', None)
if req_origin:
# Yes, this is a CORS request so test if the origin is allowed
container_info = \
controller.container_info(controller.account_name,
controller.container_name, req)
cors_info = container_info.get('cors', {})
# Call through to the decorated method
resp = func(*a, **kw)
# Expose,
# - simple response headers,
# http://www.w3.org/TR/cors/#simple-response-header
# - swift specific: etag, x-timestamp, x-trans-id
# - user metadata headers
# - headers provided by the user in
# x-container-meta-access-control-expose-headers
expose_headers = ['cache-control', 'content-language',
'content-type', 'expires', 'last-modified',
'pragma', 'etag', 'x-timestamp', 'x-trans-id']
for header in resp.headers:
if header.startswith('X-Container-Meta') or \
header.startswith('X-Object-Meta'):
expose_headers.append(header.lower())
if cors_info.get('expose_headers'):
expose_headers.extend(
[header_line.strip()
for header_line in cors_info['expose_headers'].split(' ')
if header_line.strip()])
resp.headers['Access-Control-Expose-Headers'] = \
', '.join(expose_headers)
# The user agent won't process the response if the Allow-Origin
# header isn't included
resp.headers['Access-Control-Allow-Origin'] = req_origin
return resp
else:
# Not a CORS request so make the call as normal
return func(*a, **kw)
return wrapped
def get_container_info(env, app, swift_source=None):
"""
Get the info structure for a container, based on env and app.
This is useful to middlewares.
Note: This call bypasses auth. Success does not imply that the
request has authorization to the container.
"""
(version, account, container, _) = \
split_path(env['PATH_INFO'], 3, 4, True)
info = get_info(app, env, account, container, ret_not_found=True)
if not info:
info = headers_to_container_info({}, 0)
return info
def get_account_info(env, app, swift_source=None):
"""
Get the info structure for an account, based on env and app.
This is useful to middlewares.
Note: This call bypasses auth. Success does not imply that the
request has authorization to the account.
"""
(version, account, _junk, _junk) = \
split_path(env['PATH_INFO'], 2, 4, True)
info = get_info(app, env, account, ret_not_found=True)
if not info:
info = headers_to_account_info({}, 0)
if info.get('container_count') is None:
info['container_count'] = 0
else:
info['container_count'] = int(info['container_count'])
return info
def _get_cache_key(account, container):
"""
Get the keys for both memcache (cache_key) and env (env_key)
where info about accounts and containers is cached
:param account: The name of the account
:param container: The name of the container (or None if account)
:returns: a tuple of (cache_key, env_key)
"""
if container:
cache_key = 'container/%s/%s' % (account, container)
else:
cache_key = 'account/%s' % account
# Use a unique environment cache key per account and per container.
# This allows caching both account and container and ensures that when we
# copy this env to form a new request, it won't accidentally reuse the
# old container or account info
env_key = 'swift.%s' % cache_key
return cache_key, env_key
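# For illustration only (hypothetical names): _get_cache_key('AUTH_test', None)
# returns ('account/AUTH_test', 'swift.account/AUTH_test'), while
# _get_cache_key('AUTH_test', 'photos') returns
# ('container/AUTH_test/photos', 'swift.container/AUTH_test/photos').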
def _set_info_cache(app, env, account, container, resp):
"""
Cache info in both memcache and env.
Caching is used to avoid unnecessary calls to account & container servers.
This is a private function that is being called by GETorHEAD_base and
by clear_info_cache.
Any attempt to GET or HEAD from the container/account server should use
the GETorHEAD_base interface which would then set the cache.
:param app: the application object
:param account: the unquoted account name
:param container: the unquoted container name or None
:param resp: the response received or None if info cache should be cleared
"""
if container:
cache_time = app.recheck_container_existence
else:
cache_time = app.recheck_account_existence
cache_key, env_key = _get_cache_key(account, container)
if resp:
if resp.status_int == HTTP_NOT_FOUND:
cache_time *= 0.1
elif not is_success(resp.status_int):
cache_time = None
else:
cache_time = None
# Next, actually set both memcache and the env cache
memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
if not cache_time:
env.pop(env_key, None)
if memcache:
memcache.delete(cache_key)
return
if container:
info = headers_to_container_info(resp.headers, resp.status_int)
else:
info = headers_to_account_info(resp.headers, resp.status_int)
if memcache:
memcache.set(cache_key, info, cache_time)
env[env_key] = info
def clear_info_cache(app, env, account, container=None):
"""
Clear the cached info in both memcache and env
:param app: the application object
:param account: the account name
:param container: the container name or None when clearing account info
"""
_set_info_cache(app, env, account, container, None)
def _get_info_cache(app, env, account, container=None):
"""
Get the cached info from env or memcache (if used) in that order
Used for both account and container info
A private function used by get_info
:param app: the application object
:param env: the environment used by the current request
:returns: the cached info or None if not cached
"""
cache_key, env_key = _get_cache_key(account, container)
if env_key in env:
return env[env_key]
memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
if memcache:
info = memcache.get(cache_key)
if info:
env[env_key] = info
return info
return None
def _prepare_pre_auth_info_request(env, path):
"""
Prepares a pre authed request to obtain info using a HEAD.
:param env: the environment used by the current request
:param path: The unquoted request path
:returns: the pre authed request
"""
# Set the env for the pre_authed call without a query string
newenv = make_pre_authed_env(env, 'HEAD', path, agent='Swift',
query_string='', swift_source='GET_INFO')
# Note that Request.blank expects quoted path
return Request.blank(quote(path), environ=newenv)
def get_info(app, env, account, container=None, ret_not_found=False):
"""
Get the info about accounts or containers
Note: This call bypasses auth. Success does not imply that the
request has authorization to the info.
:param app: the application object
:param env: the environment used by the current request
:param account: The unquoted name of the account
:param container: The unquoted name of the container (or None if account)
:returns: the cached info or None if cannot be retrieved
"""
info = _get_info_cache(app, env, account, container)
if info:
if ret_not_found or is_success(info['status']):
return info
return None
# Not in cache, so try the account/container servers
path = '/v1/%s' % account
if container:
# For a container request, first make sure the account itself exists
if not get_info(app, env, account):
return None
path += '/' + container
req = _prepare_pre_auth_info_request(env, path)
# Whenever we do a GET/HEAD, the GETorHEAD_base will set the info in
# the environment under environ[env_key] and in memcache. We will
# pick the one from environ[env_key] and use it to set the caller env
resp = req.get_response(app)
cache_key, env_key = _get_cache_key(account, container)
try:
info = resp.environ[env_key]
env[env_key] = info
if ret_not_found or is_success(info['status']):
return info
except (KeyError, AttributeError):
pass
return None
class Controller(object):
"""Base WSGI controller class for the proxy"""
server_type = 'Base'
# Ensure these are all lowercase
pass_through_headers = []
def __init__(self, app):
"""
Creates a controller attached to an application instance
:param app: the application instance
"""
self.account_name = None
self.app = app
self.trans_id = '-'
self.allowed_methods = set()
all_methods = inspect.getmembers(self, predicate=inspect.ismethod)
for name, m in all_methods:
if getattr(m, 'publicly_accessible', False):
self.allowed_methods.add(name)
def _x_remove_headers(self):
"""
Returns a list of headers that must not be sent to the backend
:returns: a list of headers
"""
return []
def transfer_headers(self, src_headers, dst_headers):
"""
Transfer legal headers from an original client request to a dictionary
that will be used as the headers of the backend request
:param src_headers: A dictionary of the original client request headers
:param dst_headers: A dictionary of the backend request headers
"""
st = self.server_type.lower()
x_remove = 'x-remove-%s-meta-' % st
dst_headers.update((k.lower().replace('-remove', '', 1), '')
for k in src_headers
if k.lower().startswith(x_remove) or
k.lower() in self._x_remove_headers())
x_meta = 'x-%s-meta-' % st
dst_headers.update((k.lower(), v)
for k, v in src_headers.iteritems()
if k.lower() in self.pass_through_headers or
k.lower().startswith(x_meta))
def generate_request_headers(self, orig_req=None, additional=None,
transfer=False):
"""
Create the headers to be used in backend requests
:param orig_req: the original request sent by the client to the proxy
:param additional: additional headers to send to the backend
:param transfer: If True, transfer headers from original client request
:returns: a dictionary of headers
"""
# Use the additional headers first so they don't overwrite the headers
# we require.
headers = HeaderKeyDict(additional) if additional else HeaderKeyDict()
if transfer:
self.transfer_headers(orig_req.headers, headers)
if 'x-timestamp' not in headers:
headers['x-timestamp'] = normalize_timestamp(time.time())
if orig_req:
referer = orig_req.as_referer()
else:
referer = ''
headers.update({'x-trans-id': self.trans_id,
'connection': 'close',
'user-agent': 'proxy-server %s' % os.getpid(),
'referer': referer})
return headers
def error_occurred(self, node, msg):
"""
Handle logging and accounting of an error for a node.
:param node: dictionary of node to handle errors for
:param msg: error message
"""
node['errors'] = node.get('errors', 0) + 1
node['last_error'] = time.time()
self.app.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
def exception_occurred(self, node, typ, additional_info):
"""
Handle logging of generic exceptions.
:param node: dictionary of node to log the error for
:param typ: server type
:param additional_info: additional information to log
"""
self.app.logger.exception(
_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: '
'%(info)s'),
{'type': typ, 'ip': node['ip'], 'port': node['port'],
'device': node['device'], 'info': additional_info})
def error_limited(self, node):
"""
Check if the node is currently error limited.
:param node: dictionary of node to check
:returns: True if error limited, False otherwise
"""
now = time.time()
if 'errors' not in node:
return False
if 'last_error' in node and node['last_error'] < \
now - self.app.error_suppression_interval:
del node['last_error']
if 'errors' in node:
del node['errors']
return False
limited = node['errors'] > self.app.error_suppression_limit
if limited:
self.app.logger.debug(
_('Node error limited %(ip)s:%(port)s (%(device)s)'), node)
return limited
def error_limit(self, node, msg):
"""
Mark a node as error limited. This immediately pretends the
node received enough errors to trigger error suppression. Use
this for errors like Insufficient Storage. For other errors
use :func:`error_occurred`.
:param node: dictionary of node to error limit
:param msg: error message
"""
node['errors'] = self.app.error_suppression_limit + 1
node['last_error'] = time.time()
self.app.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
def account_info(self, account, req=None):
"""
Get account information, and also verify that the account exists.
:param account: name of the account to get the info for
:param req: caller's HTTP request context object (optional)
:returns: tuple of (account partition, account nodes, container_count)
or (None, None, None) if it does not exist
"""
partition, nodes = self.app.account_ring.get_nodes(account)
if req:
env = getattr(req, 'environ', {})
else:
env = {}
info = get_info(self.app, env, account)
if not info:
return None, None, None
if info.get('container_count') is None:
container_count = 0
else:
container_count = int(info['container_count'])
return partition, nodes, container_count
def container_info(self, account, container, req=None):
"""
Get container information and thereby verify container existence.
This will also verify account existence.
:param account: account name for the container
:param container: container name to look up
:param req: caller's HTTP request context object (optional)
:returns: dict containing at least container partition ('partition'),
container nodes ('containers'), container read
acl ('read_acl'), container write acl ('write_acl'),
and container sync key ('sync_key').
Values are set to None if the container does not exist.
"""
part, nodes = self.app.container_ring.get_nodes(account, container)
if req:
env = getattr(req, 'environ', {})
else:
env = {}
info = get_info(self.app, env, account, container)
if not info:
info = headers_to_container_info({}, 0)
info['partition'] = None
info['nodes'] = None
else:
info['partition'] = part
info['nodes'] = nodes
return info
def iter_nodes(self, ring, partition, node_iter=None):
"""
Yields nodes for a ring partition, skipping over error
limited nodes and stopping at the configurable number of
nodes. If a node yielded subsequently gets error limited, an
extra node will be yielded to take its place.
Note that if you're going to iterate over this concurrently from
multiple greenthreads, you'll want to use a
swift.common.utils.GreenthreadSafeIterator to serialize access.
Otherwise, you may get ValueErrors from concurrent access. (You also
may not, depending on how logging is configured, the vagaries of
socket IO and eventlet, and the phase of the moon.)
:param ring: ring to yield nodes from
:param partition: ring partition to yield nodes for
:param node_iter: optional iterable of nodes to try. Useful if you
want to filter or reorder the nodes.
"""
part_nodes = ring.get_part_nodes(partition)
if node_iter is None:
node_iter = itertools.chain(part_nodes,
ring.get_more_nodes(partition))
num_primary_nodes = len(part_nodes)
# Use of list() here forcibly yanks the first N nodes (the primary
# nodes) from node_iter, so the rest of its values are handoffs.
primary_nodes = self.app.sort_nodes(
list(itertools.islice(node_iter, num_primary_nodes)))
handoff_nodes = node_iter
nodes_left = self.app.request_node_count(ring)
for node in primary_nodes:
if not self.error_limited(node):
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
handoffs = 0
for node in handoff_nodes:
if not self.error_limited(node):
handoffs += 1
if self.app.log_handoffs:
self.app.logger.increment('handoff_count')
self.app.logger.warning(
'Handoff requested (%d)' % handoffs)
if handoffs == len(primary_nodes):
self.app.logger.increment('handoff_all_count')
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
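# Usage sketch for iter_nodes (this mirrors how GETorHEAD_base below consumes
# it; nothing new is introduced):
#
#     for node in self.iter_nodes(ring, partition):
#         try:
#             ...connect to node['ip'], node['port'], node['device']...
#         except (Exception, Timeout):
#             continue
#
# Error-limited nodes are skipped transparently, and extra handoff nodes are
# yielded when primaries fail, up to self.app.request_node_count(ring) usable
# nodes in total.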
def _make_request(self, nodes, part, method, path, headers, query,
logger_thread_locals):
"""
Iterates over the given nodes, sending an HTTP request to one node at a
time until a usable (non-informational, non-5xx) response is received.
:param nodes: an iterator of the backend server and handoff servers
:param part: the partition number
:param method: the method to send to the backend
:param path: the path to send to the backend
:param headers: a dictionary of headers for the backend request
:param query: query string to send to the backend.
:param logger_thread_locals: The thread local values to be set on the
                             self.app.logger to retain transaction
                             logging information.
:returns: a tuple of (status, reason, headers, body) from the first
          acceptable response, or None if no node returned one
"""
self.app.logger.thread_locals = logger_thread_locals
for node in nodes:
try:
start_node_timing = time.time()
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], part, method, path,
headers=headers, query_string=query)
conn.node = node
self.app.set_node_timing(node, time.time() - start_node_timing)
with Timeout(self.app.node_timeout):
resp = conn.getresponse()
if not is_informational(resp.status) and \
not is_server_error(resp.status):
return resp.status, resp.reason, resp.getheaders(), \
resp.read()
elif resp.status == HTTP_INSUFFICIENT_STORAGE:
self.error_limit(node, _('ERROR Insufficient Storage'))
except (Exception, Timeout):
self.exception_occurred(node, self.server_type,
_('Trying to %(method)s %(path)s') %
{'method': method, 'path': path})
def make_requests(self, req, ring, part, method, path, headers,
query_string=''):
"""
Sends an HTTP request to multiple nodes and aggregates the results.
It attempts the primary nodes concurrently, then iterates over the
handoff nodes as needed.
:param req: a request sent by the client
:param ring: the ring used for finding backend servers
:param part: the partition number
:param method: the method to send to the backend
:param path: the path to send to the backend
:param headers: a list of dicts, where each dict represents one
backend request that should be made.
:param query_string: optional query string to send to the backend
:returns: a swob.Response object
"""
start_nodes = ring.get_part_nodes(part)
nodes = GreenthreadSafeIterator(self.iter_nodes(ring, part))
pile = GreenPile(len(start_nodes))
for head in headers:
pile.spawn(self._make_request, nodes, part, method, path,
head, query_string, self.app.logger.thread_locals)
response = [resp for resp in pile if resp]
while len(response) < len(start_nodes):
response.append((HTTP_SERVICE_UNAVAILABLE, '', '', ''))
statuses, reasons, resp_headers, bodies = zip(*response)
return self.best_response(req, statuses, reasons, bodies,
'%s %s' % (self.server_type, req.method),
headers=resp_headers)
def best_response(self, req, statuses, reasons, bodies, server_type,
etag=None, headers=None):
"""
Given a list of responses from several servers, choose the best to
return to the API.
:param req: swob.Request object
:param statuses: list of statuses returned
:param reasons: list of reasons for each status
:param bodies: bodies of each response
:param server_type: type of server the responses came from
:param etag: etag
:param headers: headers of each response
:returns: swob.Response object with the correct status, body, etc. set
"""
resp = Response(request=req)
if len(statuses):
for hundred in (HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST):
hstatuses = \
[s for s in statuses if hundred <= s < hundred + 100]
if len(hstatuses) >= quorum_size(len(statuses)):
status = max(hstatuses)
status_index = statuses.index(status)
resp.status = '%s %s' % (status, reasons[status_index])
resp.body = bodies[status_index]
if headers:
update_headers(resp, headers[status_index])
if etag:
resp.headers['etag'] = etag.strip('"')
return resp
self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'),
{'type': server_type, 'statuses': statuses})
resp.status = '503 Service Unavailable'
return resp
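# Worked example of the quorum logic in best_response (hypothetical statuses):
# with backend statuses (404, 404, 503), quorum_size(3) == 2, so the 4xx bucket
# reaches quorum and the response status becomes 404 (the max of that bucket,
# with its recorded reason); with (200, 404, 503) no bucket reaches quorum and
# the 503 fallback above is returned instead.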
@public
def GET(self, req):
"""
Handler for HTTP GET requests.
:param req: The client request
:returns: the response to the client
"""
return self.GETorHEAD(req)
@public
def HEAD(self, req):
"""
Handler for HTTP HEAD requests.
:param req: The client request
:returns: the response to the client
"""
return self.GETorHEAD(req)
def _make_app_iter_reader(self, node, source, queue, logger_thread_locals):
"""
Reads from the source and places data in the queue. It expects
something else to be reading from the queue and, if nothing does within
self.app.client_timeout seconds, the process will be aborted.
:param node: The node dict that the source is connected to, for
logging/error-limiting purposes.
:param source: The httplib.Response object to read from.
:param queue: The eventlet.queue.Queue to place read source data into.
:param logger_thread_locals: The thread local values to be set on the
self.app.logger to retain transaction
logging information.
"""
self.app.logger.thread_locals = logger_thread_locals
success = True
try:
try:
while True:
with ChunkReadTimeout(self.app.node_timeout):
chunk = source.read(self.app.object_chunk_size)
if not chunk:
break
queue.put(chunk, timeout=self.app.client_timeout)
except Full:
self.app.logger.warn(
_('Client did not read from queue within %ss') %
self.app.client_timeout)
self.app.logger.increment('client_timeouts')
success = False
except (Exception, Timeout):
self.exception_occurred(node, _('Object'),
_('Trying to read during GET'))
success = False
finally:
# Ensure the queue getter gets a terminator.
queue.resize(2)
queue.put(success)
# Close-out the connection as best as possible.
if getattr(source, 'swift_conn', None):
self.close_swift_conn(source)
def _make_app_iter(self, node, source):
"""
Returns an iterator over the contents of the source (via its read
func). There is also quite a bit of cleanup to ensure garbage
collection works and the underlying socket of the source is closed.
:param source: The httplib.Response object this iterator should read
from.
:param node: The node the source is reading from, for logging purposes.
"""
try:
# Spawn reader to read from the source and place in the queue.
# We then drop any reference to the source or node, for garbage
# collection purposes.
queue = Queue(1)
spawn_n(self._make_app_iter_reader, node, source, queue,
self.app.logger.thread_locals)
source = node = None
while True:
chunk = queue.get(timeout=self.app.node_timeout)
if isinstance(chunk, bool): # terminator
success = chunk
if not success:
raise Exception(_('Failed to read all data'
' from the source'))
break
yield chunk
except Empty:
raise ChunkReadTimeout()
except (GeneratorExit, Timeout):
self.app.logger.warn(_('Client disconnected on read'))
except Exception:
self.app.logger.exception(_('Trying to send to client'))
raise
def close_swift_conn(self, src):
"""
Force close the http connection to the backend.
:param src: the response from the backend
"""
try:
src.swift_conn.close()
except Exception:
pass
src.swift_conn = None
try:
while src.read(self.app.object_chunk_size):
pass
except Exception:
pass
try:
src.close()
except Exception:
pass
def is_good_source(self, src):
"""
Indicates whether or not the request made to the backend found
what it was looking for.
:param src: the response from the backend
:returns: True if found, False if not
"""
return is_success(src.status) or is_redirection(src.status)
def autocreate_account(self, env, account):
"""
Autocreate an account
:param env: the environment of the request leading to this autocreate
:param account: the unquoted account name
"""
partition, nodes = self.app.account_ring.get_nodes(account)
path = '/%s' % account
headers = {'X-Timestamp': normalize_timestamp(time.time()),
'X-Trans-Id': self.trans_id,
'Connection': 'close'}
resp = self.make_requests(Request.blank('/v1' + path),
self.app.account_ring, partition, 'PUT',
path, [headers] * len(nodes))
if is_success(resp.status_int):
self.app.logger.info('autocreate account %r' % path)
clear_info_cache(self.app, env, account)
else:
self.app.logger.warning('Could not autocreate account %r' % path)
def GETorHEAD_base(self, req, server_type, ring, partition, path):
"""
Base handler for HTTP GET or HEAD requests.
:param req: swob.Request object
:param server_type: server type
:param ring: the ring to obtain nodes from
:param partition: partition
:param path: path for the request
:returns: swob.Response object
"""
statuses = []
reasons = []
bodies = []
source_headers = []
sources = []
newest = config_true_value(req.headers.get('x-newest', 'f'))
for node in self.iter_nodes(ring, partition):
start_node_timing = time.time()
try:
with ConnectionTimeout(self.app.conn_timeout):
headers = self.generate_request_headers(
req, additional=req.headers)
conn = http_connect(
node['ip'], node['port'], node['device'], partition,
req.method, path, headers=headers,
query_string=req.query_string)
self.app.set_node_timing(node, time.time() - start_node_timing)
with Timeout(self.app.node_timeout):
possible_source = conn.getresponse()
# See NOTE: swift_conn at top of file about this.
possible_source.swift_conn = conn
except (Exception, Timeout):
self.exception_occurred(
node, server_type, _('Trying to %(method)s %(path)s') %
{'method': req.method, 'path': req.path})
continue
if self.is_good_source(possible_source):
# 404 if we know we don't have a synced copy
if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
statuses.append(HTTP_NOT_FOUND)
reasons.append('')
bodies.append('')
source_headers.append('')
self.close_swift_conn(possible_source)
else:
statuses.append(possible_source.status)
reasons.append(possible_source.reason)
bodies.append('')
source_headers.append('')
sources.append((possible_source, node))
if not newest: # one good source is enough
break
else:
statuses.append(possible_source.status)
reasons.append(possible_source.reason)
bodies.append(possible_source.read())
source_headers.append(possible_source.getheaders())
if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
self.error_limit(node, _('ERROR Insufficient Storage'))
elif is_server_error(possible_source.status):
self.error_occurred(node, _('ERROR %(status)d %(body)s '
'From %(type)s Server') %
{'status': possible_source.status,
'body': bodies[-1][:1024],
'type': server_type})
res = None
if sources:
sources.sort(key=lambda s: source_key(s[0]))
source, node = sources.pop()
for src, _junk in sources:
self.close_swift_conn(src)
res = Response(request=req, conditional_response=True)
if req.method == 'GET' and \
source.status in (HTTP_OK, HTTP_PARTIAL_CONTENT):
res.app_iter = self._make_app_iter(node, source)
# See NOTE: swift_conn at top of file about this.
res.swift_conn = source.swift_conn
res.status = source.status
update_headers(res, source.getheaders())
if not res.environ:
res.environ = {}
res.environ['swift_x_timestamp'] = \
source.getheader('x-timestamp')
res.accept_ranges = 'bytes'
res.content_length = source.getheader('Content-Length')
if source.getheader('Content-Type'):
res.charset = None
res.content_type = source.getheader('Content-Type')
if not res:
res = self.best_response(req, statuses, reasons, bodies,
'%s %s' % (server_type, req.method),
headers=source_headers)
try:
(account, container) = split_path(req.path_info, 1, 2)
_set_info_cache(self.app, req.environ, account, container, res)
except ValueError:
pass
return res
def is_origin_allowed(self, cors_info, origin):
"""
Is the given Origin allowed to make requests to this resource
:param cors_info: the resource's CORS related metadata headers
:param origin: the origin making the request
:return: True or False
"""
allowed_origins = set()
if cors_info.get('allow_origin'):
allowed_origins.update(
[a.strip()
for a in cors_info['allow_origin'].split(' ')
if a.strip()])
if self.app.cors_allow_origin:
allowed_origins.update(self.app.cors_allow_origin)
return origin in allowed_origins or '*' in allowed_origins
@public
def OPTIONS(self, req):
"""
Base handler for OPTIONS requests
:param req: swob.Request object
:returns: swob.Response object
"""
# Prepare the default response
headers = {'Allow': ', '.join(self.allowed_methods)}
resp = Response(status=200, request=req, headers=headers)
# If this isn't a CORS pre-flight request then return now
req_origin_value = req.headers.get('Origin', None)
if not req_origin_value:
return resp
# This is a CORS preflight request so check it's allowed
try:
container_info = \
self.container_info(self.account_name,
self.container_name, req)
except AttributeError:
# This should only happen for requests to the Account. A future
# change could allow CORS requests to the Account level as well.
return resp
cors = container_info.get('cors', {})
# If the CORS origin isn't allowed return a 401
if not self.is_origin_allowed(cors, req_origin_value) or (
req.headers.get('Access-Control-Request-Method') not in
self.allowed_methods):
resp.status = HTTP_UNAUTHORIZED
return resp
# Allow all headers requested in the request. The CORS
# specification does leave the door open for this, as mentioned in
# http://www.w3.org/TR/cors/#resource-preflight-requests
# Note: since the list of allowed headers can be unbounded, simply
# echoing back the requested headers is sufficient.
allow_headers = set()
if req.headers.get('Access-Control-Request-Headers'):
allow_headers.update(
list_from_csv(req.headers['Access-Control-Request-Headers']))
# Populate the response with the CORS preflight headers
headers['access-control-allow-origin'] = req_origin_value
if cors.get('max_age') is not None:
headers['access-control-max-age'] = cors.get('max_age')
headers['access-control-allow-methods'] = \
', '.join(self.allowed_methods)
if allow_headers:
headers['access-control-allow-headers'] = ', '.join(allow_headers)
resp.headers = headers
return resp
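# Illustrative CORS preflight exchange handled by OPTIONS above (all values
# hypothetical): a client sends
#   OPTIONS /v1/AUTH_test/photos
#   Origin: http://example.com
#   Access-Control-Request-Method: GET
# and, provided http://example.com appears in the container's allow_origin
# metadata (or in the proxy's cors_allow_origin setting) and GET is an allowed
# method, the proxy answers 200 with access-control-allow-origin set to
# http://example.com, access-control-allow-methods listing the allowed methods,
# and access-control-max-age when the container sets one.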
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- getdeploymentinfo
- getchaintxstats
- gettxoutsetinfo
- getblockheader
- getdifficulty
- getnetworkhashps
- waitforblockheight
- getblock
- getblockhash
- getbestblockhash
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import os
import subprocess
from test_framework.blocktools import (
MAX_FUTURE_BLOCK_TIME,
TIME_GENESIS_BLOCK,
create_block,
create_coinbase,
)
from test_framework.messages import (
CBlockHeader,
from_hex,
msg_block,
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
get_datadir_path,
)
from test_framework.wallet import MiniWallet
HEIGHT = 200 # blocks mined
TIME_RANGE_STEP = 600 # ten-minute steps
TIME_RANGE_MTP = TIME_GENESIS_BLOCK + (HEIGHT - 6) * TIME_RANGE_STEP
TIME_RANGE_TIP = TIME_GENESIS_BLOCK + (HEIGHT - 1) * TIME_RANGE_STEP
TIME_RANGE_END = TIME_GENESIS_BLOCK + HEIGHT * TIME_RANGE_STEP
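# For reference, with HEIGHT = 200 and 600-second steps the constants above
# work out as follows (the block at height h is mined at
# TIME_GENESIS_BLOCK + (h - 1) * 600; see mine_chain below):
#   TIME_RANGE_TIP = genesis + 199 * 600   # timestamp of the chain tip
#   TIME_RANGE_MTP = genesis + 194 * 600   # median time of the last 11 blocks
#   TIME_RANGE_END = genesis + 200 * 600   # one step past the tip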
class BlockchainTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
self.mine_chain()
self._test_max_future_block_time()
self.restart_node(
0,
extra_args=[
"-stopatheight=207",
"-checkblocks=-1", # Check all blocks
"-prune=1", # Set pruning after rescan is complete
],
)
self._test_getblockchaininfo()
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
self._test_waitforblockheight()
self._test_getblock()
self._test_getdeploymentinfo()
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
self.log.info(f"Generate {HEIGHT} blocks after the genesis block in ten-minute steps")
for t in range(TIME_GENESIS_BLOCK, TIME_RANGE_END, TIME_RANGE_STEP):
self.nodes[0].setmocktime(t)
self.generate(self.wallet, 1)
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], HEIGHT)
def _test_max_future_block_time(self):
self.stop_node(0)
self.log.info("A block tip of more than MAX_FUTURE_BLOCK_TIME in the future raises an error")
self.nodes[0].assert_start_raises_init_error(
extra_args=[f"-mocktime={TIME_RANGE_TIP - MAX_FUTURE_BLOCK_TIME - 1}"],
expected_msg=": The block database contains a block which appears to be from the future."
" This may be due to your computer's date and time being set incorrectly."
f" Only rebuild the block database if you are sure that your computer's date and time are correct.{os.linesep}"
"Please restart with -reindex or -reindex-chainstate to recover.",
)
self.log.info("A block tip of MAX_FUTURE_BLOCK_TIME in the future is fine")
self.start_node(0, extra_args=[f"-mocktime={TIME_RANGE_TIP - MAX_FUTURE_BLOCK_TIME}"])
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'initialblockdownload',
'mediantime',
'pruned',
'size_on_disk',
'time',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
assert_equal(res['time'], TIME_RANGE_END - TIME_RANGE_STEP)
assert_equal(res['mediantime'], TIME_RANGE_MTP)
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
# size_on_disk should be > 0
assert_greater_than(res['size_on_disk'], 0)
# pruneheight should be greater or equal to 0
assert_greater_than_or_equal(res['pruneheight'], 0)
# check other pruning fields given that prune=1
assert res['pruned']
assert not res['automatic_pruning']
self.restart_node(0, ['-stopatheight=207'])
res = self.nodes[0].getblockchaininfo()
# should have exact keys
assert_equal(sorted(res.keys()), keys)
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(
extra_args=['-testactivationheight=name@2'],
expected_msg='Error: Invalid name (name@2) for -testactivationheight=name@height.',
)
self.nodes[0].assert_start_raises_init_error(
extra_args=['-testactivationheight=bip34@-2'],
expected_msg='Error: Invalid height value (bip34@-2) for -testactivationheight=name@height.',
)
self.nodes[0].assert_start_raises_init_error(
extra_args=['-testactivationheight='],
expected_msg='Error: Invalid format () for -testactivationheight=name@height.',
)
self.start_node(0, extra_args=[
'-stopatheight=207',
'-prune=550',
])
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if prune=550
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))
# check related fields
assert res['pruned']
assert_equal(res['pruneheight'], 0)
assert res['automatic_pruning']
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
def check_signalling_deploymentinfo_result(self, gdi_result, height, blockhash, status_next):
assert height >= 144 and height <= 287
assert_equal(gdi_result, {
"hash": blockhash,
"height": height,
"deployments": {
'bip34': {'type': 'buried', 'active': True, 'height': 2},
'bip66': {'type': 'buried', 'active': True, 'height': 3},
'bip65': {'type': 'buried', 'active': True, 'height': 4},
'csv': {'type': 'buried', 'active': True, 'height': 5},
'segwit': {'type': 'buried', 'active': True, 'height': 6},
'testdummy': {
'type': 'bip9',
'bip9': {
'bit': 28,
'start_time': 0,
'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value
'min_activation_height': 0,
'status': 'started',
'status-next': status_next,
'since': 144,
'statistics': {
'period': 144,
'threshold': 108,
'elapsed': height - 143,
'count': height - 143,
'possible': True,
},
'signalling': '#'*(height-143),
},
'active': False
},
'taproot': {
'type': 'bip9',
'bip9': {
'start_time': -1,
'timeout': 9223372036854775807,
'min_activation_height': 0,
'status': 'active',
'status-next': 'active',
'since': 0,
},
'height': 0,
'active': True
}
}
})
def _test_getdeploymentinfo(self):
# Note: this generates blocks past the -stopatheight height, so it must
# be invoked after _test_stopatheight
self.log.info("Test getdeploymentinfo")
self.stop_node(0)
self.start_node(0, extra_args=[
'-testactivationheight=bip34@2',
'-testactivationheight=dersig@3',
'-testactivationheight=cltv@4',
'-testactivationheight=csv@5',
'-testactivationheight=segwit@6',
])
gbci207 = self.nodes[0].getblockchaininfo()
self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci207["blocks"], gbci207["bestblockhash"], "started")
# block just prior to lock in
self.generate(self.wallet, 287 - gbci207["blocks"])
gbci287 = self.nodes[0].getblockchaininfo()
self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci287["blocks"], gbci287["bestblockhash"], "locked_in")
# calling with an explicit hash works
self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(gbci207["bestblockhash"]), gbci207["blocks"], gbci207["bestblockhash"], "started")
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
# Test `getchaintxstats` invalid extra parameters
assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)
# Test `getchaintxstats` invalid `nblocks`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())
# Test `getchaintxstats` invalid `blockhash`
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
blockhash = self.nodes[0].getblockhash(HEIGHT)
self.nodes[0].invalidateblock(blockhash)
assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
self.nodes[0].reconsiderblock(blockhash)
chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], HEIGHT + 1)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * TIME_RANGE_STEP, 10), Decimal(1))
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
b200_hash = self.nodes[0].getblockhash(HEIGHT)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
assert_equal(chaintxstats['txcount'], HEIGHT + 1)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
assert_equal(chaintxstats['window_final_block_height'], HEIGHT)
assert_equal(chaintxstats['window_block_count'], HEIGHT - 1)
assert_equal(chaintxstats['window_tx_count'], HEIGHT - 1)
assert_equal(chaintxstats['window_interval'], time_diff)
assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(HEIGHT - 1))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
assert_equal(chaintxstats['txcount'], 2)
assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
assert_equal(chaintxstats['window_final_block_height'], 1)
assert_equal(chaintxstats['window_block_count'], 0)
assert 'window_tx_count' not in chaintxstats
assert 'window_interval' not in chaintxstats
assert 'txrate' not in chaintxstats
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], HEIGHT)
assert_equal(res['height'], HEIGHT)
assert_equal(res['txouts'], HEIGHT)
assert_equal(res['bogosize'], 16800)
assert_equal(res['bestblock'], node.getblockhash(HEIGHT))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test gettxoutsetinfo works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
        assert_equal(res2['bogosize'], 0)
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test gettxoutsetinfo returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
# The field 'disk_size' is non-deterministic and can thus not be
# compared between res and res3. Everything else should be the same.
del res['disk_size'], res3['disk_size']
assert_equal(res, res3)
self.log.info("Test gettxoutsetinfo hash_type option")
# Adding hash_type 'hash_serialized_2', which is the default, should
# not change the result.
res4 = node.gettxoutsetinfo(hash_type='hash_serialized_2')
del res4['disk_size']
assert_equal(res, res4)
# hash_type none should not return a UTXO set hash.
res5 = node.gettxoutsetinfo(hash_type='none')
assert 'hash_serialized_2' not in res5
# hash_type muhash should return a different UTXO set hash.
res6 = node.gettxoutsetinfo(hash_type='muhash')
assert 'muhash' in res6
        assert res['hash_serialized_2'] != res6['muhash']
# muhash should not be returned unless requested.
for r in [res, res2, res3, res4, res5]:
assert 'muhash' not in r
# Unknown hash_type raises an error
assert_raises_rpc_error(-8, "'foo hash' is not a valid hash_type", node.gettxoutsetinfo, "foo hash")
def _test_getblockheader(self):
self.log.info("Test getblockheader")
node = self.nodes[0]
assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(HEIGHT - 1)
header = node.getblockheader(blockhash=besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], HEIGHT)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert_equal(header['mediantime'], TIME_RANGE_MTP)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
# Test with verbose=False, which should return the header as hex.
header_hex = node.getblockheader(blockhash=besthash, verbose=False)
assert_is_hex_string(header_hex)
header = from_hex(CBlockHeader(), header_hex)
header.calc_sha256()
assert_equal(header.hash, besthash)
assert 'previousblockhash' not in node.getblockheader(node.getblockhash(0))
assert 'nextblockhash' not in node.getblockheader(node.getbestblockhash())
def _test_getdifficulty(self):
self.log.info("Test getdifficulty")
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
self.log.info("Test getnetworkhashps")
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
self.log.info("Test stopping at height")
assert_equal(self.nodes[0].getblockcount(), HEIGHT)
self.generate(self.wallet, 6)
assert_equal(self.nodes[0].getblockcount(), HEIGHT + 6)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.generatetoaddress(self.nodes[0], 1, self.wallet.get_address(), sync_fun=self.no_op)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0)
assert_equal(self.nodes[0].getblockcount(), HEIGHT + 7)
def _test_waitforblockheight(self):
self.log.info("Test waitforblockheight")
node = self.nodes[0]
peer = node.add_p2p_connection(P2PInterface())
current_height = node.getblock(node.getbestblockhash())['height']
# Create a fork somewhere below our current height, invalidate the tip
# of that fork, and then ensure that waitforblockheight still
# works as expected.
#
# (Previously this was broken based on setting
# `rpc/blockchain.cpp:latestblock` incorrectly.)
#
b20hash = node.getblockhash(20)
b20 = node.getblock(b20hash)
def solve_and_send_block(prevhash, height, time):
b = create_block(prevhash, create_coinbase(height), time)
b.solve()
peer.send_and_ping(msg_block(b))
return b
b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)
node.invalidateblock(b22f.hash)
def assert_waitforheight(height, timeout=2):
assert_equal(
node.waitforblockheight(height=height, timeout=timeout)['height'],
current_height)
assert_waitforheight(0)
assert_waitforheight(current_height - 1)
assert_waitforheight(current_height)
assert_waitforheight(current_height + 1)
def _test_getblock(self):
node = self.nodes[0]
fee_per_byte = Decimal('0.00000010')
fee_per_kb = 1000 * fee_per_byte
self.wallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node)
blockhash = self.generate(node, 1)[0]
def assert_fee_not_in_block(verbosity):
block = node.getblock(blockhash, verbosity)
assert 'fee' not in block['tx'][1]
def assert_fee_in_block(verbosity):
block = node.getblock(blockhash, verbosity)
tx = block['tx'][1]
assert 'fee' in tx
assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)
def assert_vin_contains_prevout(verbosity):
block = node.getblock(blockhash, verbosity)
tx = block["tx"][1]
total_vin = Decimal("0.00000000")
total_vout = Decimal("0.00000000")
for vin in tx["vin"]:
assert "prevout" in vin
assert_equal(set(vin["prevout"].keys()), set(("value", "height", "generated", "scriptPubKey")))
assert_equal(vin["prevout"]["generated"], True)
total_vin += vin["prevout"]["value"]
for vout in tx["vout"]:
total_vout += vout["value"]
assert_equal(total_vin, total_vout + tx["fee"])
def assert_vin_does_not_contain_prevout(verbosity):
block = node.getblock(blockhash, verbosity)
tx = block["tx"][1]
if isinstance(tx, str):
# In verbosity level 1, only the transaction hashes are written
pass
else:
for vin in tx["vin"]:
assert "prevout" not in vin
self.log.info("Test that getblock with verbosity 1 doesn't include fee")
assert_fee_not_in_block(1)
self.log.info('Test that getblock with verbosity 2 and 3 includes expected fee')
assert_fee_in_block(2)
assert_fee_in_block(3)
self.log.info("Test that getblock with verbosity 1 and 2 does not include prevout")
assert_vin_does_not_contain_prevout(1)
assert_vin_does_not_contain_prevout(2)
self.log.info("Test that getblock with verbosity 3 includes prevout")
assert_vin_contains_prevout(3)
self.log.info("Test that getblock with verbosity 2 and 3 still works with pruned Undo data")
datadir = get_datadir_path(self.options.tmpdir, 0)
self.log.info("Test getblock with invalid verbosity type returns proper error message")
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", node.getblock, blockhash, "2")
def move_block_file(old, new):
old_path = os.path.join(datadir, self.chain, 'blocks', old)
new_path = os.path.join(datadir, self.chain, 'blocks', new)
os.rename(old_path, new_path)
# Move instead of deleting so we can restore chain state afterwards
move_block_file('rev00000.dat', 'rev_wrong')
assert_fee_not_in_block(2)
assert_fee_not_in_block(3)
assert_vin_does_not_contain_prevout(2)
assert_vin_does_not_contain_prevout(3)
# Restore chain state
move_block_file('rev_wrong', 'rev00000.dat')
assert 'previousblockhash' not in node.getblock(node.getblockhash(0))
assert 'nextblockhash' not in node.getblock(node.getbestblockhash())
if __name__ == '__main__':
BlockchainTest().main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module implements IO classes to read and write data on MongoDB.
Read from MongoDB
-----------------
:class:`ReadFromMongoDB` is a ``PTransform`` that reads from a configured
MongoDB source and returns a ``PCollection`` of dicts representing MongoDB
documents.
To configure the MongoDB source, the URI of the MongoDB server, the database
name, and the collection name need to be provided.
Example usage::
pipeline | ReadFromMongoDB(uri='mongodb://localhost:27017',
db='testdb',
coll='input')
Write to MongoDB:
-----------------
:class:`WriteToMongoDB` is a ``PTransform`` that writes MongoDB documents to the
configured sink; the write is conducted through a MongoDB bulk_write of
``ReplaceOne`` operations. If a document's _id field already exists in the
MongoDB collection, that document is overwritten; otherwise, a new document is
inserted.
Example usage::
pipeline | WriteToMongoDB(uri='mongodb://localhost:27017',
db='testdb',
coll='output',
batch_size=10)
No backward compatibility guarantees. Everything in this module is experimental.
"""
from __future__ import absolute_import
from __future__ import division
import json
import logging
import struct
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.io.range_trackers import OrderedPositionRangeTracker
from apache_beam.transforms import DoFn
from apache_beam.transforms import PTransform
from apache_beam.transforms import Reshuffle
from apache_beam.utils.annotations import experimental
_LOGGER = logging.getLogger(__name__)
try:
  # pymongo ships its own bundled bson, which is not compatible with the
  # standalone bson package (https://github.com/py-bson/bson/issues/82). Try to
  # import objectid and if it fails because the bson package is installed,
  # MongoDB IO will not work but at least the rest of the SDK will work.
from bson import objectid
# pymongo also internally depends on bson.
from pymongo import ASCENDING
from pymongo import DESCENDING
from pymongo import MongoClient
from pymongo import ReplaceOne
except ImportError:
objectid = None
_LOGGER.warning("Could not find a compatible bson package.")
__all__ = ['ReadFromMongoDB', 'WriteToMongoDB']
@experimental()
class ReadFromMongoDB(PTransform):
"""A ``PTransfrom`` to read MongoDB documents into a ``PCollection``.
"""
def __init__(self,
uri='mongodb://localhost:27017',
db=None,
coll=None,
filter=None,
projection=None,
extra_client_params=None):
"""Initialize a :class:`ReadFromMongoDB`
Args:
uri (str): The MongoDB connection string following the URI format
db (str): The MongoDB database name
coll (str): The MongoDB collection name
filter: A `bson.SON
<https://api.mongodb.com/python/current/api/bson/son.html>`_ object
specifying elements which must be present for a document to be included
in the result set
projection: A list of field names that should be returned in the result
set or a dict specifying the fields to include or exclude
extra_client_params(dict): Optional `MongoClient
<https://api.mongodb.com/python/current/api/pymongo/mongo_client.html>`_
parameters
Returns:
:class:`~apache_beam.transforms.ptransform.PTransform`
"""
if extra_client_params is None:
extra_client_params = {}
if not isinstance(db, str):
      raise ValueError('ReadFromMongoDB db param must be specified as a string')
if not isinstance(coll, str):
      raise ValueError('ReadFromMongoDB coll param must be specified as a '
                       'string')
self._mongo_source = _BoundedMongoSource(
uri=uri,
db=db,
coll=coll,
filter=filter,
projection=projection,
extra_client_params=extra_client_params)
def expand(self, pcoll):
return pcoll | iobase.Read(self._mongo_source)
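# A hedged usage sketch (not part of the original module): the URI, database,
# collection and score filter below are illustrative values only. It shows how
# the `filter` and `projection` arguments documented above narrow the documents
# produced by ReadFromMongoDB before downstream transforms run.
def _example_read_high_scores(pipeline):
  return (
      pipeline
      | ReadFromMongoDB(
          uri='mongodb://localhost:27017',
          db='testdb',
          coll='scores',
          filter={'score': {'$gte': 90}},
          projection=['player', 'score'])
      | beam.Map(lambda doc: (doc['player'], doc['score'])))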
class _BoundedMongoSource(iobase.BoundedSource):
def __init__(self,
uri=None,
db=None,
coll=None,
filter=None,
projection=None,
extra_client_params=None):
if extra_client_params is None:
extra_client_params = {}
if filter is None:
filter = {}
self.uri = uri
self.db = db
self.coll = coll
self.filter = filter
self.projection = projection
self.spec = extra_client_params
def estimate_size(self):
with MongoClient(self.uri, **self.spec) as client:
return client[self.db].command('collstats', self.coll).get('size')
def split(self, desired_bundle_size, start_position=None, stop_position=None):
start_position, stop_position = self._replace_none_positions(
start_position, stop_position)
desired_bundle_size_in_mb = desired_bundle_size // 1024 // 1024
split_keys = self._get_split_keys(desired_bundle_size_in_mb, start_position,
stop_position)
bundle_start = start_position
for split_key_id in split_keys:
if bundle_start >= stop_position:
break
bundle_end = min(stop_position, split_key_id['_id'])
yield iobase.SourceBundle(weight=desired_bundle_size_in_mb,
source=self,
start_position=bundle_start,
stop_position=bundle_end)
bundle_start = bundle_end
# add range of last split_key to stop_position
if bundle_start < stop_position:
yield iobase.SourceBundle(weight=desired_bundle_size_in_mb,
source=self,
start_position=bundle_start,
stop_position=stop_position)
def get_range_tracker(self, start_position, stop_position):
start_position, stop_position = self._replace_none_positions(
start_position, stop_position)
return _ObjectIdRangeTracker(start_position, stop_position)
def read(self, range_tracker):
with MongoClient(self.uri, **self.spec) as client:
all_filters = self._merge_id_filter(range_tracker)
docs_cursor = client[self.db][self.coll].find(filter=all_filters).sort(
[('_id', ASCENDING)])
for doc in docs_cursor:
if not range_tracker.try_claim(doc['_id']):
return
yield doc
def display_data(self):
res = super(_BoundedMongoSource, self).display_data()
res['uri'] = self.uri
res['database'] = self.db
res['collection'] = self.coll
res['filter'] = json.dumps(self.filter)
res['projection'] = str(self.projection)
res['mongo_client_spec'] = json.dumps(self.spec)
return res
def _get_split_keys(self, desired_chunk_size_in_mb, start_pos, end_pos):
    # Calls the MongoDB splitVector command to get document ids at the split
    # positions for the desired bundle size; if the desired chunk size is
    # smaller than 1 MB, use the MongoDB default split size of 1 MB.
if desired_chunk_size_in_mb < 1:
desired_chunk_size_in_mb = 1
if start_pos >= end_pos:
# single document not splittable
return []
with MongoClient(self.uri, **self.spec) as client:
name_space = '%s.%s' % (self.db, self.coll)
return (client[self.db].command(
'splitVector',
name_space,
keyPattern={'_id': 1}, # Ascending index
min={'_id': start_pos},
max={'_id': end_pos},
maxChunkSize=desired_chunk_size_in_mb)['splitKeys'])
def _merge_id_filter(self, range_tracker):
# Merge the default filter with refined _id field range of range_tracker.
# see more at https://docs.mongodb.com/manual/reference/operator/query/and/
all_filters = {
'$and': [
self.filter.copy(),
# add additional range filter to query. $gte specifies start
# position(inclusive) and $lt specifies the end position(exclusive),
# see more at
# https://docs.mongodb.com/manual/reference/operator/query/gte/ and
# https://docs.mongodb.com/manual/reference/operator/query/lt/
{
'_id': {
'$gte': range_tracker.start_position(),
'$lt': range_tracker.stop_position()
}
},
]
}
return all_filters
def _get_head_document_id(self, sort_order):
with MongoClient(self.uri, **self.spec) as client:
cursor = client[self.db][self.coll].find(filter={}, projection=[]).sort([
('_id', sort_order)
]).limit(1)
try:
return cursor[0]['_id']
except IndexError:
raise ValueError('Empty Mongodb collection')
def _replace_none_positions(self, start_position, stop_position):
if start_position is None:
start_position = self._get_head_document_id(ASCENDING)
if stop_position is None:
last_doc_id = self._get_head_document_id(DESCENDING)
# increment last doc id binary value by 1 to make sure the last document
# is not excluded
stop_position = _ObjectIdHelper.increment_id(last_doc_id, 1)
return start_position, stop_position
class _ObjectIdHelper(object):
"""A Utility class to manipulate bson object ids."""
@classmethod
def id_to_int(cls, id):
"""
Args:
id: ObjectId required for each MongoDB document _id field.
    Returns: Converted integer value of ObjectId's 12-byte binary value.
"""
# converts object id binary to integer
# id object is bytes type with size of 12
ints = struct.unpack('>III', id.binary)
return (ints[0] << 64) + (ints[1] << 32) + ints[2]
@classmethod
def int_to_id(cls, number):
"""
Args:
number(int): The integer value to be used to convert to ObjectId.
    Returns: The ObjectId that has the 12-byte binary value converted from the
integer value.
"""
    # converts integer value to object id. Int value should be less than
    # (2 ^ 96) so it can be converted to the 12 bytes required by object id.
if number < 0 or number >= (1 << 96):
raise ValueError('number value must be within [0, %s)' % (1 << 96))
ints = [(number & 0xffffffff0000000000000000) >> 64,
(number & 0x00000000ffffffff00000000) >> 32,
number & 0x0000000000000000ffffffff]
bytes = struct.pack('>III', *ints)
return objectid.ObjectId(bytes)
@classmethod
def increment_id(cls, object_id, inc):
"""
Args:
object_id: The ObjectId to change.
inc(int): The incremental int value to be added to ObjectId.
    Returns: The ObjectId that has the binary value incremented by the inc value.
"""
# increment object_id binary value by inc value and return new object id.
id_number = _ObjectIdHelper.id_to_int(object_id)
new_number = id_number + inc
if new_number < 0 or new_number >= (1 << 96):
      raise ValueError('invalid incremental, inc value must be within ['
                       '%s, %s)' % (0 - id_number, (1 << 96) - id_number))
return _ObjectIdHelper.int_to_id(new_number)
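# A hedged round-trip sketch (assumes the bson shipped with pymongo is importable;
# the ObjectId literal is an arbitrary illustrative value): id_to_int packs the
# 12-byte ObjectId into a 96-bit integer, int_to_id inverts that mapping, and
# increment_id shifts an id by an integer offset within the same 96-bit space.
def _example_objectid_arithmetic():
  oid = objectid.ObjectId('5d1e2c0b9c3f2a4b6e8d0f12')
  as_int = _ObjectIdHelper.id_to_int(oid)
  assert _ObjectIdHelper.int_to_id(as_int) == oid
  assert _ObjectIdHelper.id_to_int(_ObjectIdHelper.increment_id(oid, 1)) == as_int + 1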
class _ObjectIdRangeTracker(OrderedPositionRangeTracker):
"""RangeTracker for tracking mongodb _id of bson ObjectId type."""
def position_to_fraction(self, pos, start, end):
pos_number = _ObjectIdHelper.id_to_int(pos)
start_number = _ObjectIdHelper.id_to_int(start)
end_number = _ObjectIdHelper.id_to_int(end)
return (pos_number - start_number) / (end_number - start_number)
def fraction_to_position(self, fraction, start, end):
start_number = _ObjectIdHelper.id_to_int(start)
end_number = _ObjectIdHelper.id_to_int(end)
total = end_number - start_number
pos = int(total * fraction + start_number)
# make sure split position is larger than start position and smaller than
# end position.
if pos <= start_number:
return _ObjectIdHelper.increment_id(start, 1)
if pos >= end_number:
return _ObjectIdHelper.increment_id(end, -1)
return _ObjectIdHelper.int_to_id(pos)
@experimental()
class WriteToMongoDB(PTransform):
"""WriteToMongoDB is a ``PTransform`` that writes a ``PCollection`` of
mongodb document to the configured MongoDB server.
  In order to make the document writes idempotent so that bundles can be
  retried without creating duplicates, the PTransform adds two transformations
  before the final write stage:
  a ``GenerateId`` transform and a ``Reshuffle`` transform.::
-----------------------------------------------
Pipeline --> |GenerateId --> Reshuffle --> WriteToMongoSink|
-----------------------------------------------
(WriteToMongoDB)
  The ``GenerateId`` transform adds a random and unique *_id* field to the
  documents if they don't already have one, using the same format as the
  MongoDB default. The ``Reshuffle`` transform makes sure that no fusion happens
  between ``GenerateId`` and the final write stage transform, so that the set of
  documents and their unique IDs are not regenerated if the final write step is
  retried due to a failure. This prevents duplicate writes of the same document
  with different unique IDs.
"""
def __init__(self,
uri='mongodb://localhost:27017',
db=None,
coll=None,
batch_size=100,
extra_client_params=None):
"""
Args:
uri (str): The MongoDB connection string following the URI format
db (str): The MongoDB database name
coll (str): The MongoDB collection name
batch_size(int): Number of documents per bulk_write to MongoDB,
default to 100
extra_client_params(dict): Optional `MongoClient
<https://api.mongodb.com/python/current/api/pymongo/mongo_client.html>`_
parameters as keyword arguments
Returns:
:class:`~apache_beam.transforms.ptransform.PTransform`
"""
if extra_client_params is None:
extra_client_params = {}
if not isinstance(db, str):
raise ValueError('WriteToMongoDB db param must be specified as a string')
if not isinstance(coll, str):
raise ValueError('WriteToMongoDB coll param must be specified as a '
'string')
self._uri = uri
self._db = db
self._coll = coll
self._batch_size = batch_size
self._spec = extra_client_params
def expand(self, pcoll):
return pcoll \
| beam.ParDo(_GenerateObjectIdFn()) \
| Reshuffle() \
| beam.ParDo(_WriteMongoFn(self._uri, self._db, self._coll,
self._batch_size, self._spec))
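# A hedged sketch (the 'username' key and the collection names are illustrative):
# because the sink upserts with ReplaceOne keyed on _id, pre-assigning a
# deterministic _id upstream makes retried bundles replace the same documents
# instead of inserting duplicates under freshly generated ids.
def _example_upsert_by_natural_key(pipeline):
  return (
      pipeline
      | beam.Map(lambda doc: dict(doc, _id=doc['username']))
      | WriteToMongoDB(uri='mongodb://localhost:27017', db='testdb', coll='users'))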
class _GenerateObjectIdFn(DoFn):
def process(self, element, *args, **kwargs):
    # If the _id field already exists we keep it as it is; otherwise the
    # transform generates a new _id field to achieve idempotent writes to MongoDB.
    if '_id' not in element:
      # objectid.ObjectId() generates a unique identifier that follows the
      # MongoDB default format; if _id is not present in a document, the MongoDB
      # server generates it with this same function upon write. However, the
      # uniqueness of the generated id may not be guaranteed if the workload is
      # distributed across too many processes. See more on the ObjectId format:
      # https://docs.mongodb.com/manual/reference/bson-types/#objectid.
element['_id'] = objectid.ObjectId()
yield element
class _WriteMongoFn(DoFn):
def __init__(self,
uri=None,
db=None,
coll=None,
batch_size=100,
extra_params=None):
if extra_params is None:
extra_params = {}
self.uri = uri
self.db = db
self.coll = coll
self.spec = extra_params
self.batch_size = batch_size
self.batch = []
def finish_bundle(self):
self._flush()
def process(self, element, *args, **kwargs):
self.batch.append(element)
if len(self.batch) >= self.batch_size:
self._flush()
def _flush(self):
if len(self.batch) == 0:
return
with _MongoSink(self.uri, self.db, self.coll, self.spec) as sink:
sink.write(self.batch)
self.batch = []
def display_data(self):
res = super(_WriteMongoFn, self).display_data()
res['uri'] = self.uri
res['database'] = self.db
res['collection'] = self.coll
res['mongo_client_params'] = json.dumps(self.spec)
res['batch_size'] = self.batch_size
return res
class _MongoSink(object):
def __init__(self, uri=None, db=None, coll=None, extra_params=None):
if extra_params is None:
extra_params = {}
self.uri = uri
self.db = db
self.coll = coll
self.spec = extra_params
self.client = None
def write(self, documents):
if self.client is None:
self.client = MongoClient(host=self.uri, **self.spec)
requests = []
for doc in documents:
      # Match the document based on its _id field: if it is not found in the
      # current collection, insert a new one; otherwise overwrite it.
requests.append(
ReplaceOne(filter={'_id': doc.get('_id', None)},
replacement=doc,
upsert=True))
resp = self.client[self.db][self.coll].bulk_write(requests)
_LOGGER.debug('BulkWrite to MongoDB result in nModified:%d, nUpserted:%d, '
'nMatched:%d, Errors:%s' %
(resp.modified_count, resp.upserted_count, resp.matched_count,
resp.bulk_api_result.get('writeErrors')))
def __enter__(self):
if self.client is None:
self.client = MongoClient(host=self.uri, **self.spec)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.client is not None:
self.client.close()
|
|
import re
import functools
try:
from urllib.parse import quote
except ImportError:
# Python 2
from urllib import quote
from . import url
__all__ = ['Template', 'expand']
patterns = re.compile(r"{([^\}]+)}")
class Template(object):
def __init__(self, url_str):
self._base = url_str
def __str__(self):
return 'Template: %s' % self._base
def expand(self, variables=None):
return url.URL(expand(self._base, variables))
def expand(template, variables=None):
"""
Expand a URL template string using the passed variables
"""
if variables is None:
variables = {}
return patterns.sub(functools.partial(_replace, variables), template)
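# A hedged sketch of the expansion rules (templates and values are illustrative
# only); the expected strings follow from the operator_map defined further below.
def _example_expansions():
    assert expand('/users/{user}', {'user': 'alice'}) == '/users/alice'
    assert expand('/search{?q,lang}', {'q': 'cat', 'lang': 'en'}) == '/search?q=cat&lang=en'
    assert expand('{/path*}', {'path': ['a', 'b']}) == '/a/b'
    assert expand('/users/{missing}', {}) == '/users/'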
# Utils
def _flatten(container):
"""
_flatten a sequence of sequences into a single list
"""
_flattened = []
for sequence in container:
_flattened.extend(sequence)
return _flattened
# Format functions
# ----------------
# These are responsible for formatting the (key, value) pair into a string
def _format_pair_no_equals(explode, separator, escape, key, value):
"""
Format a key, value pair but don't include the equals sign
when there is no value
"""
if not value:
return key
return _format_pair(explode, separator, escape, key, value)
def _format_pair_with_equals(explode, separator, escape, key, value):
"""
Format a key, value pair including the equals sign
when there is no value
"""
if not value:
return key + '='
return _format_pair(explode, separator, escape, key, value)
def _format_pair(explode, separator, escape, key, value):
if isinstance(value, (list, tuple)):
join_char = ","
if explode:
join_char = separator
try:
dict(value)
        except (TypeError, ValueError):
            # Scalar container (value is not a sequence of key/value pairs)
if explode:
items = ["%s=%s" % (key, escape(v)) for v in value]
return join_char.join(items)
else:
escaped_value = join_char.join(map(escape, value))
else:
# Tuple container
if explode:
items = ["%s=%s" % (k, escape(v)) for (k, v) in value]
return join_char.join(items)
else:
items = _flatten(value)
escaped_value = join_char.join(map(escape, items))
else:
escaped_value = escape(value)
return '%s=%s' % (key, escaped_value)
def _format_default(explode, separator, escape, key, value):
if isinstance(value, (list, tuple)):
join_char = ","
if explode:
join_char = separator
try:
dict(value)
        except (TypeError, ValueError):
            # Scalar container (value is not a sequence of key/value pairs)
escaped_value = join_char.join(map(escape, value))
else:
# Tuple container
if explode:
items = ["%s=%s" % (k, escape(v)) for (k, v) in value]
escaped_value = join_char.join(items)
else:
items = _flatten(value)
escaped_value = join_char.join(map(escape, items))
else:
escaped_value = escape(value)
return escaped_value
# Modifier functions
# -----------------
# These are responsible for modifying the variable before formatting
_identity = lambda x: x
def _truncate(string, num_chars):
return string[:num_chars]
# Splitting functions
# -------------------
# These are responsible for splitting a string into a sequence of (key,
# modifier) tuples
def _split_basic(string):
"""
Split a string into a list of tuples of the form (key, modifier_fn,
explode) where modifier_fn is a function that applies the appropriate
modification to the variable.
"""
tuples = []
for word in string.split(','):
# Attempt to split on colon
parts = word.split(':', 2)
key, modifier_fn, explode = parts[0], _identity, False
if len(parts) > 1:
modifier_fn = functools.partial(
_truncate, num_chars=int(parts[1]))
        if word[-1] == '*':
            key = word[:-1]
explode = True
tuples.append((key, modifier_fn, explode))
return tuples
def _split_operator(string):
return _split_basic(string[1:])
# Escaping functions
# ------------------
def _escape_all(value):
return url.unicode_quote(value, safe="")
def _escape_reserved(value):
return url.unicode_quote(value, safe="/!,.;")
# Operator map
# ------------
# A mapping of:
# operator -> (prefix, separator, split_fn, escape_fn, format_fn)
operator_map = {
'+': ('', ',', _split_operator, _escape_reserved, _format_default),
'#': ('#', ',', _split_operator, _escape_reserved, _format_default),
'.': ('.', '.', _split_operator, _escape_all, _format_default),
'/': ('/', '/', _split_operator, _escape_all, _format_default),
';': (';', ';', _split_operator, _escape_all, _format_pair_no_equals),
'?': ('?', '&', _split_operator, _escape_all, _format_pair_with_equals),
'&': ('&', '&', _split_operator, _escape_all, _format_pair_with_equals),
}
defaults = ('', ',', _split_basic, _escape_all, _format_default)
def _replace(variables, match):
"""
Return the appropriate replacement for `match` using the passed variables
"""
expression = match.group(1)
# Look-up chars and functions for the specified operator
(prefix_char, separator_char, split_fn, escape_fn,
format_fn) = operator_map.get(expression[0], defaults)
replacements = []
for key, modify_fn, explode in split_fn(expression):
if key in variables:
variable = modify_fn(variables[key])
replacement = format_fn(
explode, separator_char, escape_fn, key, variable)
replacements.append(replacement)
if not replacements:
return ''
return prefix_char + separator_char.join(replacements)
|
|
from twisted.internet import reactor, defer
from telephus.client import CassandraClient
from telephus.protocol import ManagedCassandraClientFactory
from telephus.cassandra.ttypes import *
import simplejson, collections, os, uuid
json_encode = simplejson.dumps
json_decode = simplejson.loads
try:
import pygtk
pygtk.require("2.0")
except:
pass
import gtk
import gtk.glade
class ChitonViewer(object):
def __init__(self, host=None, port=None):
self.gladefile = os.path.join(os.path.dirname(__file__), "chiton.glade")
self.cmanager = None
self._client = None
self._ksmap = {}
self._currentks = None
self._currentcf = None
self._pageamt = 25
self._maxcomphist = 100
self._lastcol = None
self.columns = None
self.keyspaces = None
self.windowname = "mainWindow"
self.wTree = gtk.glade.XML(self.gladefile, self.windowname)
self.window = self.wTree.get_widget(self.windowname)
self.columnsView = self.wTree.get_widget("columnsView")
self.keyspaceView = self.wTree.get_widget("keyspaceView")
self._loadprefs()
self.columnEntry = self.wTree.get_widget("columnEntry")
self.columnCompletionStore = self._setCompletion(self.columnEntry,
self._prefs['completion']['columnEntry'])
self.rowEntry = self.wTree.get_widget("rowEntry")
self.rowCompletionStore = self._setCompletion(self.rowEntry,
self._prefs['completion']['rowEntry'])
self.columnLabel = self.wTree.get_widget("columnLabel")
self.entryTable = self.wTree.get_widget("entryTable")
self.goButton = self.wTree.get_widget("goButton")
self.pageToolbar = self.wTree.get_widget("pageToolbar")
self.pagePrev = self.wTree.get_widget("prevbutton")
self.pageNext = self.wTree.get_widget("nextbutton")
self.statusbar = self.wTree.get_widget("statusbar")
self.sid = self.statusbar.get_context_id("status")
self.statusbar.push(self.sid, '')
self.goButton.connect("clicked", self.updateView)
self.pageNext.connect("clicked", self.nextPage)
self.pagePrev.connect("clicked", self.prevPage)
self.keyspaceView.get_selection().connect('changed', self.keyspaceChanged)
self.wTree.get_widget("quitmenuitem").connect("activate", self._quit)
self.wTree.get_widget("connectmenuitem").connect("activate", self._connectDialog)
self._resetpages()
self.wTree.signal_autoconnect({
"on_mainWindow_destroy": self._quit,
})
self.window.show()
if host and port:
self._connect(host, port)
def _quit(self, res=None):
if self.cmanager:
self.cmanager.shutdown()
try:
open(self.prefpath, 'w').write(json_encode(self._prefs))
except Exception, e:
print e
reactor.stop()
return False
def _prefpath(self):
return os.path.join(os.path.expanduser('~'), '.chiton.json')
prefpath = property(_prefpath)
def _loadprefs(self):
self._prefs = {}
try:
self._prefs = json_decode(open(self.prefpath).read())
except Exception, e:
print e
def ldict():
return collections.defaultdict(list)
if not self._prefs:
self._prefs = collections.defaultdict(ldict)
def _resetpages(self):
self._currpage = 1
self._firstcol = ''
self._lastcol = ''
self._lastrow = None
def _setCompletion(self, entry, data):
completer = gtk.EntryCompletion()
store = gtk.ListStore(str)
completer.set_model(store)
completer.set_text_column(0)
entry.set_completion(completer)
for item in data:
store.append([item])
return store
def _updateCompletion(self):
row = self.rowEntry.get_text()
column = self.columnEntry.get_text()
if row not in self._prefs['completion']['rowEntry']:
self.rowCompletionStore.append([row])
self._prefs['completion']['rowEntry'].append(row)
if column not in self._prefs['completion']['columnEntry']:
self.columnCompletionStore.append([column])
self._prefs['completion']['columnEntry'].append(column)
for k in ('rowEntry', 'columnEntry'):
if len(self._prefs['completion'][k]) > self._maxcomphist:
self._prefs['completion'][k].pop(0)
def _addcol(self, view, name, colId, width=None):
col = gtk.TreeViewColumn(name, gtk.CellRendererText(), text=colId)
col.set_resizable(True)
if width:
col.set_fixed_width(width)
col.set_sort_column_id(colId)
view.append_column(col)
def _status(self, status):
self.statusbar.pop(self.sid)
self.statusbar.push(self.sid, status)
def _showError(self, err):
errTree = gtk.glade.XML(self.gladefile, "errorDialog")
errorDialog = errTree.get_widget("errorDialog")
errorDialog.set_markup(str(err))
errorDialog.run()
errorDialog.destroy()
@defer.inlineCallbacks
def _connect(self, host, port):
try:
if self.cmanager:
self.cmanager.shutdown()
self.cmanager = ManagedCassandraClientFactory()
print "connecting..."
for x in xrange(3):
reactor.connectTCP(host, int(port), self.cmanager)
yield self.cmanager.deferred
yield self._setupKeyspaces()
self._setupColumns()
except Exception, e:
if self.cmanager:
self.cmanager.shutdown()
self.cmanager = None
self._status(str(e))
self._showError(e)
raise
defer.returnValue(None)
@defer.inlineCallbacks
def _connectDialog(self, source=None):
cdlg = ConnectDialog(self.gladefile)
result, host, port = cdlg.run()
if result == 0:
yield self._connect(host, port)
@defer.inlineCallbacks
def _setupKeyspaces(self):
if self.keyspaces:
self.keyspaces.clear()
for c in self.keyspaceView.get_columns():
self.keyspaceView.remove_column(c)
self._addcol(self.keyspaceView, 'Keyspaces', 0, width=20)
self.keyspaces = gtk.TreeStore(str)
self.keyspaceView.set_model(self.keyspaces)
c = CassandraClient(self.cmanager, '')
self._status("Fetching keyspaces...")
ks = yield c.get_string_list_property('keyspaces')
self._status("Found %s keyspaces" % len(ks))
for i,k in enumerate(ks):
self.keyspaces.append(None, [k])
kiter = self.keyspaces.get_iter(str(i))
self._status("Describing keyspace '%s'..." % k)
r = yield c.describe_keyspace(k)
self._status("Received description of keyspace '%s':"""
"%s column families" % (k, len(r)))
self._ksmap[k] = r
print r
for col, info in r.items():
self.keyspaces.append(kiter, [col])
def _setupColumns(self):
if self.columns:
self.columns.clear()
for c in self.columnsView.get_columns():
self.columnsView.remove_column(c)
self._addcol(self.columnsView, 'Column name', 0)
self._addcol(self.columnsView, 'Value', 1)
self._addcol(self.columnsView, 'Timestamp', 2)
self.columns = gtk.ListStore(str, str, str)
self.columnsView.set_model(self.columns)
def keyspaceChanged(self, selection):
self._resetpages()
tree, path = selection.get_selected_rows()
if path:
if len(path[0]) == 1:
self._currentks = tree[path[0]][0]
self._currentcf = None
self.entryTable.hide()
elif len(path[0]) == 2:
self._currentks = tree[(path[0][0],)][0]
self._currentcf = tree[path[0]][0]
self.entryTable.show()
self.columns.clear()
if self._currentcf:
self._client = CassandraClient(self.cmanager, self._currentks)
cf = self._ksmap[self._currentks][self._currentcf]
if cf['Type'] == 'Super':
self._status("Column family '%s': Type: %s, CompareWith: %s, """
"CompareSubWith: %s" % (self._currentcf, cf['Type'],
cf['CompareWith'], cf['CompareSubcolumnsWith']))
self.columnEntry.show()
self.columnLabel.show()
else:
self._status("Column family '%s': Type: %s, CompareWith: %s """
% (self._currentcf, cf['Type'], cf['CompareWith']))
self.columnEntry.hide()
self.columnLabel.hide()
def decodeColumn(self, column):
unames = ['org.apache.cassandra.db.marshal.TimeUUIDType',
'org.apache.cassandra.db.marshal.LexicalUUIDType']
cf = self._ksmap[self._currentks][self._currentcf]
if cf['Type'] == 'Super':
compare = 'CompareSubcolumnsWith'
else:
compare = 'CompareWith'
if cf[compare] in unames:
return uuid.UUID(bytes=column)
else:
return column
@defer.inlineCallbacks
def updateView(self, source=None, start='', reverse=False):
if source == self.goButton:
self._resetpages()
self._updateCompletion()
try:
if self._ksmap[self._currentks][self._currentcf]['Type'] == 'Super':
path = ColumnParent(column_family=self._currentcf,
super_column=self.columnEntry.get_text())
else:
path = ColumnParent(column_family=self._currentcf)
self._status("Fetching data...")
cols = yield self._client.get_slice(self.rowEntry.get_text(), path,
count=self._pageamt, start=start, reverse=reverse)
self._status("%s columns retrieved" % len(cols))
self.columns.clear()
if reverse:
cols.reverse()
for col in cols:
self.columns.append([self.decodeColumn(col.column.name),
col.column.value, col.column.timestamp])
if cols:
self._firstcol = cols[0].column.name
self._lastcol = cols[-1].column.name
if self._lastrow == self.rowEntry.get_text():
if reverse:
self._currpage -= 1
else:
self._currpage += 1
self._lastrow = self.rowEntry.get_text()
if self._currpage > 1:
self.pagePrev.set_property('sensitive', True)
else:
self.pagePrev.set_property('sensitive', False)
if len(cols) >= self._pageamt:
self.pageNext.set_property('sensitive', True)
else:
self.pageNext.set_property('sensitive', False)
except Exception, e:
self._showError(e)
raise
def nextPage(self, source):
self.updateView(start=self._lastcol)
def prevPage(self, source):
self.updateView(start=self._firstcol, reverse=True)
class ConnectDialog(object):
def __init__(self, gladefile):
self.wTree = gtk.glade.XML(gladefile, "connectDialog")
self.dialog = self.wTree.get_widget("connectDialog")
self.hostEntry = self.wTree.get_widget("hostEntry")
self.portEntry = self.wTree.get_widget("portEntry")
def run(self):
self.result = self.dialog.run()
host, port = self.hostEntry.get_text(), self.portEntry.get_text()
self.dialog.destroy()
return self.result, host, port
|
|
# -*- test-case-name: twisted.protocols.test.test_tls,twisted.internet.test.test_tls,twisted.test.test_sslverify -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of a TLS transport (L{ISSLTransport}) as an
L{IProtocol<twisted.internet.interfaces.IProtocol>} layered on top of any
L{ITransport<twisted.internet.interfaces.ITransport>} implementation, based on
U{OpenSSL<http://www.openssl.org>}'s memory BIO features.
L{TLSMemoryBIOFactory} is a L{WrappingFactory} which wraps protocols created by
the factory it wraps with L{TLSMemoryBIOProtocol}. L{TLSMemoryBIOProtocol}
intercedes between the underlying transport and the wrapped protocol to
implement SSL and TLS. Typical usage of this module looks like this::
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.internet.protocol import ServerFactory
from twisted.internet.ssl import PrivateCertificate
from twisted.internet import reactor
from someapplication import ApplicationProtocol
serverFactory = ServerFactory()
serverFactory.protocol = ApplicationProtocol
certificate = PrivateCertificate.loadPEM(certPEMData)
contextFactory = certificate.options()
tlsFactory = TLSMemoryBIOFactory(contextFactory, False, serverFactory)
reactor.listenTCP(12345, tlsFactory)
reactor.run()
This API offers somewhat more flexibility than
L{twisted.internet.interfaces.IReactorSSL}; for example, a L{TLSMemoryBIOProtocol}
instance can use another instance of L{TLSMemoryBIOProtocol} as its transport,
yielding TLS over TLS - useful to implement onion routing. It can also be used
to run TLS over unusual transports, such as UNIX sockets and stdio.
"""
from __future__ import division, absolute_import
from OpenSSL.SSL import Error, ZeroReturnError, WantReadError
from OpenSSL.SSL import TLSv1_METHOD, Context, Connection
try:
Connection(Context(TLSv1_METHOD), None)
except TypeError as e:
if str(e) != "argument must be an int, or have a fileno() method.":
raise
raise ImportError("twisted.protocols.tls requires pyOpenSSL 0.10 or newer.")
from zope.interface import implementer, providedBy, directlyProvides
from twisted.python.compat import unicode
from twisted.python.failure import Failure
from twisted.python import log
from twisted.python._reflectpy3 import safe_str
from twisted.internet.interfaces import ISystemHandle, ISSLTransport
from twisted.internet.interfaces import IPushProducer, ILoggingContext
from twisted.internet.main import CONNECTION_LOST
from twisted.internet.protocol import Protocol
from twisted.internet.task import cooperate
from twisted.protocols.policies import ProtocolWrapper, WrappingFactory
@implementer(IPushProducer)
class _PullToPush(object):
"""
    An adapter that converts a non-streaming producer into a streaming producer.
Because of limitations of the producer API, this adapter requires the
cooperation of the consumer. When the consumer's C{registerProducer} is
called with a non-streaming producer, it must wrap it with L{_PullToPush}
and then call C{startStreaming} on the resulting object. When the
consumer's C{unregisterProducer} is called, it must call
C{stopStreaming} on the L{_PullToPush} instance.
If the underlying producer throws an exception from C{resumeProducing},
the producer will be unregistered from the consumer.
    @ivar _producer: the underlying non-streaming producer.
@ivar _consumer: the consumer with which the underlying producer was
registered.
@ivar _finished: C{bool} indicating whether the producer has finished.
@ivar _coopTask: the result of calling L{cooperate}, the task driving the
streaming producer.
"""
_finished = False
def __init__(self, pullProducer, consumer):
self._producer = pullProducer
self._consumer = consumer
def _pull(self):
"""
A generator that calls C{resumeProducing} on the underlying producer
forever.
If C{resumeProducing} throws an exception, the producer is
unregistered, which should result in streaming stopping.
"""
while True:
try:
self._producer.resumeProducing()
except:
log.err(None, "%s failed, producing will be stopped:" %
(safe_str(self._producer),))
try:
self._consumer.unregisterProducer()
# The consumer should now call stopStreaming() on us,
# thus stopping the streaming.
except:
# Since the consumer blew up, we may not have had
# stopStreaming() called, so we just stop on our own:
log.err(None, "%s failed to unregister producer:" %
(safe_str(self._consumer),))
self._finished = True
return
yield None
def startStreaming(self):
"""
This should be called by the consumer when the producer is registered.
Start streaming data to the consumer.
"""
self._coopTask = cooperate(self._pull())
def stopStreaming(self):
"""
This should be called by the consumer when the producer is unregistered.
Stop streaming data to the consumer.
"""
if self._finished:
return
self._finished = True
self._coopTask.stop()
# IPushProducer implementation:
def pauseProducing(self):
self._coopTask.pause()
def resumeProducing(self):
self._coopTask.resume()
def stopProducing(self):
self.stopStreaming()
self._producer.stopProducing()
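def _examplePullProducerRegistration(consumer, pullProducer):
    """
    A minimal sketch (not part of the original module) of the cooperation the
    L{_PullToPush} docstring describes: the consumer wraps the non-streaming
    producer, registers the wrapper as a streaming producer, and starts it.
    The consumer is then expected to call C{stopStreaming} on the returned
    adapter from its C{unregisterProducer} implementation.
    """
    adapter = _PullToPush(pullProducer, consumer)
    consumer.registerProducer(adapter, True)
    adapter.startStreaming()
    return adapter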
@implementer(IPushProducer)
class _ProducerMembrane(object):
"""
Stand-in for producer registered with a L{TLSMemoryBIOProtocol} transport.
    Ensures that producer pause/resume events from the underlying transport are
coordinated with pause/resume events from the TLS layer.
@ivar _producer: The application-layer producer.
"""
_producerPaused = False
def __init__(self, producer):
self._producer = producer
def pauseProducing(self):
"""
C{pauseProducing} the underlying producer, if it's not paused.
"""
if self._producerPaused:
return
self._producerPaused = True
self._producer.pauseProducing()
def resumeProducing(self):
"""
C{resumeProducing} the underlying producer, if it's paused.
"""
if not self._producerPaused:
return
self._producerPaused = False
self._producer.resumeProducing()
def stopProducing(self):
"""
C{stopProducing} the underlying producer.
There is only a single source for this event, so it's simply passed
on.
"""
self._producer.stopProducing()
@implementer(ISystemHandle, ISSLTransport)
class TLSMemoryBIOProtocol(ProtocolWrapper):
"""
L{TLSMemoryBIOProtocol} is a protocol wrapper which uses OpenSSL via a
memory BIO to encrypt bytes written to it before sending them on to the
underlying transport and decrypts bytes received from the underlying
transport before delivering them to the wrapped protocol.
In addition to producer events from the underlying transport, the need to
wait for reads before a write can proceed means the
L{TLSMemoryBIOProtocol} may also want to pause a producer. Pause/resume
events are therefore merged using the L{_ProducerMembrane}
wrapper. Non-streaming (pull) producers are supported by wrapping them
with L{_PullToPush}.
@ivar _tlsConnection: The L{OpenSSL.SSL.Connection} instance which is
        encrypting and decrypting this connection.
@ivar _lostTLSConnection: A flag indicating whether connection loss has
already been dealt with (C{True}) or not (C{False}). TLS disconnection
is distinct from the underlying connection being lost.
@ivar _writeBlockedOnRead: A flag indicating whether further writing must
wait for data to be received (C{True}) or not (C{False}).
@ivar _appSendBuffer: A C{list} of C{str} of application-level (cleartext)
data which is waiting for C{_writeBlockedOnRead} to be reset to
C{False} so it can be passed to and perhaps accepted by
C{_tlsConnection.send}.
@ivar _connectWrapped: A flag indicating whether or not to call
C{makeConnection} on the wrapped protocol. This is for the reactor's
L{twisted.internet.interfaces.ITLSTransport.startTLS} implementation,
since it has a protocol which it has already called C{makeConnection}
on, and which has no interest in a new transport. See #3821.
@ivar _handshakeDone: A flag indicating whether or not the handshake is
known to have completed successfully (C{True}) or not (C{False}). This
is used to control error reporting behavior. If the handshake has not
completed, the underlying L{OpenSSL.SSL.Error} will be passed to the
application's C{connectionLost} method. If it has completed, any
unexpected L{OpenSSL.SSL.Error} will be turned into a
L{ConnectionLost}. This is weird; however, it is simply an attempt at
a faithful re-implementation of the behavior provided by
L{twisted.internet.ssl}.
@ivar _reason: If an unexpected L{OpenSSL.SSL.Error} occurs which causes
the connection to be lost, it is saved here. If appropriate, this may
be used as the reason passed to the application protocol's
C{connectionLost} method.
@ivar _producer: The current producer registered via C{registerProducer},
or C{None} if no producer has been registered or a previous one was
unregistered.
"""
_reason = None
_handshakeDone = False
_lostTLSConnection = False
_writeBlockedOnRead = False
_producer = None
def __init__(self, factory, wrappedProtocol, _connectWrapped=True):
ProtocolWrapper.__init__(self, factory, wrappedProtocol)
self._connectWrapped = _connectWrapped
def getHandle(self):
"""
Return the L{OpenSSL.SSL.Connection} object being used to encrypt and
decrypt this connection.
This is done for the benefit of L{twisted.internet.ssl.Certificate}'s
C{peerFromTransport} and C{hostFromTransport} methods only. A
different system handle may be returned by future versions of this
method.
"""
return self._tlsConnection
def makeConnection(self, transport):
"""
Connect this wrapper to the given transport and initialize the
necessary L{OpenSSL.SSL.Connection} with a memory BIO.
"""
tlsContext = self.factory._contextFactory.getContext()
self._tlsConnection = Connection(tlsContext, None)
if self.factory._isClient:
self._tlsConnection.set_connect_state()
else:
self._tlsConnection.set_accept_state()
self._appSendBuffer = []
# Add interfaces provided by the transport we are wrapping:
for interface in providedBy(transport):
directlyProvides(self, interface)
# Intentionally skip ProtocolWrapper.makeConnection - it might call
# wrappedProtocol.makeConnection, which we want to make conditional.
Protocol.makeConnection(self, transport)
self.factory.registerProtocol(self)
if self._connectWrapped:
# Now that the TLS layer is initialized, notify the application of
# the connection.
ProtocolWrapper.makeConnection(self, transport)
# Now that we ourselves have a transport (initialized by the
# ProtocolWrapper.makeConnection call above), kick off the TLS
# handshake.
try:
self._tlsConnection.do_handshake()
except WantReadError:
# This is the expected case - there's no data in the connection's
# input buffer yet, so it won't be able to complete the whole
# handshake now. If this is the speak-first side of the
# connection, then some bytes will be in the send buffer now; flush
# them.
self._flushSendBIO()
def _flushSendBIO(self):
"""
Read any bytes out of the send BIO and write them to the underlying
transport.
"""
try:
bytes = self._tlsConnection.bio_read(2 ** 15)
except WantReadError:
# There may be nothing in the send BIO right now.
pass
else:
self.transport.write(bytes)
def _flushReceiveBIO(self):
"""
Try to receive any application-level bytes which are now available
because of a previous write into the receive BIO. This will take
care of delivering any application-level bytes which are received to
the protocol, as well as handling of the various exceptions which
can come from trying to get such bytes.
"""
# Keep trying this until an error indicates we should stop or we
# close the connection. Looping is necessary to make sure we
# process all of the data which was put into the receive BIO, as
# there is no guarantee that a single recv call will do it all.
while not self._lostTLSConnection:
try:
bytes = self._tlsConnection.recv(2 ** 15)
except WantReadError:
# The newly received bytes might not have been enough to produce
# any application data.
break
except ZeroReturnError:
# TLS has shut down and no more TLS data will be received over
# this connection.
self._shutdownTLS()
                # Passing in None means the user protocol's connectionLost
# will get called with reason from underlying transport:
self._tlsShutdownFinished(None)
except Error as e:
# Something went pretty wrong. For example, this might be a
# handshake failure (because there were no shared ciphers, because
# a certificate failed to verify, etc). TLS can no longer proceed.
# Squash EOF in violation of protocol into ConnectionLost; we
                # create Failure before calling _flushSendBIO so that no new
# exception will get thrown in the interim.
if e.args[0] == -1 and e.args[1] == 'Unexpected EOF':
failure = Failure(CONNECTION_LOST)
else:
failure = Failure()
self._flushSendBIO()
self._tlsShutdownFinished(failure)
else:
# If we got application bytes, the handshake must be done by
# now. Keep track of this to control error reporting later.
self._handshakeDone = True
ProtocolWrapper.dataReceived(self, bytes)
# The received bytes might have generated a response which needs to be
# sent now. For example, the handshake involves several round-trip
# exchanges without ever producing application-bytes.
self._flushSendBIO()
def dataReceived(self, bytes):
"""
Deliver any received bytes to the receive BIO and then read and deliver
to the application any application-level data which becomes available
as a result of this.
"""
self._tlsConnection.bio_write(bytes)
if self._writeBlockedOnRead:
# A read just happened, so we might not be blocked anymore. Try to
# flush all the pending application bytes.
self._writeBlockedOnRead = False
appSendBuffer = self._appSendBuffer
self._appSendBuffer = []
for bytes in appSendBuffer:
self._write(bytes)
if (not self._writeBlockedOnRead and self.disconnecting and
self.producer is None):
self._shutdownTLS()
if self._producer is not None:
self._producer.resumeProducing()
self._flushReceiveBIO()
def _shutdownTLS(self):
"""
Initiate, or reply to, the shutdown handshake of the TLS layer.
"""
shutdownSuccess = self._tlsConnection.shutdown()
self._flushSendBIO()
if shutdownSuccess:
# Both sides have shutdown, so we can start closing lower-level
# transport. This will also happen if we haven't started
# negotiation at all yet, in which case shutdown succeeds
# immediately.
self.transport.loseConnection()
def _tlsShutdownFinished(self, reason):
"""
Called when TLS connection has gone away; tell underlying transport to
disconnect.
"""
self._reason = reason
self._lostTLSConnection = True
# Using loseConnection causes the application protocol's
# connectionLost method to be invoked non-reentrantly, which is always
# a nice feature. However, for error cases (reason != None) we might
# want to use abortConnection when it becomes available. The
# loseConnection call is basically tested by test_handshakeFailure.
# At least one side will need to do it or the test never finishes.
self.transport.loseConnection()
def connectionLost(self, reason):
"""
Handle the possible repetition of calls to this method (due to either
the underlying transport going away or due to an error at the TLS
layer) and make sure the base implementation only gets invoked once.
"""
if not self._lostTLSConnection:
# Tell the TLS connection that it's not going to get any more data
# and give it a chance to finish reading.
self._tlsConnection.bio_shutdown()
self._flushReceiveBIO()
self._lostTLSConnection = True
reason = self._reason or reason
self._reason = None
ProtocolWrapper.connectionLost(self, reason)
def loseConnection(self):
"""
Send a TLS close alert and close the underlying connection.
"""
if self.disconnecting:
return
self.disconnecting = True
if not self._writeBlockedOnRead and self._producer is None:
self._shutdownTLS()
def write(self, bytes):
"""
Process the given application bytes and send any resulting TLS traffic
which arrives in the send BIO.
If C{loseConnection} was called, subsequent calls to C{write} will
drop the bytes on the floor.
"""
if isinstance(bytes, unicode):
raise TypeError("Must write bytes to a TLS transport, not unicode.")
# Writes after loseConnection are not supported, unless a producer has
# been registered, in which case writes can happen until the producer
# is unregistered:
if self.disconnecting and self._producer is None:
return
self._write(bytes)
def _write(self, bytes):
"""
Process the given application bytes and send any resulting TLS traffic
which arrives in the send BIO.
This may be called by C{dataReceived} with bytes that were buffered
before C{loseConnection} was called, which is why this function
doesn't check for disconnection but accepts the bytes regardless.
"""
if self._lostTLSConnection:
return
leftToSend = bytes
while leftToSend:
try:
sent = self._tlsConnection.send(leftToSend)
except WantReadError:
self._writeBlockedOnRead = True
self._appSendBuffer.append(leftToSend)
if self._producer is not None:
self._producer.pauseProducing()
break
except Error:
# Pretend TLS connection disconnected, which will trigger
# disconnect of underlying transport. The error will be passed
# to the application protocol's connectionLost method. The
# other SSL implementation doesn't, but losing helpful
# debugging information is a bad idea.
self._tlsShutdownFinished(Failure())
break
else:
# If we sent some bytes, the handshake must be done. Keep
# track of this to control error reporting behavior.
self._handshakeDone = True
self._flushSendBIO()
leftToSend = leftToSend[sent:]
def writeSequence(self, iovec):
"""
Write a sequence of application bytes by joining them into one string
and passing them to L{write}.
"""
self.write(b"".join(iovec))
def getPeerCertificate(self):
return self._tlsConnection.get_peer_certificate()
def registerProducer(self, producer, streaming):
# If we've already disconnected, nothing to do here:
if self._lostTLSConnection:
producer.stopProducing()
return
# If we received a non-streaming producer, wrap it so it becomes a
# streaming producer:
if not streaming:
producer = streamingProducer = _PullToPush(producer, self)
producer = _ProducerMembrane(producer)
# This will raise an exception if a producer is already registered:
self.transport.registerProducer(producer, True)
self._producer = producer
# If we received a non-streaming producer, we need to start the
# streaming wrapper:
if not streaming:
streamingProducer.startStreaming()
def unregisterProducer(self):
# If we received a non-streaming producer, we need to stop the
# streaming wrapper:
if isinstance(self._producer._producer, _PullToPush):
self._producer._producer.stopStreaming()
self._producer = None
self._producerPaused = False
self.transport.unregisterProducer()
if self.disconnecting and not self._writeBlockedOnRead:
self._shutdownTLS()
class TLSMemoryBIOFactory(WrappingFactory):
"""
L{TLSMemoryBIOFactory} adds TLS to connections.
@ivar _contextFactory: The TLS context factory which will be used to define
certain TLS connection parameters.
@ivar _isClient: A flag which is C{True} if this is a client TLS
connection, C{False} if it is a server TLS connection.
"""
protocol = TLSMemoryBIOProtocol
noisy = False # disable unnecessary logging.
def __init__(self, contextFactory, isClient, wrappedFactory):
WrappingFactory.__init__(self, wrappedFactory)
self._contextFactory = contextFactory
self._isClient = isClient
# Force some parameter checking in pyOpenSSL. It's better to fail now
# than after we've set up the transport.
contextFactory.getContext()
def logPrefix(self):
"""
Annotate the wrapped factory's log prefix with some text indicating TLS
is in use.
@rtype: C{str}
"""
if ILoggingContext.providedBy(self.wrappedFactory):
logPrefix = self.wrappedFactory.logPrefix()
else:
logPrefix = self.wrappedFactory.__class__.__name__
return "%s (TLS)" % (logPrefix,)
|
|
from cStringIO import StringIO
from hashlib import sha1
import math
import os
from time import time
import Image
from django.conf import settings
from django.db import IntegrityError
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from canvas import util
from canvas.models import Content, Comment, ContentUrlMapping, Visibility
from canvas.redis_models import redis
from configuration import Config
from realtime.footer import Footer
# Workaround jpeg quality saving issues:
# http://mail.python.org/pipermail/image-sig/1999-August/000816.html
import ImageFile
ImageFile.MAXBLOCK = 4 * 1024 * 10214 # default is 64k
EXTENSION_FROM_FORMAT = {
'JPEG': '.jpg',
'GIF': '.gif',
'PNG': '.png',
'BMP': '.bmp',
}
hexdigest = lambda data: sha1(data).hexdigest()
def count_frames(img):
frames = 1
if img.format == 'GIF':
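        # PIL exposes additional GIF frames only through seek()/tell(), so count the
        # frames by seeking forward until EOFError, then rewind to frame 0.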
try:
while True:
img.seek(img.tell() + 1)
except EOFError:
frames = img.tell()
finally:
img.seek(0)
return frames
def update_metadata(content, meta):
content.alpha = meta['alpha']
content.animated = meta['original'].get('animated', False)
def generate_thumbnails(image_file_data, fs=None, image_type=None, filename=None, exclude_types=[]):
"""
Specify an `image_type` to recreate just one image type.
Returns a metadata object.
"""
if fs is None:
from canvas.upload import get_fs
fs = get_fs(*settings.IMAGE_FS)
thumbnailer = Thumbnailer(fs)
return thumbnailer.store(image_file_data, image_type=image_type, filename=filename, exclude_types=exclude_types)
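# Illustrative sketch (added for exposition; not part of the original module): generating
# the full thumbnail set for raw image bytes, using the default filesystem configured by
# settings.IMAGE_FS. The file name is hypothetical.
def _example_generate_thumbnails(path='upload.png'):
    with open(path, 'rb') as handle:
        meta = generate_thumbnails(handle.read())
    # `meta` maps the image types defined in Thumbnailer.META (plus 'id' and 'alpha')
    # to descriptors of the stored files.
    return meta['id'], meta['original']['name']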
def create_content(ip, fs, data, remix_of, stamps_used, text_used, source_url='', is_quest=False):
exclude_types = []
if settings.PROJECT == 'drawquest':
if not is_quest:
exclude_types = ['homepage_featured']
meta = generate_thumbnails(data, fs=fs, exclude_types=exclude_types)
if remix_of:
remix_of = Content.all_objects.get(id=remix_of)
remix_of.hide_if_unpublished()
else:
remix_of = None
if stamps_used:
stamps_used = [Content.all_objects.get_or_none(id=stamp_id) for stamp_id in stamps_used if stamp_id]
stamps_used = [stamp for stamp in stamps_used if stamp]
try:
content = Content.all_objects.get(id=meta['id'])
# Don't allow uploading content that has been disabled.
if content.visibility == Visibility.DISABLED:
return {
'success': False,
'reason': 'This image has been banned.',
}
except Content.DoesNotExist:
url_mapping = ContentUrlMapping()
url_mapping.save()
content = Content(
id = meta['id'],
visibility = Visibility.UNPUBLISHED,
url_mapping = url_mapping,
ip = ip,
timestamp = time(),
remix_of = remix_of,
remix_text = text_used,
source_url = source_url,
)
update_metadata(content, meta)
try:
content.save()
            except IntegrityError:
                # Race condition: another request saved the same content first. Retry
                # with the original identifiers (remix_of and stamps_used were resolved
                # to model instances above) and preserve the is_quest flag.
                return create_content(ip, fs, data,
                                      remix_of.id if remix_of else None,
                                      [stamp.id for stamp in stamps_used] if stamps_used else stamps_used,
                                      text_used, source_url, is_quest=is_quest)
content.stamps_used.add(*stamps_used)
redis.set(content.details_key, util.dumps(meta))
existing_url = content.first_caption and content.first_caption.details().url
return {'success': True, 'content': content.details.force(), 'existing_url': existing_url}
def update_all_content(fs, resume_percent=0.0, stop_percent=1.0, image_type=None):
total = Content.all_objects.all().count()
    start_slice = int(math.floor(float(resume_percent) * total))
    stop_slice = int(math.ceil(float(stop_percent) * total))
print "Rethumbnailing images %s-%s of %s" % (start_slice, stop_slice, total)
contents = Content.all_objects.all()
for i, content in enumerate(contents[start_slice:stop_slice]):
print "%05.2f%% complete. Updating: %s" % ((i+start_slice) * 100.0 / total, content.id)
try:
update(fs, content, image_type=image_type, save_to_db=False)
except Exception:
import traceback
traceback.print_exc()
print "ERROR: Something is wrong with content: %s" % content.details()
def update(fs, content, image_type, save_to_db=True):
filename = content.details()['original']['name']
# Prevent issues with unicode filenames.
filename = filename.encode('ascii')
data = fs.read(filename)
thumbnailer = Thumbnailer(fs)
meta = util.loads(redis.get(content.details_key))
meta.update(thumbnailer.store(data, image_type))
update_metadata(content, meta)
if save_to_db:
content.save()
redis.set(content.details_key, util.dumps(meta))
content.details.force()
def image_entropy(img):
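    # Shannon entropy (in bits) of the pixel-value histogram: H = -sum(p * log2(p)).
    # The intelligent cropping in thumbnail() below keeps the slice of the image
    # with the higher entropy, i.e. the visually "busier" part.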
histogram = img.histogram()
histogram_length = sum(histogram)
samples_probability = [float(h) / histogram_length for h in histogram]
return -sum([p * math.log(p, 2) for p in samples_probability if p != 0])
def thumbnail(img, x, y, crop_y=False, crop_x=False, intelligent_crop=False, filter_=Image.ANTIALIAS):
# Slightly prefer the top of images when cropping, if the bottom isn't much more interesting.
top_entropy_bias = 1.25
if img.mode == 'RGBA' or (img.mode == 'P' and 'transparency' in img.info):
convert_to = 'RGBA'
else:
convert_to = 'RGB'
thumb = img.convert(convert_to)
ix, iy = thumb.size
if crop_y and crop_x:
if ix > iy:
crop_y = None
else:
crop_x = None
cy, cx = 0, 0
if not crop_y and not crop_x:
thumb.thumbnail((x,y), filter_)
elif crop_y and not crop_x:
target_aspect = float(y)/x
aspect = float(iy)/ix
# Rescale horizontally if necessary.
fx, fy = float(x), float(y)
if ix > fx:
scale = fx / ix
new_size = [int(math.ceil(v)) for v in [(ix * scale), (iy * scale)]]
thumb = thumb.resize(new_size, filter_)
ix, iy = thumb.size
# Crop vertically.
if intelligent_crop:
while (iy - cy) > y:
# Slice until we're at the target size
slice_height = min((iy - cy) - y, 8)
top = thumb.crop((0, cy, ix, cy + slice_height))
bottom = thumb.crop((0, iy - slice_height, ix, iy))
if image_entropy(top) * top_entropy_bias > image_entropy(bottom):
iy -= slice_height
else:
cy += slice_height
else:
if iy > y:
strip = (iy - y) / 2.0
iy -= int(math.ceil(strip))
cy = int(math.floor(strip))
thumb = thumb.crop((cx, cy, ix, iy))
elif crop_x and not crop_y:
target_aspect = float(y)/x
aspect = float(iy)/ix
# Rescale vertically if necessary.
fx, fy = float(x), float(y)
if iy > fy:
scale = fy / iy
new_size = [int(math.ceil(v)) for v in [(ix * scale), (iy * scale)]]
thumb = thumb.resize(new_size, filter_)
ix, iy = thumb.size
# Crop horizontally.
if intelligent_crop:
while (ix - cx) > x:
# Slice until we're at the target size
slice_width = min((ix - cx) - x, 8)
left = thumb.crop((cx, 0, cx + slice_width, iy))
right = thumb.crop((ix - slice_width, 0, ix, iy))
if image_entropy(left) * top_entropy_bias > image_entropy(right):
ix -= slice_width
else:
cx += slice_width
else:
if ix > x:
strip = (ix - x) / 2.0
ix -= int(math.ceil(strip))
cx = int(math.floor(strip))
thumb = thumb.crop((cx, cy, ix, iy))
return thumb
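# Illustrative sketch (added for exposition; not part of the original module): producing
# a 100x100 intelligently cropped square with the thumbnail() helper above. The file
# names are hypothetical; PNG output sidesteps JPEG's lack of alpha support.
def _example_square_thumbnail(src_path='photo.jpg', dst_path='photo_square.png'):
    img = Image.open(src_path)
    square = thumbnail(img, 100, 100, crop_x=True, crop_y=True, intelligent_crop=True)
    square.save(dst_path, 'PNG')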
def determine_alpha(img):
""" Insanely brittle, because this stuff seems really untested in PIL :( """
img.load()
if 'P' in img.getbands():
img = img.convert('RGBA')
img.load()
bands = dict(zip(img.getbands(), img.getextrema()))
amin, amax = bands.get('A', [255,255])
return amin < 0xFF
class Thumbnailer(object):
if settings.PROJECT == 'canvas':
# ==> WHEN UPDATING THESE TYPES, BE SURE TO UPDATE common.js::canvas.Content.types. <==
META = {
'id': lambda self, img, data, **kwargs: hexdigest(data),
'original': lambda self, img, data, **kwargs: self.store_image(img, filedata=data),
'tiny_square': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 30,30, crop_x=True, crop_y=True, intelligent_crop=True), jpeg=True),
'small_square': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 50,50, crop_x=True, crop_y=True, intelligent_crop=True), jpeg=True),
'square': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 100,100, crop_x=True, crop_y=True, intelligent_crop=True), jpeg=True),
'medium_square': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 175,175, crop_x=True, crop_y=True, intelligent_crop=True), jpeg=True),
'thumbnail': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 160,100), jpeg=True),
'stream': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 400,250), jpeg=True),
'small_column': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 150,500, crop_y=True, intelligent_crop=True), jpeg=True),
'column': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 250,700, crop_y=True, intelligent_crop=True), jpeg=True),
'explore_column': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 228,700, crop_y=True, intelligent_crop=True), jpeg=True),
'giant': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 600,3000)),
'mobile': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 600,3000), jpeg=True, quality=70),
}
CLONED_META = {}
# These must be specified via `image_type` - they're not auto-generated.
MANUAL_META = {
'footer': lambda self, img, data, filename=None: self.store_image(img, filedata=data, filename=filename),
}
elif settings.PROJECT == 'drawquest':
META = {
'id': lambda self, img, data, **kwargs: hexdigest(data),
'original': lambda self, img, data, **kwargs: self.store_image(img, filedata=data),
'gallery': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 1212,908, crop_y=True), jpeg=True),
'homepage_featured': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 783,588, crop_y=True)),
'archive': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 277,207, crop_y=True), jpeg=True),
'activity': lambda self, img, data, **kwargs: self.store_image(thumbnail(img, 114,85, crop_y=True, filter_=Image.BICUBIC), jpeg=True),
}
CLONED_META = {
'editor_template': 'original',
}
MANUAL_META = {}
def __init__(self, fs):
"""
fs:
A "filesystem". See realtime.server.get_local_fs(), or look at test_thumbnailer.py
"""
self.fs = fs
def store(self, data, image_type=None, filename=None, exclude_types=[]):
""" Saves a plethora of different versions of the image. See META. """
if filename is not None and image_type is None:
raise ValueError("Cannot specify a filename without also specifying a single image_type")
img = Image.open(StringIO(data))
if img.format not in EXTENSION_FROM_FORMAT:
raise IOError("Unknown format type")
meta = {}
# Rethumbnail everything, or just the specified type, and alpha if we rethumbnailed 'original'.
calculate_alpha = True
if image_type is None:
for _image_type, fn in self.META.items():
if _image_type in exclude_types:
continue
meta[_image_type] = fn(self, img, data, filename=filename)
for _clone_type, _image_type in self.CLONED_META.items():
if _clone_type in exclude_types:
raise Exception("Can't exclude a cloned thumbnail type.")
meta[_clone_type] = meta[_image_type]
else:
if image_type in self.CLONED_META:
raise Exception("Regenerate {} instead, since this is just a cloned thumbnail.".format(self.CLONED_META[image_type]))
elif image_type in exclude_types:
raise Exception("Can't exclude a type that you're explicitly generating.")
            # dict.get() evaluates its default eagerly, so only index MANUAL_META
            # when the type is genuinely not in META.
            try:
                fn = self.META[image_type]
            except KeyError:
                fn = self.MANUAL_META[image_type]
meta[image_type] = fn(self, img, data, filename=filename)
calculate_alpha = image_type == 'original'
for _clone_type, _image_type in self.CLONED_META.items():
if _image_type == image_type:
meta[_clone_type] = meta[image_type]
if calculate_alpha:
# This must go last, because PIL doesn't know how to non-destructively get extrema (giant fucking bug
# in PIL)
meta['alpha'] = determine_alpha(img)
return meta
def store_image(self, img, filedata=None, filename=None, jpeg=False, quality=90):
format_ = img.format or ('JPEG' if img.mode == 'RGB' else 'PNG')
if jpeg:
format_ = "JPEG"
original = bool(filedata)
if not filedata:
imgio = StringIO()
if jpeg:
if img.mode == "RGBA":
white_background = Image.new("RGB", img.size, (255,255,255))
white_background.paste(img, None, img)
img = white_background
img.save(imgio, format_, quality=quality, optimize=True)
else:
img.save(imgio, format_)
filedata = imgio.getvalue()
if filename is None:
filename = os.path.join('original' if original else 'processed',
hexdigest(filedata) + EXTENSION_FROM_FORMAT[format_])
self.fs.save(filename, filedata)
data = {
'name': filename,
'width': img.size[0],
'height': img.size[1],
'kb': len(filedata) / 1024,
}
if count_frames(img) > 1:
data['animated'] = True
return data
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainSkel.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(826, 558)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 25, 70))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 25, 70))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 25, 70))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 25, 70))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
MainWindow.setPalette(palette)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalGroupBox = QtGui.QGroupBox(self.centralwidget)
self.horizontalGroupBox.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.horizontalGroupBox.sizePolicy().hasHeightForWidth())
self.horizontalGroupBox.setSizePolicy(sizePolicy)
self.horizontalGroupBox.setMaximumSize(QtCore.QSize(1200, 90))
self.horizontalGroupBox.setLayoutDirection(QtCore.Qt.RightToLeft)
self.horizontalGroupBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.horizontalGroupBox.setObjectName(_fromUtf8("horizontalGroupBox"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalGroupBox)
self.horizontalLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout.setContentsMargins(50, 0, 100, 0)
self.horizontalLayout.setSpacing(100)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.PantheraLogo = QtGui.QLabel(self.horizontalGroupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PantheraLogo.sizePolicy().hasHeightForWidth())
self.PantheraLogo.setSizePolicy(sizePolicy)
self.PantheraLogo.setMaximumSize(QtCore.QSize(150, 75))
self.PantheraLogo.setLayoutDirection(QtCore.Qt.LeftToRight)
self.PantheraLogo.setText(_fromUtf8(""))
self.PantheraLogo.setPixmap(QtGui.QPixmap(_fromUtf8("../../../../Panthera logo & branding_internal only/Panthera Logos/Logo_DarkBackgrounds.png")))
self.PantheraLogo.setScaledContents(True)
self.PantheraLogo.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.PantheraLogo.setWordWrap(True)
self.PantheraLogo.setObjectName(_fromUtf8("PantheraLogo"))
self.horizontalLayout.addWidget(self.PantheraLogo)
self.label = QtGui.QLabel(self.horizontalGroupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMaximumSize(QtCore.QSize(75, 75))
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8("hsicon.ico")))
self.label.setScaledContents(True)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.label_2 = QtGui.QLabel(self.horizontalGroupBox)
self.label_2.setMaximumSize(QtCore.QSize(175, 60))
self.label_2.setText(_fromUtf8(""))
self.label_2.setPixmap(QtGui.QPixmap(_fromUtf8("../../../../Downloads/SeattleUMain-red-background.png")))
self.label_2.setScaledContents(True)
self.label_2.setWordWrap(True)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.verticalLayout_2.addWidget(self.horizontalGroupBox)
self.TableTabWidget = QtGui.QTabWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TableTabWidget.sizePolicy().hasHeightForWidth())
self.TableTabWidget.setSizePolicy(sizePolicy)
self.TableTabWidget.setMaximumSize(QtCore.QSize(16777215, 1200))
self.TableTabWidget.setTabsClosable(False)
self.TableTabWidget.setObjectName(_fromUtf8("TableTabWidget"))
self.imageTab = QtGui.QWidget()
self.imageTab.setObjectName(_fromUtf8("imageTab"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.imageTab)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.imageView = QtGui.QVBoxLayout()
self.imageView.setObjectName(_fromUtf8("imageView"))
self.gxs_TBL = QtGui.QTableWidget(self.imageTab)
self.gxs_TBL.setObjectName(_fromUtf8("gxs_TBL"))
self.gxs_TBL.setColumnCount(0)
self.gxs_TBL.setRowCount(0)
self.imageView.addWidget(self.gxs_TBL)
self.verticalLayout_6.addLayout(self.imageView)
self.horizontalGroupBox1 = QtGui.QGroupBox(self.imageTab)
self.horizontalGroupBox1.setObjectName(_fromUtf8("horizontalGroupBox1"))
self.horizontalLayout_4 = QtGui.QHBoxLayout(self.horizontalGroupBox1)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.pushButton = QtGui.QPushButton(self.horizontalGroupBox1)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout_4.addWidget(self.pushButton)
self.pushButton_2 = QtGui.QPushButton(self.horizontalGroupBox1)
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.horizontalLayout_4.addWidget(self.pushButton_2)
self.AutoChip = QtGui.QPushButton(self.horizontalGroupBox1)
self.AutoChip.setObjectName(_fromUtf8("AutoChip"))
self.horizontalLayout_4.addWidget(self.AutoChip)
self.verticalLayout_6.addWidget(self.horizontalGroupBox1)
self.TableTabWidget.addTab(self.imageTab, _fromUtf8(""))
self.chipTab = QtGui.QWidget()
self.chipTab.setObjectName(_fromUtf8("chipTab"))
self.verticalLayout_7 = QtGui.QVBoxLayout(self.chipTab)
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.chipView = QtGui.QVBoxLayout()
self.chipView.setObjectName(_fromUtf8("chipView"))
self.cxs_TBL = QtGui.QTableWidget(self.chipTab)
self.cxs_TBL.setObjectName(_fromUtf8("cxs_TBL"))
self.cxs_TBL.setColumnCount(0)
self.cxs_TBL.setRowCount(0)
self.chipView.addWidget(self.cxs_TBL)
self.verticalLayout_7.addLayout(self.chipView)
self.horizontalGroupBox2 = QtGui.QGroupBox(self.chipTab)
self.horizontalGroupBox2.setObjectName(_fromUtf8("horizontalGroupBox2"))
self.horizontalLayout_5 = QtGui.QHBoxLayout(self.horizontalGroupBox2)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.pushButton_3 = QtGui.QPushButton(self.horizontalGroupBox2)
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.horizontalLayout_5.addWidget(self.pushButton_3)
self.AutoQuery = QtGui.QPushButton(self.horizontalGroupBox2)
self.AutoQuery.setObjectName(_fromUtf8("AutoQuery"))
self.horizontalLayout_5.addWidget(self.AutoQuery)
self.verticalLayout_7.addWidget(self.horizontalGroupBox2)
self.TableTabWidget.addTab(self.chipTab, _fromUtf8(""))
self.nameTab = QtGui.QWidget()
self.nameTab.setObjectName(_fromUtf8("nameTab"))
self.verticalLayout_8 = QtGui.QVBoxLayout(self.nameTab)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.nameView = QtGui.QVBoxLayout()
self.nameView.setObjectName(_fromUtf8("nameView"))
self.nxs_TBL = QtGui.QTableWidget(self.nameTab)
self.nxs_TBL.setObjectName(_fromUtf8("nxs_TBL"))
self.nxs_TBL.setColumnCount(0)
self.nxs_TBL.setRowCount(0)
self.nameView.addWidget(self.nxs_TBL)
self.verticalLayout_8.addLayout(self.nameView)
self.TableTabWidget.addTab(self.nameTab, _fromUtf8(""))
self.tab_6 = QtGui.QWidget()
self.tab_6.setObjectName(_fromUtf8("tab_6"))
self.verticalLayout_9 = QtGui.QVBoxLayout(self.tab_6)
self.verticalLayout_9.setObjectName(_fromUtf8("verticalLayout_9"))
self.queryView = QtGui.QVBoxLayout()
self.queryView.setObjectName(_fromUtf8("queryView"))
self.qxs_TBL = QtGui.QTableWidget(self.tab_6)
self.qxs_TBL.setObjectName(_fromUtf8("qxs_TBL"))
self.qxs_TBL.setColumnCount(0)
self.qxs_TBL.setRowCount(0)
self.queryView.addWidget(self.qxs_TBL)
self.verticalLayout_9.addLayout(self.queryView)
self.TableTabWidget.addTab(self.tab_6, _fromUtf8(""))
self.verticalLayout_2.addWidget(self.TableTabWidget)
self.textEdit = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
self.textEdit.setMaximumSize(QtCore.QSize(16777215, 150))
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.verticalLayout_2.addWidget(self.textEdit)
self.TableTabWidget.raise_()
self.textEdit.raise_()
self.horizontalGroupBox.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 826, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuActions = QtGui.QMenu(self.menubar)
self.menuActions.setObjectName(_fromUtf8("menuActions"))
self.menuBatch = QtGui.QMenu(self.menubar)
self.menuBatch.setObjectName(_fromUtf8("menuBatch"))
self.menuOptions = QtGui.QMenu(self.menubar)
self.menuOptions.setObjectName(_fromUtf8("menuOptions"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
MainWindow.setMenuBar(self.menubar)
self.actionNew_Database = QtGui.QAction(MainWindow)
self.actionNew_Database.setObjectName(_fromUtf8("actionNew_Database"))
self.actionOpen_Database = QtGui.QAction(MainWindow)
self.actionOpen_Database.setObjectName(_fromUtf8("actionOpen_Database"))
self.actionSave_Database = QtGui.QAction(MainWindow)
self.actionSave_Database.setObjectName(_fromUtf8("actionSave_Database"))
self.actionImport_Images_Select_file_s = QtGui.QAction(MainWindow)
self.actionImport_Images_Select_file_s.setObjectName(_fromUtf8("actionImport_Images_Select_file_s"))
self.actionImport_Images_Select_Directory = QtGui.QAction(MainWindow)
self.actionImport_Images_Select_Directory.setObjectName(_fromUtf8("actionImport_Images_Select_Directory"))
self.actionQuit = QtGui.QAction(MainWindow)
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.actionAdd_Chip = QtGui.QAction(MainWindow)
self.actionAdd_Chip.setObjectName(_fromUtf8("actionAdd_Chip"))
self.actionNew_Chip_Property = QtGui.QAction(MainWindow)
self.actionNew_Chip_Property.setObjectName(_fromUtf8("actionNew_Chip_Property"))
self.actionQuery = QtGui.QAction(MainWindow)
self.actionQuery.setObjectName(_fromUtf8("actionQuery"))
self.actionReselect_ROI = QtGui.QAction(MainWindow)
self.actionReselect_ROI.setObjectName(_fromUtf8("actionReselect_ROI"))
self.actionReselect_Orientation = QtGui.QAction(MainWindow)
self.actionReselect_Orientation.setObjectName(_fromUtf8("actionReselect_Orientation"))
self.actionSelect_Next = QtGui.QAction(MainWindow)
self.actionSelect_Next.setObjectName(_fromUtf8("actionSelect_Next"))
self.actionDelete_Chip = QtGui.QAction(MainWindow)
self.actionDelete_Chip.setObjectName(_fromUtf8("actionDelete_Chip"))
self.actionDelete_Image = QtGui.QAction(MainWindow)
self.actionDelete_Image.setObjectName(_fromUtf8("actionDelete_Image"))
self.actionPrecompute_Chips_Features = QtGui.QAction(MainWindow)
self.actionPrecompute_Chips_Features.setObjectName(_fromUtf8("actionPrecompute_Chips_Features"))
self.actionPrecompute_Queries = QtGui.QAction(MainWindow)
self.actionPrecompute_Queries.setObjectName(_fromUtf8("actionPrecompute_Queries"))
self.actionScale_All_ROIs = QtGui.QAction(MainWindow)
self.actionScale_All_ROIs.setObjectName(_fromUtf8("actionScale_All_ROIs"))
self.actionConvert_All_Images_into_Chips = QtGui.QAction(MainWindow)
self.actionConvert_All_Images_into_Chips.setObjectName(_fromUtf8("actionConvert_All_Images_into_Chips"))
self.actionLayout_Figures = QtGui.QAction(MainWindow)
self.actionLayout_Figures.setObjectName(_fromUtf8("actionLayout_Figures"))
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionView_Documentation = QtGui.QAction(MainWindow)
self.actionView_Documentation.setObjectName(_fromUtf8("actionView_Documentation"))
self.actionView_Database_Dir = QtGui.QAction(MainWindow)
self.actionView_Database_Dir.setObjectName(_fromUtf8("actionView_Database_Dir"))
self.actionView_Computed_Dir = QtGui.QAction(MainWindow)
self.actionView_Computed_Dir.setObjectName(_fromUtf8("actionView_Computed_Dir"))
self.actionView_Global_Dir = QtGui.QAction(MainWindow)
self.actionView_Global_Dir.setObjectName(_fromUtf8("actionView_Global_Dir"))
self.actionWrite_Logs = QtGui.QAction(MainWindow)
self.actionWrite_Logs.setObjectName(_fromUtf8("actionWrite_Logs"))
self.actionDelete_Cached_Query_Results = QtGui.QAction(MainWindow)
self.actionDelete_Cached_Query_Results.setObjectName(_fromUtf8("actionDelete_Cached_Query_Results"))
self.actionDelete_Computed_Directory = QtGui.QAction(MainWindow)
self.actionDelete_Computed_Directory.setObjectName(_fromUtf8("actionDelete_Computed_Directory"))
self.actionDelete_Global_Preferences = QtGui.QAction(MainWindow)
self.actionDelete_Global_Preferences.setObjectName(_fromUtf8("actionDelete_Global_Preferences"))
self.actionDeveloper_Mode_IPython = QtGui.QAction(MainWindow)
self.actionDeveloper_Mode_IPython.setObjectName(_fromUtf8("actionDeveloper_Mode_IPython"))
self.actionDeveloper_Reload = QtGui.QAction(MainWindow)
self.actionDeveloper_Reload.setObjectName(_fromUtf8("actionDeveloper_Reload"))
self.menuFile.addAction(self.actionNew_Database)
self.menuFile.addAction(self.actionOpen_Database)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionSave_Database)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionImport_Images_Select_file_s)
self.menuFile.addAction(self.actionImport_Images_Select_Directory)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuActions.addAction(self.actionAdd_Chip)
self.menuActions.addAction(self.actionNew_Chip_Property)
self.menuActions.addSeparator()
self.menuActions.addAction(self.actionQuery)
self.menuActions.addSeparator()
self.menuActions.addAction(self.actionReselect_ROI)
self.menuActions.addAction(self.actionReselect_Orientation)
self.menuActions.addSeparator()
self.menuActions.addAction(self.actionSelect_Next)
self.menuActions.addSeparator()
self.menuActions.addAction(self.actionDelete_Chip)
self.menuActions.addAction(self.actionDelete_Image)
self.menuBatch.addAction(self.actionPrecompute_Chips_Features)
self.menuBatch.addAction(self.actionPrecompute_Queries)
self.menuBatch.addSeparator()
self.menuBatch.addAction(self.actionScale_All_ROIs)
self.menuBatch.addSeparator()
self.menuBatch.addAction(self.actionConvert_All_Images_into_Chips)
self.menuOptions.addAction(self.actionLayout_Figures)
self.menuOptions.addSeparator()
self.menuHelp.addAction(self.actionAbout)
self.menuHelp.addAction(self.actionView_Documentation)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionView_Database_Dir)
self.menuHelp.addAction(self.actionView_Computed_Dir)
self.menuHelp.addAction(self.actionView_Global_Dir)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionWrite_Logs)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionDelete_Cached_Query_Results)
self.menuHelp.addAction(self.actionDelete_Computed_Directory)
self.menuHelp.addAction(self.actionDelete_Global_Preferences)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionDeveloper_Mode_IPython)
self.menuHelp.addAction(self.actionDeveloper_Reload)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuActions.menuAction())
self.menubar.addAction(self.menuBatch.menuAction())
self.menubar.addAction(self.menuOptions.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.TableTabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.pushButton.setText(_translate("MainWindow", "Import Image(s)", None))
self.pushButton_2.setText(_translate("MainWindow", "Save Database", None))
self.AutoChip.setText(_translate("MainWindow", "AutoChip", None))
self.TableTabWidget.setTabText(self.TableTabWidget.indexOf(self.imageTab), _translate("MainWindow", "Image Table", None))
self.pushButton_3.setText(_translate("MainWindow", "Save Database", None))
self.AutoQuery.setText(_translate("MainWindow", "AutoQuery", None))
self.TableTabWidget.setTabText(self.TableTabWidget.indexOf(self.chipTab), _translate("MainWindow", "Chip Table", None))
self.TableTabWidget.setTabText(self.TableTabWidget.indexOf(self.nameTab), _translate("MainWindow", "Name View", None))
self.TableTabWidget.setTabText(self.TableTabWidget.indexOf(self.tab_6), _translate("MainWindow", "Query Results Table", None))
self.menuFile.setTitle(_translate("MainWindow", "File", None))
self.menuActions.setTitle(_translate("MainWindow", "Actions", None))
self.menuBatch.setTitle(_translate("MainWindow", "Batch", None))
self.menuOptions.setTitle(_translate("MainWindow", "Options", None))
self.menuHelp.setTitle(_translate("MainWindow", "Help", None))
self.actionNew_Database.setText(_translate("MainWindow", "New Database", None))
self.actionOpen_Database.setText(_translate("MainWindow", "Open Database", None))
self.actionSave_Database.setText(_translate("MainWindow", "Save Database", None))
self.actionImport_Images_Select_file_s.setText(_translate("MainWindow", "Import Images (Select file(s))", None))
self.actionImport_Images_Select_Directory.setText(_translate("MainWindow", "Import Images (Select Directory)", None))
self.actionQuit.setText(_translate("MainWindow", "Quit", None))
self.actionAdd_Chip.setText(_translate("MainWindow", "Add Chip", None))
self.actionNew_Chip_Property.setText(_translate("MainWindow", "New Chip Property", None))
self.actionQuery.setText(_translate("MainWindow", "Query", None))
self.actionReselect_ROI.setText(_translate("MainWindow", "Reselect ROI", None))
self.actionReselect_Orientation.setText(_translate("MainWindow", "Reselect Orientation", None))
self.actionSelect_Next.setText(_translate("MainWindow", "Select Next", None))
self.actionDelete_Chip.setText(_translate("MainWindow", "Delete Chip", None))
self.actionDelete_Image.setText(_translate("MainWindow", "Delete Image", None))
self.actionPrecompute_Chips_Features.setText(_translate("MainWindow", "Precompute Chips/Features", None))
self.actionPrecompute_Queries.setText(_translate("MainWindow", "Precompute Queries", None))
self.actionScale_All_ROIs.setText(_translate("MainWindow", "Scale All ROIs", None))
self.actionConvert_All_Images_into_Chips.setText(_translate("MainWindow", "Convert All Images into Chips", None))
self.actionLayout_Figures.setText(_translate("MainWindow", "Layout Figures", None))
self.actionAbout.setText(_translate("MainWindow", "About", None))
self.actionView_Documentation.setText(_translate("MainWindow", "View Documentation", None))
self.actionView_Database_Dir.setText(_translate("MainWindow", "View Database Dir", None))
self.actionView_Computed_Dir.setText(_translate("MainWindow", "View Computed Dir", None))
self.actionView_Global_Dir.setText(_translate("MainWindow", "View Global Dir", None))
self.actionWrite_Logs.setText(_translate("MainWindow", "Write Logs", None))
self.actionDelete_Cached_Query_Results.setText(_translate("MainWindow", "Delete Cached Query Results", None))
self.actionDelete_Computed_Directory.setText(_translate("MainWindow", "Delete Computed Directory", None))
self.actionDelete_Global_Preferences.setText(_translate("MainWindow", "Delete Global Preferences", None))
self.actionDeveloper_Mode_IPython.setText(_translate("MainWindow", "Developer Mode (IPython)", None))
self.actionDeveloper_Reload.setText(_translate("MainWindow", "Developer Reload", None))
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains the :class:`RMGDatabase` class, which is the primary class
for working with the RMG database.
"""
import os.path
import logging
from base import ForbiddenStructures
from thermo import ThermoDatabase
from transport import TransportDatabase
from rmgpy.data.kinetics.database import KineticsDatabase
from statmech import StatmechDatabase
from solvation import SolvationDatabase
from rmgpy.scoop_framework.util import get, broadcast
# Module-level variable to store the (only) instance of RMGDatabase in use.
database = None
################################################################################
class RMGDatabase:
"""
The primary class for working with the RMG database.
"""
def __init__(self):
self.thermo = None
self.transport = None
self.forbiddenStructures = None
self.kinetics = None
self.statmech = None
self.solvation = None
# Store the newly created database in the module.
global database
# assert database is None, "Should only make one instance of RMGDatabase because it's stored as a module-level variable."
if database is None:
database = self
else:
logging.warning("Should only make one instance of RMGDatabase because it's stored as a module-level variable!")
logging.warning("Unexpected behaviour may result!")
def load(self,
path,
thermoLibraries=None,
transportLibraries=None,
reactionLibraries=None,
seedMechanisms=None,
kineticsFamilies=None,
kineticsDepositories=None,
statmechLibraries=None,
depository=True,
solvation=True,
):
"""
Load the RMG database from the given `path` on disk, where `path`
points to the top-level folder of the RMG database. If none of the
optional arguments are provided, then the entire database will be
loaded. You can use the optional arguments to specify that only certain
components of the database be loaded.
"""
self.loadThermo(os.path.join(path, 'thermo'), thermoLibraries, depository)
self.loadTransport(os.path.join(path, 'transport'), transportLibraries)
self.loadForbiddenStructures(os.path.join(path, 'forbiddenStructures.py'))
self.loadKinetics(os.path.join(path, 'kinetics'),
reactionLibraries,
seedMechanisms,
kineticsFamilies,
kineticsDepositories
)
self.loadStatmech(os.path.join(path, 'statmech'), statmechLibraries, depository)
if solvation:
self.loadSolvation(os.path.join(path, 'solvation'))
def loadThermo(self, path, thermoLibraries=None, depository=True):
"""
Load the RMG thermo database from the given `path` on disk, where
`path` points to the top-level folder of the RMG thermo database.
"""
self.thermo = ThermoDatabase()
self.thermo.load(path, thermoLibraries, depository)
broadcast(self.thermo, 'thermo')
def loadTransport(self, path, transportLibraries=None):
"""
Load the RMG transport database from the given 'path' on disk, where
'path' points to the top-level folder of the RMG transport database.
"""
self.transport = TransportDatabase()
self.transport.load(path, transportLibraries)
broadcast(self.transport, 'transport')
def loadForbiddenStructures(self, path):
"""
Load the RMG forbidden structures from the given `path` on disk, where
`path` points to the forbidden structures file.
"""
self.forbiddenStructures = ForbiddenStructures()
self.forbiddenStructures.load(path)
broadcast(self.forbiddenStructures, 'forbidden')
def loadKinetics(self,
path,
reactionLibraries=None,
seedMechanisms=None,
kineticsFamilies=None,
kineticsDepositories=None
):
"""
Load the RMG kinetics database from the given `path` on disk, where
`path` points to the top-level folder of the RMG kinetics database.
"""
kineticsLibraries = []
libraryOrder = []
if seedMechanisms is None and reactionLibraries is None:
kineticsLibraries = None
if seedMechanisms is not None:
for library in seedMechanisms:
kineticsLibraries.append(library)
libraryOrder.append((library,'Seed'))
if reactionLibraries is not None:
for library in reactionLibraries:
kineticsLibraries.append(library)
libraryOrder.append((library,'Reaction Library'))
self.kinetics = KineticsDatabase()
self.kinetics.libraryOrder = libraryOrder
self.kinetics.load(path,
families=kineticsFamilies,
libraries=kineticsLibraries,
depositories=kineticsDepositories
)
broadcast(self.kinetics, 'kinetics')
def loadSolvation(self, path):
"""
Load the RMG solvation database from the given `path` on disk, where
`path` points to the top-level folder of the RMG solvation database.
"""
self.solvation = SolvationDatabase()
self.solvation.load(path)
broadcast(self.solvation, 'solvation')
def loadStatmech(self, path, statmechLibraries=None, depository=True):
"""
Load the RMG statmech database from the given `path` on disk, where
`path` points to the top-level folder of the RMG statmech database.
"""
self.statmech = StatmechDatabase()
self.statmech.load(path, statmechLibraries, depository)
broadcast(self.statmech, 'statmech')
def loadOld(self, path):
"""
Load the old RMG database from the given `path` on disk, where `path`
points to the top-level folder of the old RMG database.
"""
self.thermo = ThermoDatabase()
self.thermo.loadOld(path)
self.transport = TransportDatabase()
#self.transport.loadOld(path) # Currently no loadOld import function available for transport groups
self.forbiddenStructures = ForbiddenStructures()
self.forbiddenStructures.loadOld(os.path.join(path, 'ForbiddenStructures.txt'))
self.kinetics = KineticsDatabase()
self.kinetics.loadOld(path)
self.statmech = StatmechDatabase()
self.statmech.loadOld(path)
self.solvation = SolvationDatabase()
# Not completely implemented
# self.solvation.loadOld(path)
def save(self, path):
"""
Save the RMG database to the given `path` on disk.
"""
if not os.path.exists(path): os.makedirs(path)
self.forbiddenStructures.save(os.path.join(path, 'forbiddenStructures.py'))
self.thermo.save(os.path.join(path, 'thermo'))
# self.transport.save(os.path.join(path, 'transport')) #Currently no function for saving transport groups
self.kinetics.save(os.path.join(path, 'kinetics'))
self.statmech.save(os.path.join(path, 'statmech'))
self.solvation.save(os.path.join(path, 'solvation'))
self.transport.save(os.path.join(path, 'transport'))
def saveOld(self, path):
"""
Save the old RMG database to the given `path` on disk.
"""
if not os.path.exists(path): os.makedirs(path)
self.thermo.saveOld(path)
self.transport.saveOld(path)
self.forbiddenStructures.saveOld(os.path.join(path, 'ForbiddenStructures.txt'))
self.kinetics.saveOld(path)
self.statmech.saveOld(path)
def getDB(name):
"""
Returns the RMG database object that corresponds
to the parameter name.
First, the module level is queried. If this variable
is empty, the broadcasted variables are queried.
"""
global database
if database:
if name == 'kinetics':
return database.kinetics
elif name == 'thermo':
return database.thermo
elif name == 'transport':
return database.transport
elif name == 'solvation':
return database.solvation
elif name == 'statmech':
return database.statmech
elif name == 'forbidden':
return database.forbiddenStructures
else:
raise Exception('Unrecognized database keyword: {}'.format(name))
else:
try:
db = get(name)
if db:
return db
else:
raise Exception
except Exception, e:
logging.error("Did not find a way to obtain the broadcasted database for {}.".format(name))
raise e
raise Exception('Could not get database with name: {}'.format(name))
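# Illustrative sketch (added for exposition; not part of the original module): loading a
# database checkout and retrieving one component through getDB(). The path and library
# names are assumptions; use the values from your own RMG input file.
def _example_load_database(path='/path/to/RMG-database/input'):
    rmg_database = RMGDatabase()
    rmg_database.load(path,
                      thermoLibraries=['primaryThermoLibrary'],
                      kineticsFamilies=['H_Abstraction'],
                      depository=False)
    return getDB('thermo')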
|
|
from sfepy.base.base import *
from sfepy.postprocess.utils import mlab
from sfepy.fem import Mesh
from sfepy.fem.meshio import MeshIO, vtk_cell_types, supported_formats
from sfepy.solvers.ts import TimeStepper
from dataset_manager import DatasetManager
from enthought.tvtk.api import tvtk
from enthought.mayavi.sources.vtk_data_source import VTKDataSource
from enthought.pyface.timer.api import Timer
def create_file_source(filename, watch=False, offscreen=True):
"""Factory function to create a file source corresponding to the
given file format."""
kwargs = {'watch' : watch, 'offscreen' : offscreen}
if isinstance(filename, str):
fmt = os.path.splitext(filename)[1]
is_sequence = False
else: # A sequence.
fmt = os.path.splitext(filename[0])[1]
is_sequence = True
fmt = fmt.lower()
if fmt == '.vtk':
# VTK is supported directly by Mayavi, no need to use MeshIO.
if is_sequence:
return VTKSequenceFileSource(filename, **kwargs)
else:
return VTKFileSource(filename, **kwargs)
elif fmt in supported_formats.keys():
if is_sequence:
if fmt == '.h5':
raise ValueError('format .h5 does not support file sequences!')
else:
return GenericSequenceFileSource(filename, **kwargs)
else:
return GenericFileSource(filename, **kwargs)
else:
raise ValueError('unknown file format! (%s)' % fmt)
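# Illustrative sketch (added for exposition; not part of the original module): opening a
# VTK result file and showing it in a Mayavi scene. The file name is hypothetical; other
# formats accepted by create_file_source() are read through MeshIO instead.
def _example_view_file(filename='results.vtk'):
    file_source = create_file_source(filename, watch=False, offscreen=False)
    vis_source = file_source()          # builds the underlying Mayavi source
    mlab.pipeline.surface(vis_source)
    mlab.show()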
class FileSource(Struct):
"""General file source."""
def __init__(self, filename, watch=False, offscreen=True):
"""Create a file source using the given file name."""
mlab.options.offscreen = offscreen
self.watch = watch
self.filename = filename
self.reset()
def __call__(self, step=0):
"""Get the file source."""
if self.source is None:
self.source = self.create_source()
if self.watch:
self.timer = Timer(1000, self.poll_file)
return self.source
def reset(self):
"""Reset."""
self.source = None
self.step_range = None
self.notify_obj = None
if self.watch:
self.last_stat = os.stat(self.filename)
self.set_step()
def set_step(self, step=0):
"""Set step of a data sequence."""
self.step = step
def get_step_range(self):
return self.step_range
def file_changed(self):
pass
def setup_notification(self, obj, attr):
"""The attribute 'attr' of the object 'obj' will be set to True
when the source file is watched and changes."""
self.notify_obj = obj
self.notify_attr = attr
def poll_file(self):
"""Check the source file's time stamp and notify the
self.notify_obj in case it changed. Subclasses should implement
the file_changed() method."""
if not self.notify_obj:
return
s = os.stat(self.filename)
if s[-2] == self.last_stat[-2]:
setattr(self.notify_obj, self.notify_attr, False)
else:
self.file_changed()
setattr(self.notify_obj, self.notify_attr, True)
self.last_stat = s
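# Illustrative sketch (added for exposition; not part of the original module): with
# watch=True the source polls the file once a second and flips an attribute on a
# notified object. `viewer` and its 'file_changed' attribute are hypothetical names.
#
#     file_source = create_file_source('results.vtk', watch=True)
#     file_source.setup_notification(viewer, 'file_changed')
#     file_source()   # creating the source also starts the polling Timer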
class VTKFileSource(FileSource):
"""A thin wrapper around mlab.pipeline.open()."""
def create_source(self):
"""Create a VTK file source """
return mlab.pipeline.open(self.filename)
def get_bounding_box(self):
bbox = nm.array(self.source.reader.unstructured_grid_output.bounds)
return bbox.reshape((3,2)).T
def set_filename(self, filename, vis_source):
self.filename = filename
vis_source.base_file_name = filename
def get_step_range(self):
return (0, 0)
class VTKSequenceFileSource(VTKFileSource):
"""A thin wrapper around mlab.pipeline.open() for VTK file sequences."""
def create_source(self):
"""Create a VTK file source """
return mlab.pipeline.open(self.filename[0])
def set_filename(self, filename, vis_source):
self.filename = filename
vis_source.base_file_name = filename[self.step]
def get_step_range(self):
return (0, len(self.filename) - 1)
class GenericFileSource(FileSource):
"""File source usable with any format supported by MeshIO classes."""
def __init__(self, *args, **kwargs):
FileSource.__init__(self, *args, **kwargs)
self.io = None
def read_common(self, filename):
self.io = MeshIO.any_from_filename(filename)
self.step_range = (0, self.io.read_last_step())
self.mesh = mesh = Mesh.from_file(filename)
self.n_nod, self.dim = self.mesh.coors.shape
def create_source(self):
"""Create a VTK source from data in a SfePy-supported file."""
if self.io is None:
self.read_common(self.filename)
dataset = self.create_dataset()
try:
out = self.io.read_data(self.step)
except ValueError:
out = None
if out is not None:
self.add_data_to_dataset(dataset, out)
src = VTKDataSource(data=dataset)
# src.print_traits()
# debug()
return src
def get_bounding_box(self):
bbox = self.mesh.get_bounding_box()
if self.dim == 2:
bbox = nm.c_[bbox, [0.0, 0.0]]
return bbox
def set_filename(self, filename, vis_source):
self.filename = filename
self.source = self.create_source()
vis_source.data = self.source.data
def get_step_range(self):
if self.step_range is None:
io = MeshIO.any_from_filename(self.filename)
self.step_range = (0, io.read_last_step())
return self.step_range
def file_changed(self):
self.step_range = (0, self.io.read_last_step())
def create_dataset(self):
"""Create a tvtk.UnstructuredGrid dataset from the Mesh instance of the
file source."""
mesh = self.mesh
n_nod, dim = self.n_nod, self.dim
n_el, n_els, n_e_ps = mesh.n_el, mesh.n_els, mesh.n_e_ps
if dim == 2:
nod_zz = nm.zeros((n_nod, 1), dtype=mesh.coors.dtype)
points = nm.c_[mesh.coors, nod_zz]
else:
points = mesh.coors
dataset = tvtk.UnstructuredGrid(points=points)
cell_types = []
cells = []
offset = [0]
for ig, conn in enumerate(mesh.conns):
cell_types += [vtk_cell_types[mesh.descs[ig]]] * n_els[ig]
nn = nm.array([conn.shape[1]] * n_els[ig])
aux = nm.c_[nn[:,None], conn]
cells.extend(aux.ravel())
offset.extend([aux.shape[1]] * n_els[ig])
cells = nm.array(cells)
cell_types = nm.array(cell_types)
offset = nm.cumsum(offset)[:-1]
cell_array = tvtk.CellArray()
cell_array.set_cells(n_el, cells)
dataset.set_cells(cell_types, offset, cell_array)
return dataset
def add_data_to_dataset(self, dataset, data):
"""Add point and cell data to the dataset."""
dim = self.dim
sym = (dim + 1) * dim / 2
dm = DatasetManager(dataset=dataset)
for key, val in data.iteritems():
vd = val.data
## print vd.shape
if val.mode == 'vertex':
if vd.shape[1] == 1:
aux = vd.reshape((vd.shape[0],))
elif vd.shape[1] == 2:
zz = nm.zeros((vd.shape[0], 1), dtype=vd.dtype)
aux = nm.c_[vd, zz]
elif vd.shape[1] == 3:
aux = vd
else:
raise ValueError('unknown vertex data format! (%s)'\
% vd.shape)
dm.add_array(aux, key, 'point')
elif val.mode == 'cell':
ne, aux, nr, nc = vd.shape
if (nr == 1) and (nc == 1):
aux = vd.reshape((ne,))
elif (nr == dim) and (nc == 1):
if dim == 3:
aux = vd.reshape((ne, dim))
else:
zz = nm.zeros((vd.shape[0], 1), dtype=vd.dtype);
aux = nm.c_[vd.squeeze(), zz]
elif (((nr == sym) or (nr == (dim * dim))) and (nc == 1)) \
or ((nr == dim) and (nc == dim)):
vd = vd.squeeze()
if dim == 3:
if nr == sym:
aux = vd[:,[0,3,4,3,1,5,4,5,2]]
elif nr == (dim * dim):
aux = vd[:,[0,3,4,6,1,5,7,8,2]]
else:
aux = vd.reshape((vd.shape[0], dim*dim))
else:
zz = nm.zeros((vd.shape[0], 1), dtype=vd.dtype);
if nr == sym:
aux = nm.c_[vd[:,[0,2]], zz, vd[:,[2,1]],
zz, zz, zz, zz]
elif nr == (dim * dim):
aux = nm.c_[vd[:,[0,2]], zz, vd[:,[3,1]],
zz, zz, zz, zz]
else:
aux = nm.c_[vd[:,0,[0,1]], zz, vd[:,1,[0,1]],
zz, zz, zz, zz]
dm.add_array(aux, key, 'cell')
class GenericSequenceFileSource(GenericFileSource):
"""File source usable with any format supported by MeshIO classes, with
exception of HDF5 (.h5), for file sequences."""
def create_source(self):
"""Create a VTK source from data in a SfePy-supported file."""
if self.io is None:
self.read_common(self.filename[self.step])
dataset = self.create_dataset()
src = VTKDataSource(data=dataset)
return src
def set_filename(self, filename, vis_source):
self.filename = filename
self.io = None
self.source = self.create_source()
vis_source.data = self.source.data
def get_step_range(self):
return (0, len(self.filename) - 1)
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._vim_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file is autogenerated by scripts/get_vimkw.py
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Split up in multiple functions so it's importable by jython, which has a
# per-method size limit.
def _getauto():
var = (
('BufAdd','BufAdd'),
('BufCreate','BufCreate'),
('BufDelete','BufDelete'),
('BufEnter','BufEnter'),
('BufFilePost','BufFilePost'),
('BufFilePre','BufFilePre'),
('BufHidden','BufHidden'),
('BufLeave','BufLeave'),
('BufNew','BufNew'),
('BufNewFile','BufNewFile'),
('BufRead','BufRead'),
('BufReadCmd','BufReadCmd'),
('BufReadPost','BufReadPost'),
('BufReadPre','BufReadPre'),
('BufUnload','BufUnload'),
('BufWinEnter','BufWinEnter'),
('BufWinLeave','BufWinLeave'),
('BufWipeout','BufWipeout'),
('BufWrite','BufWrite'),
('BufWriteCmd','BufWriteCmd'),
('BufWritePost','BufWritePost'),
('BufWritePre','BufWritePre'),
('Cmd','Cmd'),
('CmdwinEnter','CmdwinEnter'),
('CmdwinLeave','CmdwinLeave'),
('ColorScheme','ColorScheme'),
('CompleteDone','CompleteDone'),
('CursorHold','CursorHold'),
('CursorHoldI','CursorHoldI'),
('CursorMoved','CursorMoved'),
('CursorMovedI','CursorMovedI'),
('EncodingChanged','EncodingChanged'),
('FileAppendCmd','FileAppendCmd'),
('FileAppendPost','FileAppendPost'),
('FileAppendPre','FileAppendPre'),
('FileChangedRO','FileChangedRO'),
('FileChangedShell','FileChangedShell'),
('FileChangedShellPost','FileChangedShellPost'),
('FileEncoding','FileEncoding'),
('FileReadCmd','FileReadCmd'),
('FileReadPost','FileReadPost'),
('FileReadPre','FileReadPre'),
('FileType','FileType'),
('FileWriteCmd','FileWriteCmd'),
('FileWritePost','FileWritePost'),
('FileWritePre','FileWritePre'),
('FilterReadPost','FilterReadPost'),
('FilterReadPre','FilterReadPre'),
('FilterWritePost','FilterWritePost'),
('FilterWritePre','FilterWritePre'),
('FocusGained','FocusGained'),
('FocusLost','FocusLost'),
('FuncUndefined','FuncUndefined'),
('GUIEnter','GUIEnter'),
('GUIFailed','GUIFailed'),
('InsertChange','InsertChange'),
('InsertCharPre','InsertCharPre'),
('InsertEnter','InsertEnter'),
('InsertLeave','InsertLeave'),
('MenuPopup','MenuPopup'),
('QuickFixCmdPost','QuickFixCmdPost'),
('QuickFixCmdPre','QuickFixCmdPre'),
('QuitPre','QuitPre'),
('RemoteReply','RemoteReply'),
('SessionLoadPost','SessionLoadPost'),
('ShellCmdPost','ShellCmdPost'),
('ShellFilterPost','ShellFilterPost'),
('SourceCmd','SourceCmd'),
('SourcePre','SourcePre'),
('SpellFileMissing','SpellFileMissing'),
('StdinReadPost','StdinReadPost'),
('StdinReadPre','StdinReadPre'),
('SwapExists','SwapExists'),
('Syntax','Syntax'),
('TabEnter','TabEnter'),
('TabLeave','TabLeave'),
('TermChanged','TermChanged'),
('TermResponse','TermResponse'),
('TextChanged','TextChanged'),
('TextChangedI','TextChangedI'),
('User','User'),
('UserGettingBored','UserGettingBored'),
('VimEnter','VimEnter'),
('VimLeave','VimLeave'),
('VimLeavePre','VimLeavePre'),
('VimResized','VimResized'),
('WinEnter','WinEnter'),
('WinLeave','WinLeave'),
('event','event'),
)
return var
auto = _getauto()
def _getcommand():
var = (
('a','a'),
('ab','ab'),
('abc','abclear'),
('abo','aboveleft'),
('al','all'),
('ar','ar'),
('ar','args'),
('arga','argadd'),
('argd','argdelete'),
('argdo','argdo'),
('arge','argedit'),
('argg','argglobal'),
('argl','arglocal'),
('argu','argument'),
('as','ascii'),
('au','au'),
('b','buffer'),
('bN','bNext'),
('ba','ball'),
('bad','badd'),
('bd','bdelete'),
('bel','belowright'),
('bf','bfirst'),
('bl','blast'),
('bm','bmodified'),
('bn','bnext'),
('bo','botright'),
('bp','bprevious'),
('br','br'),
('br','brewind'),
('brea','break'),
('breaka','breakadd'),
('breakd','breakdel'),
('breakl','breaklist'),
('bro','browse'),
('bu','bu'),
('buf','buf'),
('bufdo','bufdo'),
('buffers','buffers'),
('bun','bunload'),
('bw','bwipeout'),
('c','c'),
('c','change'),
('cN','cN'),
('cN','cNext'),
('cNf','cNf'),
('cNf','cNfile'),
('cabc','cabclear'),
('cad','cad'),
('cad','caddexpr'),
('caddb','caddbuffer'),
('caddf','caddfile'),
('cal','call'),
('cat','catch'),
('cb','cbuffer'),
('cc','cc'),
('ccl','cclose'),
('cd','cd'),
('ce','center'),
('cex','cexpr'),
('cf','cfile'),
('cfir','cfirst'),
('cg','cgetfile'),
('cgetb','cgetbuffer'),
('cgete','cgetexpr'),
('changes','changes'),
('chd','chdir'),
('che','checkpath'),
('checkt','checktime'),
('cl','cl'),
('cl','clist'),
('cla','clast'),
('clo','close'),
('cmapc','cmapclear'),
('cn','cn'),
('cn','cnext'),
('cnew','cnewer'),
('cnf','cnf'),
('cnf','cnfile'),
('co','copy'),
('col','colder'),
('colo','colorscheme'),
('com','com'),
('comc','comclear'),
('comp','compiler'),
('con','con'),
('con','continue'),
('conf','confirm'),
('cope','copen'),
('cp','cprevious'),
('cpf','cpfile'),
('cq','cquit'),
('cr','crewind'),
('cs','cs'),
('cscope','cscope'),
('cstag','cstag'),
('cuna','cunabbrev'),
('cw','cwindow'),
('d','d'),
('d','delete'),
('de','de'),
('debug','debug'),
('debugg','debuggreedy'),
('del','del'),
('delc','delcommand'),
('delel','delel'),
('delep','delep'),
('deletel','deletel'),
('deletep','deletep'),
('deletl','deletl'),
('deletp','deletp'),
('delf','delf'),
('delf','delfunction'),
('dell','dell'),
('delm','delmarks'),
('delp','delp'),
('dep','dep'),
('di','di'),
('di','display'),
('diffg','diffget'),
('diffo','diffoff'),
('diffp','diffpatch'),
('diffpu','diffput'),
('diffs','diffsplit'),
('difft','diffthis'),
('diffu','diffupdate'),
('dig','dig'),
('dig','digraphs'),
('dir','dir'),
('dj','djump'),
('dl','dl'),
('dli','dlist'),
('do','do'),
('doau','doau'),
('dp','dp'),
('dr','drop'),
('ds','dsearch'),
('dsp','dsplit'),
('e','e'),
('e','edit'),
('ea','ea'),
('earlier','earlier'),
('ec','ec'),
('echoe','echoerr'),
('echom','echomsg'),
('echon','echon'),
('el','else'),
('elsei','elseif'),
('em','emenu'),
('en','en'),
('en','endif'),
('endf','endf'),
('endf','endfunction'),
('endfo','endfor'),
('endfun','endfun'),
('endt','endtry'),
('endw','endwhile'),
('ene','enew'),
('ex','ex'),
('exi','exit'),
('exu','exusage'),
('f','f'),
('f','file'),
('files','files'),
('filet','filet'),
('filetype','filetype'),
('fin','fin'),
('fin','find'),
('fina','finally'),
('fini','finish'),
('fir','first'),
('fix','fixdel'),
('fo','fold'),
('foldc','foldclose'),
('foldd','folddoopen'),
('folddoc','folddoclosed'),
('foldo','foldopen'),
('for','for'),
('fu','fu'),
('fu','function'),
('fun','fun'),
('g','g'),
('go','goto'),
('gr','grep'),
('grepa','grepadd'),
('gui','gui'),
('gvim','gvim'),
('h','h'),
('h','help'),
('ha','hardcopy'),
('helpf','helpfind'),
('helpg','helpgrep'),
('helpt','helptags'),
('hi','hi'),
('hid','hide'),
('his','history'),
('i','i'),
('ia','ia'),
('iabc','iabclear'),
('if','if'),
('ij','ijump'),
('il','ilist'),
('imapc','imapclear'),
('in','in'),
('intro','intro'),
('is','isearch'),
('isp','isplit'),
('iuna','iunabbrev'),
('j','join'),
('ju','jumps'),
('k','k'),
('kee','keepmarks'),
('keepa','keepa'),
('keepalt','keepalt'),
('keepj','keepjumps'),
('keepp','keeppatterns'),
('l','l'),
('l','list'),
('lN','lN'),
('lN','lNext'),
('lNf','lNf'),
('lNf','lNfile'),
('la','la'),
('la','last'),
('lad','lad'),
('lad','laddexpr'),
('laddb','laddbuffer'),
('laddf','laddfile'),
('lan','lan'),
('lan','language'),
('lat','lat'),
('later','later'),
('lb','lbuffer'),
('lc','lcd'),
('lch','lchdir'),
('lcl','lclose'),
('lcs','lcs'),
('lcscope','lcscope'),
('le','left'),
('lefta','leftabove'),
('lex','lexpr'),
('lf','lfile'),
('lfir','lfirst'),
('lg','lgetfile'),
('lgetb','lgetbuffer'),
('lgete','lgetexpr'),
('lgr','lgrep'),
('lgrepa','lgrepadd'),
('lh','lhelpgrep'),
('ll','ll'),
('lla','llast'),
('lli','llist'),
('lmak','lmake'),
('lmapc','lmapclear'),
('lne','lne'),
('lne','lnext'),
('lnew','lnewer'),
('lnf','lnf'),
('lnf','lnfile'),
('lo','lo'),
('lo','loadview'),
('loadk','loadk'),
('loadkeymap','loadkeymap'),
('loc','lockmarks'),
('lockv','lockvar'),
('lol','lolder'),
('lop','lopen'),
('lp','lprevious'),
('lpf','lpfile'),
('lr','lrewind'),
('ls','ls'),
('lt','ltag'),
('lua','lua'),
('luado','luado'),
('luafile','luafile'),
('lv','lvimgrep'),
('lvimgrepa','lvimgrepadd'),
('lw','lwindow'),
('m','move'),
('ma','ma'),
('ma','mark'),
('mak','make'),
('marks','marks'),
('mat','match'),
('menut','menut'),
('menut','menutranslate'),
('mes','mes'),
('messages','messages'),
('mk','mk'),
('mk','mkexrc'),
('mks','mksession'),
('mksp','mkspell'),
('mkv','mkv'),
('mkv','mkvimrc'),
('mkvie','mkview'),
('mo','mo'),
('mod','mode'),
('mz','mz'),
('mz','mzscheme'),
('mzf','mzfile'),
('n','n'),
('n','next'),
('nb','nbkey'),
('nbc','nbclose'),
('nbs','nbstart'),
('ne','ne'),
('new','new'),
('nmapc','nmapclear'),
('noa','noa'),
('noautocmd','noautocmd'),
('noh','nohlsearch'),
('nu','number'),
('o','o'),
('o','open'),
('ol','oldfiles'),
('omapc','omapclear'),
('on','only'),
('opt','options'),
('ownsyntax','ownsyntax'),
('p','p'),
('p','print'),
('pc','pclose'),
('pe','pe'),
('pe','perl'),
('ped','pedit'),
('perld','perldo'),
('po','pop'),
('popu','popu'),
('popu','popup'),
('pp','ppop'),
('pr','pr'),
('pre','preserve'),
('prev','previous'),
('pro','pro'),
('prof','profile'),
('profd','profdel'),
('promptf','promptfind'),
('promptr','promptrepl'),
('ps','psearch'),
('ptN','ptN'),
('ptN','ptNext'),
('pta','ptag'),
('ptf','ptfirst'),
('ptj','ptjump'),
('ptl','ptlast'),
('ptn','ptn'),
('ptn','ptnext'),
('ptp','ptprevious'),
('ptr','ptrewind'),
('pts','ptselect'),
('pu','put'),
('pw','pwd'),
('py','py'),
('py','python'),
('py3','py3'),
('py3do','py3do'),
('pydo','pydo'),
('pyf','pyfile'),
('python3','python3'),
('q','q'),
('q','quit'),
('qa','qall'),
('quita','quitall'),
('r','r'),
('r','read'),
('re','re'),
('rec','recover'),
('red','red'),
('red','redo'),
('redi','redir'),
('redr','redraw'),
('redraws','redrawstatus'),
('reg','registers'),
('res','resize'),
('ret','retab'),
('retu','return'),
('rew','rewind'),
('ri','right'),
('rightb','rightbelow'),
('ru','ru'),
('ru','runtime'),
('rub','ruby'),
('rubyd','rubydo'),
('rubyf','rubyfile'),
('rundo','rundo'),
('rv','rviminfo'),
('sN','sNext'),
('sa','sargument'),
('sal','sall'),
('san','sandbox'),
('sav','saveas'),
('sb','sbuffer'),
('sbN','sbNext'),
('sba','sball'),
('sbf','sbfirst'),
('sbl','sblast'),
('sbm','sbmodified'),
('sbn','sbnext'),
('sbp','sbprevious'),
('sbr','sbrewind'),
('scrip','scrip'),
('scrip','scriptnames'),
('scripte','scriptencoding'),
('scs','scs'),
('scscope','scscope'),
('se','set'),
('setf','setfiletype'),
('setg','setglobal'),
('setl','setlocal'),
('sf','sfind'),
('sfir','sfirst'),
('sh','shell'),
('si','si'),
('sig','sig'),
('sign','sign'),
('sil','silent'),
('sim','simalt'),
('sl','sl'),
('sl','sleep'),
('sla','slast'),
('sm','smagic'),
('sm','smap'),
('sme','sme'),
('smenu','smenu'),
('sn','snext'),
('sni','sniff'),
('sno','snomagic'),
('snoreme','snoreme'),
('snoremenu','snoremenu'),
('so','so'),
('so','source'),
('sor','sort'),
('sp','split'),
('spe','spe'),
('spe','spellgood'),
('spelld','spelldump'),
('spelli','spellinfo'),
('spellr','spellrepall'),
('spellu','spellundo'),
('spellw','spellwrong'),
('spr','sprevious'),
('sre','srewind'),
('st','st'),
('st','stop'),
('sta','stag'),
('star','star'),
('star','startinsert'),
('start','start'),
('startg','startgreplace'),
('startr','startreplace'),
('stj','stjump'),
('stopi','stopinsert'),
('sts','stselect'),
('sun','sunhide'),
('sunme','sunme'),
('sunmenu','sunmenu'),
('sus','suspend'),
('sv','sview'),
('sw','swapname'),
('sy','sy'),
('syn','syn'),
('sync','sync'),
('syncbind','syncbind'),
('syntime','syntime'),
('t','t'),
('tN','tN'),
('tN','tNext'),
('ta','ta'),
('ta','tag'),
('tab','tab'),
('tabN','tabN'),
('tabN','tabNext'),
('tabc','tabclose'),
('tabd','tabdo'),
('tabe','tabedit'),
('tabf','tabfind'),
('tabfir','tabfirst'),
('tabl','tablast'),
('tabm','tabmove'),
('tabn','tabnext'),
('tabnew','tabnew'),
('tabo','tabonly'),
('tabp','tabprevious'),
('tabr','tabrewind'),
('tabs','tabs'),
('tags','tags'),
('tc','tcl'),
('tcld','tcldo'),
('tclf','tclfile'),
('te','tearoff'),
('tf','tfirst'),
('th','throw'),
('tj','tjump'),
('tl','tlast'),
('tm','tm'),
('tm','tmenu'),
('tn','tn'),
('tn','tnext'),
('to','topleft'),
('tp','tprevious'),
('tr','tr'),
('tr','trewind'),
('try','try'),
('ts','tselect'),
('tu','tu'),
('tu','tunmenu'),
('u','u'),
('u','undo'),
('un','un'),
('una','unabbreviate'),
('undoj','undojoin'),
('undol','undolist'),
('unh','unhide'),
('unl','unl'),
('unlo','unlockvar'),
('uns','unsilent'),
('up','update'),
('v','v'),
('ve','ve'),
('ve','version'),
('verb','verbose'),
('vert','vertical'),
('vi','vi'),
('vi','visual'),
('vie','view'),
('vim','vimgrep'),
('vimgrepa','vimgrepadd'),
('viu','viusage'),
('vmapc','vmapclear'),
('vne','vnew'),
('vs','vsplit'),
('w','w'),
('w','write'),
('wN','wNext'),
('wa','wall'),
('wh','while'),
('win','win'),
('win','winsize'),
('winc','wincmd'),
('windo','windo'),
('winp','winpos'),
('wn','wnext'),
('wp','wprevious'),
('wq','wq'),
('wqa','wqall'),
('ws','wsverb'),
('wundo','wundo'),
('wv','wviminfo'),
('x','x'),
('x','xit'),
('xa','xall'),
('xmapc','xmapclear'),
('xme','xme'),
('xmenu','xmenu'),
('xnoreme','xnoreme'),
('xnoremenu','xnoremenu'),
('xunme','xunme'),
('xunmenu','xunmenu'),
('xwininfo','xwininfo'),
('y','yank'),
)
return var
command = _getcommand()
def _getoption():
var = (
('acd','acd'),
('ai','ai'),
('akm','akm'),
('al','al'),
('aleph','aleph'),
('allowrevins','allowrevins'),
('altkeymap','altkeymap'),
('ambiwidth','ambiwidth'),
('ambw','ambw'),
('anti','anti'),
('antialias','antialias'),
('ar','ar'),
('arab','arab'),
('arabic','arabic'),
('arabicshape','arabicshape'),
('ari','ari'),
('arshape','arshape'),
('autochdir','autochdir'),
('autoindent','autoindent'),
('autoread','autoread'),
('autowrite','autowrite'),
('autowriteall','autowriteall'),
('aw','aw'),
('awa','awa'),
('background','background'),
('backspace','backspace'),
('backup','backup'),
('backupcopy','backupcopy'),
('backupdir','backupdir'),
('backupext','backupext'),
('backupskip','backupskip'),
('balloondelay','balloondelay'),
('ballooneval','ballooneval'),
('balloonexpr','balloonexpr'),
('bdir','bdir'),
('bdlay','bdlay'),
('beval','beval'),
('bex','bex'),
('bexpr','bexpr'),
('bg','bg'),
('bh','bh'),
('bin','bin'),
('binary','binary'),
('biosk','biosk'),
('bioskey','bioskey'),
('bk','bk'),
('bkc','bkc'),
('bl','bl'),
('bomb','bomb'),
('breakat','breakat'),
('brk','brk'),
('browsedir','browsedir'),
('bs','bs'),
('bsdir','bsdir'),
('bsk','bsk'),
('bt','bt'),
('bufhidden','bufhidden'),
('buflisted','buflisted'),
('buftype','buftype'),
('casemap','casemap'),
('cb','cb'),
('cc','cc'),
('ccv','ccv'),
('cd','cd'),
('cdpath','cdpath'),
('cedit','cedit'),
('cf','cf'),
('cfu','cfu'),
('ch','ch'),
('charconvert','charconvert'),
('ci','ci'),
('cin','cin'),
('cindent','cindent'),
('cink','cink'),
('cinkeys','cinkeys'),
('cino','cino'),
('cinoptions','cinoptions'),
('cinw','cinw'),
('cinwords','cinwords'),
('clipboard','clipboard'),
('cmdheight','cmdheight'),
('cmdwinheight','cmdwinheight'),
('cmp','cmp'),
('cms','cms'),
('co','co'),
('cocu','cocu'),
('cole','cole'),
('colorcolumn','colorcolumn'),
('columns','columns'),
('com','com'),
('comments','comments'),
('commentstring','commentstring'),
('compatible','compatible'),
('complete','complete'),
('completefunc','completefunc'),
('completeopt','completeopt'),
('concealcursor','concealcursor'),
('conceallevel','conceallevel'),
('confirm','confirm'),
('consk','consk'),
('conskey','conskey'),
('copyindent','copyindent'),
('cot','cot'),
('cp','cp'),
('cpo','cpo'),
('cpoptions','cpoptions'),
('cpt','cpt'),
('crb','crb'),
('cryptmethod','cryptmethod'),
('cscopepathcomp','cscopepathcomp'),
('cscopeprg','cscopeprg'),
('cscopequickfix','cscopequickfix'),
('cscoperelative','cscoperelative'),
('cscopetag','cscopetag'),
('cscopetagorder','cscopetagorder'),
('cscopeverbose','cscopeverbose'),
('cspc','cspc'),
('csprg','csprg'),
('csqf','csqf'),
('csre','csre'),
('cst','cst'),
('csto','csto'),
('csverb','csverb'),
('cuc','cuc'),
('cul','cul'),
('cursorbind','cursorbind'),
('cursorcolumn','cursorcolumn'),
('cursorline','cursorline'),
('cwh','cwh'),
('debug','debug'),
('deco','deco'),
('def','def'),
('define','define'),
('delcombine','delcombine'),
('dex','dex'),
('dg','dg'),
('dict','dict'),
('dictionary','dictionary'),
('diff','diff'),
('diffexpr','diffexpr'),
('diffopt','diffopt'),
('digraph','digraph'),
('dip','dip'),
('dir','dir'),
('directory','directory'),
('display','display'),
('dy','dy'),
('ea','ea'),
('ead','ead'),
('eadirection','eadirection'),
('eb','eb'),
('ed','ed'),
('edcompatible','edcompatible'),
('ef','ef'),
('efm','efm'),
('ei','ei'),
('ek','ek'),
('enc','enc'),
('encoding','encoding'),
('endofline','endofline'),
('eol','eol'),
('ep','ep'),
('equalalways','equalalways'),
('equalprg','equalprg'),
('errorbells','errorbells'),
('errorfile','errorfile'),
('errorformat','errorformat'),
('esckeys','esckeys'),
('et','et'),
('eventignore','eventignore'),
('ex','ex'),
('expandtab','expandtab'),
('exrc','exrc'),
('fcl','fcl'),
('fcs','fcs'),
('fdc','fdc'),
('fde','fde'),
('fdi','fdi'),
('fdl','fdl'),
('fdls','fdls'),
('fdm','fdm'),
('fdn','fdn'),
('fdo','fdo'),
('fdt','fdt'),
('fen','fen'),
('fenc','fenc'),
('fencs','fencs'),
('fex','fex'),
('ff','ff'),
('ffs','ffs'),
('fic','fic'),
('fileencoding','fileencoding'),
('fileencodings','fileencodings'),
('fileformat','fileformat'),
('fileformats','fileformats'),
('fileignorecase','fileignorecase'),
('filetype','filetype'),
('fillchars','fillchars'),
('fk','fk'),
('fkmap','fkmap'),
('flp','flp'),
('fml','fml'),
('fmr','fmr'),
('fo','fo'),
('foldclose','foldclose'),
('foldcolumn','foldcolumn'),
('foldenable','foldenable'),
('foldexpr','foldexpr'),
('foldignore','foldignore'),
('foldlevel','foldlevel'),
('foldlevelstart','foldlevelstart'),
('foldmarker','foldmarker'),
('foldmethod','foldmethod'),
('foldminlines','foldminlines'),
('foldnestmax','foldnestmax'),
('foldopen','foldopen'),
('foldtext','foldtext'),
('formatexpr','formatexpr'),
('formatlistpat','formatlistpat'),
('formatoptions','formatoptions'),
('formatprg','formatprg'),
('fp','fp'),
('fs','fs'),
('fsync','fsync'),
('ft','ft'),
('gcr','gcr'),
('gd','gd'),
('gdefault','gdefault'),
('gfm','gfm'),
('gfn','gfn'),
('gfs','gfs'),
('gfw','gfw'),
('ghr','ghr'),
('go','go'),
('gp','gp'),
('grepformat','grepformat'),
('grepprg','grepprg'),
('gtl','gtl'),
('gtt','gtt'),
('guicursor','guicursor'),
('guifont','guifont'),
('guifontset','guifontset'),
('guifontwide','guifontwide'),
('guiheadroom','guiheadroom'),
('guioptions','guioptions'),
('guipty','guipty'),
('guitablabel','guitablabel'),
('guitabtooltip','guitabtooltip'),
('helpfile','helpfile'),
('helpheight','helpheight'),
('helplang','helplang'),
('hf','hf'),
('hh','hh'),
('hi','hi'),
('hid','hid'),
('hidden','hidden'),
('highlight','highlight'),
('history','history'),
('hk','hk'),
('hkmap','hkmap'),
('hkmapp','hkmapp'),
('hkp','hkp'),
('hl','hl'),
('hlg','hlg'),
('hls','hls'),
('hlsearch','hlsearch'),
('ic','ic'),
('icon','icon'),
('iconstring','iconstring'),
('ignorecase','ignorecase'),
('im','im'),
('imactivatefunc','imactivatefunc'),
('imactivatekey','imactivatekey'),
('imaf','imaf'),
('imak','imak'),
('imc','imc'),
('imcmdline','imcmdline'),
('imd','imd'),
('imdisable','imdisable'),
('imi','imi'),
('iminsert','iminsert'),
('ims','ims'),
('imsearch','imsearch'),
('imsf','imsf'),
('imstatusfunc','imstatusfunc'),
('inc','inc'),
('include','include'),
('includeexpr','includeexpr'),
('incsearch','incsearch'),
('inde','inde'),
('indentexpr','indentexpr'),
('indentkeys','indentkeys'),
('indk','indk'),
('inex','inex'),
('inf','inf'),
('infercase','infercase'),
('inoremap','inoremap'),
('insertmode','insertmode'),
('invacd','invacd'),
('invai','invai'),
('invakm','invakm'),
('invallowrevins','invallowrevins'),
('invaltkeymap','invaltkeymap'),
('invanti','invanti'),
('invantialias','invantialias'),
('invar','invar'),
('invarab','invarab'),
('invarabic','invarabic'),
('invarabicshape','invarabicshape'),
('invari','invari'),
('invarshape','invarshape'),
('invautochdir','invautochdir'),
('invautoindent','invautoindent'),
('invautoread','invautoread'),
('invautowrite','invautowrite'),
('invautowriteall','invautowriteall'),
('invaw','invaw'),
('invawa','invawa'),
('invbackup','invbackup'),
('invballooneval','invballooneval'),
('invbeval','invbeval'),
('invbin','invbin'),
('invbinary','invbinary'),
('invbiosk','invbiosk'),
('invbioskey','invbioskey'),
('invbk','invbk'),
('invbl','invbl'),
('invbomb','invbomb'),
('invbuflisted','invbuflisted'),
('invcf','invcf'),
('invci','invci'),
('invcin','invcin'),
('invcindent','invcindent'),
('invcompatible','invcompatible'),
('invconfirm','invconfirm'),
('invconsk','invconsk'),
('invconskey','invconskey'),
('invcopyindent','invcopyindent'),
('invcp','invcp'),
('invcrb','invcrb'),
('invcscoperelative','invcscoperelative'),
('invcscopetag','invcscopetag'),
('invcscopeverbose','invcscopeverbose'),
('invcsre','invcsre'),
('invcst','invcst'),
('invcsverb','invcsverb'),
('invcuc','invcuc'),
('invcul','invcul'),
('invcursorbind','invcursorbind'),
('invcursorcolumn','invcursorcolumn'),
('invcursorline','invcursorline'),
('invdeco','invdeco'),
('invdelcombine','invdelcombine'),
('invdg','invdg'),
('invdiff','invdiff'),
('invdigraph','invdigraph'),
('invea','invea'),
('inveb','inveb'),
('inved','inved'),
('invedcompatible','invedcompatible'),
('invek','invek'),
('invendofline','invendofline'),
('inveol','inveol'),
('invequalalways','invequalalways'),
('inverrorbells','inverrorbells'),
('invesckeys','invesckeys'),
('invet','invet'),
('invex','invex'),
('invexpandtab','invexpandtab'),
('invexrc','invexrc'),
('invfen','invfen'),
('invfic','invfic'),
('invfileignorecase','invfileignorecase'),
('invfk','invfk'),
('invfkmap','invfkmap'),
('invfoldenable','invfoldenable'),
('invgd','invgd'),
('invgdefault','invgdefault'),
('invguipty','invguipty'),
('invhid','invhid'),
('invhidden','invhidden'),
('invhk','invhk'),
('invhkmap','invhkmap'),
('invhkmapp','invhkmapp'),
('invhkp','invhkp'),
('invhls','invhls'),
('invhlsearch','invhlsearch'),
('invic','invic'),
('invicon','invicon'),
('invignorecase','invignorecase'),
('invim','invim'),
('invimc','invimc'),
('invimcmdline','invimcmdline'),
('invimd','invimd'),
('invimdisable','invimdisable'),
('invincsearch','invincsearch'),
('invinf','invinf'),
('invinfercase','invinfercase'),
('invinsertmode','invinsertmode'),
('invis','invis'),
('invjoinspaces','invjoinspaces'),
('invjs','invjs'),
('invlazyredraw','invlazyredraw'),
('invlbr','invlbr'),
('invlinebreak','invlinebreak'),
('invlisp','invlisp'),
('invlist','invlist'),
('invloadplugins','invloadplugins'),
('invlpl','invlpl'),
('invlz','invlz'),
('invma','invma'),
('invmacatsui','invmacatsui'),
('invmagic','invmagic'),
('invmh','invmh'),
('invml','invml'),
('invmod','invmod'),
('invmodeline','invmodeline'),
('invmodifiable','invmodifiable'),
('invmodified','invmodified'),
('invmore','invmore'),
('invmousef','invmousef'),
('invmousefocus','invmousefocus'),
('invmousehide','invmousehide'),
('invnu','invnu'),
('invnumber','invnumber'),
('invodev','invodev'),
('invopendevice','invopendevice'),
('invpaste','invpaste'),
('invpi','invpi'),
('invpreserveindent','invpreserveindent'),
('invpreviewwindow','invpreviewwindow'),
('invprompt','invprompt'),
('invpvw','invpvw'),
('invreadonly','invreadonly'),
('invrelativenumber','invrelativenumber'),
('invremap','invremap'),
('invrestorescreen','invrestorescreen'),
('invrevins','invrevins'),
('invri','invri'),
('invrightleft','invrightleft'),
('invrl','invrl'),
('invrnu','invrnu'),
('invro','invro'),
('invrs','invrs'),
('invru','invru'),
('invruler','invruler'),
('invsb','invsb'),
('invsc','invsc'),
('invscb','invscb'),
('invscrollbind','invscrollbind'),
('invscs','invscs'),
('invsecure','invsecure'),
('invsft','invsft'),
('invshellslash','invshellslash'),
('invshelltemp','invshelltemp'),
('invshiftround','invshiftround'),
('invshortname','invshortname'),
('invshowcmd','invshowcmd'),
('invshowfulltag','invshowfulltag'),
('invshowmatch','invshowmatch'),
('invshowmode','invshowmode'),
('invsi','invsi'),
('invsm','invsm'),
('invsmartcase','invsmartcase'),
('invsmartindent','invsmartindent'),
('invsmarttab','invsmarttab'),
('invsmd','invsmd'),
('invsn','invsn'),
('invsol','invsol'),
('invspell','invspell'),
('invsplitbelow','invsplitbelow'),
('invsplitright','invsplitright'),
('invspr','invspr'),
('invsr','invsr'),
('invssl','invssl'),
('invsta','invsta'),
('invstartofline','invstartofline'),
('invstmp','invstmp'),
('invswapfile','invswapfile'),
('invswf','invswf'),
('invta','invta'),
('invtagbsearch','invtagbsearch'),
('invtagrelative','invtagrelative'),
('invtagstack','invtagstack'),
('invtbi','invtbi'),
('invtbidi','invtbidi'),
('invtbs','invtbs'),
('invtermbidi','invtermbidi'),
('invterse','invterse'),
('invtextauto','invtextauto'),
('invtextmode','invtextmode'),
('invtf','invtf'),
('invtgst','invtgst'),
('invtildeop','invtildeop'),
('invtimeout','invtimeout'),
('invtitle','invtitle'),
('invto','invto'),
('invtop','invtop'),
('invtr','invtr'),
('invttimeout','invttimeout'),
('invttybuiltin','invttybuiltin'),
('invttyfast','invttyfast'),
('invtx','invtx'),
('invudf','invudf'),
('invundofile','invundofile'),
('invvb','invvb'),
('invvisualbell','invvisualbell'),
('invwa','invwa'),
('invwarn','invwarn'),
('invwb','invwb'),
('invweirdinvert','invweirdinvert'),
('invwfh','invwfh'),
('invwfw','invwfw'),
('invwic','invwic'),
('invwildignorecase','invwildignorecase'),
('invwildmenu','invwildmenu'),
('invwinfixheight','invwinfixheight'),
('invwinfixwidth','invwinfixwidth'),
('invwiv','invwiv'),
('invwmnu','invwmnu'),
('invwrap','invwrap'),
('invwrapscan','invwrapscan'),
('invwrite','invwrite'),
('invwriteany','invwriteany'),
('invwritebackup','invwritebackup'),
('invws','invws'),
('is','is'),
('isf','isf'),
('isfname','isfname'),
('isi','isi'),
('isident','isident'),
('isk','isk'),
('iskeyword','iskeyword'),
('isp','isp'),
('isprint','isprint'),
('joinspaces','joinspaces'),
('js','js'),
('key','key'),
('keymap','keymap'),
('keymodel','keymodel'),
('keywordprg','keywordprg'),
('km','km'),
('kmp','kmp'),
('kp','kp'),
('langmap','langmap'),
('langmenu','langmenu'),
('laststatus','laststatus'),
('lazyredraw','lazyredraw'),
('lbr','lbr'),
('lcs','lcs'),
('linebreak','linebreak'),
('lines','lines'),
('linespace','linespace'),
('lisp','lisp'),
('lispwords','lispwords'),
('list','list'),
('listchars','listchars'),
('lm','lm'),
('lmap','lmap'),
('loadplugins','loadplugins'),
('lpl','lpl'),
('ls','ls'),
('lsp','lsp'),
('lw','lw'),
('lz','lz'),
('ma','ma'),
('macatsui','macatsui'),
('magic','magic'),
('makeef','makeef'),
('makeprg','makeprg'),
('mat','mat'),
('matchpairs','matchpairs'),
('matchtime','matchtime'),
('maxcombine','maxcombine'),
('maxfuncdepth','maxfuncdepth'),
('maxmapdepth','maxmapdepth'),
('maxmem','maxmem'),
('maxmempattern','maxmempattern'),
('maxmemtot','maxmemtot'),
('mco','mco'),
('mef','mef'),
('menuitems','menuitems'),
('mfd','mfd'),
('mh','mh'),
('mis','mis'),
('mkspellmem','mkspellmem'),
('ml','ml'),
('mls','mls'),
('mm','mm'),
('mmd','mmd'),
('mmp','mmp'),
('mmt','mmt'),
('mod','mod'),
('modeline','modeline'),
('modelines','modelines'),
('modifiable','modifiable'),
('modified','modified'),
('more','more'),
('mouse','mouse'),
('mousef','mousef'),
('mousefocus','mousefocus'),
('mousehide','mousehide'),
('mousem','mousem'),
('mousemodel','mousemodel'),
('mouses','mouses'),
('mouseshape','mouseshape'),
('mouset','mouset'),
('mousetime','mousetime'),
('mp','mp'),
('mps','mps'),
('msm','msm'),
('mzq','mzq'),
('mzquantum','mzquantum'),
('nf','nf'),
('nnoremap','nnoremap'),
('noacd','noacd'),
('noai','noai'),
('noakm','noakm'),
('noallowrevins','noallowrevins'),
('noaltkeymap','noaltkeymap'),
('noanti','noanti'),
('noantialias','noantialias'),
('noar','noar'),
('noarab','noarab'),
('noarabic','noarabic'),
('noarabicshape','noarabicshape'),
('noari','noari'),
('noarshape','noarshape'),
('noautochdir','noautochdir'),
('noautoindent','noautoindent'),
('noautoread','noautoread'),
('noautowrite','noautowrite'),
('noautowriteall','noautowriteall'),
('noaw','noaw'),
('noawa','noawa'),
('nobackup','nobackup'),
('noballooneval','noballooneval'),
('nobeval','nobeval'),
('nobin','nobin'),
('nobinary','nobinary'),
('nobiosk','nobiosk'),
('nobioskey','nobioskey'),
('nobk','nobk'),
('nobl','nobl'),
('nobomb','nobomb'),
('nobuflisted','nobuflisted'),
('nocf','nocf'),
('noci','noci'),
('nocin','nocin'),
('nocindent','nocindent'),
('nocompatible','nocompatible'),
('noconfirm','noconfirm'),
('noconsk','noconsk'),
('noconskey','noconskey'),
('nocopyindent','nocopyindent'),
('nocp','nocp'),
('nocrb','nocrb'),
('nocscoperelative','nocscoperelative'),
('nocscopetag','nocscopetag'),
('nocscopeverbose','nocscopeverbose'),
('nocsre','nocsre'),
('nocst','nocst'),
('nocsverb','nocsverb'),
('nocuc','nocuc'),
('nocul','nocul'),
('nocursorbind','nocursorbind'),
('nocursorcolumn','nocursorcolumn'),
('nocursorline','nocursorline'),
('nodeco','nodeco'),
('nodelcombine','nodelcombine'),
('nodg','nodg'),
('nodiff','nodiff'),
('nodigraph','nodigraph'),
('noea','noea'),
('noeb','noeb'),
('noed','noed'),
('noedcompatible','noedcompatible'),
('noek','noek'),
('noendofline','noendofline'),
('noeol','noeol'),
('noequalalways','noequalalways'),
('noerrorbells','noerrorbells'),
('noesckeys','noesckeys'),
('noet','noet'),
('noex','noex'),
('noexpandtab','noexpandtab'),
('noexrc','noexrc'),
('nofen','nofen'),
('nofic','nofic'),
('nofileignorecase','nofileignorecase'),
('nofk','nofk'),
('nofkmap','nofkmap'),
('nofoldenable','nofoldenable'),
('nogd','nogd'),
('nogdefault','nogdefault'),
('noguipty','noguipty'),
('nohid','nohid'),
('nohidden','nohidden'),
('nohk','nohk'),
('nohkmap','nohkmap'),
('nohkmapp','nohkmapp'),
('nohkp','nohkp'),
('nohls','nohls'),
('nohlsearch','nohlsearch'),
('noic','noic'),
('noicon','noicon'),
('noignorecase','noignorecase'),
('noim','noim'),
('noimc','noimc'),
('noimcmdline','noimcmdline'),
('noimd','noimd'),
('noimdisable','noimdisable'),
('noincsearch','noincsearch'),
('noinf','noinf'),
('noinfercase','noinfercase'),
('noinsertmode','noinsertmode'),
('nois','nois'),
('nojoinspaces','nojoinspaces'),
('nojs','nojs'),
('nolazyredraw','nolazyredraw'),
('nolbr','nolbr'),
('nolinebreak','nolinebreak'),
('nolisp','nolisp'),
('nolist','nolist'),
('noloadplugins','noloadplugins'),
('nolpl','nolpl'),
('nolz','nolz'),
('noma','noma'),
('nomacatsui','nomacatsui'),
('nomagic','nomagic'),
('nomh','nomh'),
('noml','noml'),
('nomod','nomod'),
('nomodeline','nomodeline'),
('nomodifiable','nomodifiable'),
('nomodified','nomodified'),
('nomore','nomore'),
('nomousef','nomousef'),
('nomousefocus','nomousefocus'),
('nomousehide','nomousehide'),
('nonu','nonu'),
('nonumber','nonumber'),
('noodev','noodev'),
('noopendevice','noopendevice'),
('nopaste','nopaste'),
('nopi','nopi'),
('nopreserveindent','nopreserveindent'),
('nopreviewwindow','nopreviewwindow'),
('noprompt','noprompt'),
('nopvw','nopvw'),
('noreadonly','noreadonly'),
('norelativenumber','norelativenumber'),
('noremap','noremap'),
('norestorescreen','norestorescreen'),
('norevins','norevins'),
('nori','nori'),
('norightleft','norightleft'),
('norl','norl'),
('nornu','nornu'),
('noro','noro'),
('nors','nors'),
('noru','noru'),
('noruler','noruler'),
('nosb','nosb'),
('nosc','nosc'),
('noscb','noscb'),
('noscrollbind','noscrollbind'),
('noscs','noscs'),
('nosecure','nosecure'),
('nosft','nosft'),
('noshellslash','noshellslash'),
('noshelltemp','noshelltemp'),
('noshiftround','noshiftround'),
('noshortname','noshortname'),
('noshowcmd','noshowcmd'),
('noshowfulltag','noshowfulltag'),
('noshowmatch','noshowmatch'),
('noshowmode','noshowmode'),
('nosi','nosi'),
('nosm','nosm'),
('nosmartcase','nosmartcase'),
('nosmartindent','nosmartindent'),
('nosmarttab','nosmarttab'),
('nosmd','nosmd'),
('nosn','nosn'),
('nosol','nosol'),
('nospell','nospell'),
('nosplitbelow','nosplitbelow'),
('nosplitright','nosplitright'),
('nospr','nospr'),
('nosr','nosr'),
('nossl','nossl'),
('nosta','nosta'),
('nostartofline','nostartofline'),
('nostmp','nostmp'),
('noswapfile','noswapfile'),
('noswf','noswf'),
('nota','nota'),
('notagbsearch','notagbsearch'),
('notagrelative','notagrelative'),
('notagstack','notagstack'),
('notbi','notbi'),
('notbidi','notbidi'),
('notbs','notbs'),
('notermbidi','notermbidi'),
('noterse','noterse'),
('notextauto','notextauto'),
('notextmode','notextmode'),
('notf','notf'),
('notgst','notgst'),
('notildeop','notildeop'),
('notimeout','notimeout'),
('notitle','notitle'),
('noto','noto'),
('notop','notop'),
('notr','notr'),
('nottimeout','nottimeout'),
('nottybuiltin','nottybuiltin'),
('nottyfast','nottyfast'),
('notx','notx'),
('noudf','noudf'),
('noundofile','noundofile'),
('novb','novb'),
('novisualbell','novisualbell'),
('nowa','nowa'),
('nowarn','nowarn'),
('nowb','nowb'),
('noweirdinvert','noweirdinvert'),
('nowfh','nowfh'),
('nowfw','nowfw'),
('nowic','nowic'),
('nowildignorecase','nowildignorecase'),
('nowildmenu','nowildmenu'),
('nowinfixheight','nowinfixheight'),
('nowinfixwidth','nowinfixwidth'),
('nowiv','nowiv'),
('nowmnu','nowmnu'),
('nowrap','nowrap'),
('nowrapscan','nowrapscan'),
('nowrite','nowrite'),
('nowriteany','nowriteany'),
('nowritebackup','nowritebackup'),
('nows','nows'),
('nrformats','nrformats'),
('nu','nu'),
('number','number'),
('numberwidth','numberwidth'),
('nuw','nuw'),
('odev','odev'),
('oft','oft'),
('ofu','ofu'),
('omnifunc','omnifunc'),
('opendevice','opendevice'),
('operatorfunc','operatorfunc'),
('opfunc','opfunc'),
('osfiletype','osfiletype'),
('pa','pa'),
('para','para'),
('paragraphs','paragraphs'),
('paste','paste'),
('pastetoggle','pastetoggle'),
('patchexpr','patchexpr'),
('patchmode','patchmode'),
('path','path'),
('pdev','pdev'),
('penc','penc'),
('pex','pex'),
('pexpr','pexpr'),
('pfn','pfn'),
('ph','ph'),
('pheader','pheader'),
('pi','pi'),
('pm','pm'),
('pmbcs','pmbcs'),
('pmbfn','pmbfn'),
('popt','popt'),
('preserveindent','preserveindent'),
('previewheight','previewheight'),
('previewwindow','previewwindow'),
('printdevice','printdevice'),
('printencoding','printencoding'),
('printexpr','printexpr'),
('printfont','printfont'),
('printheader','printheader'),
('printmbcharset','printmbcharset'),
('printmbfont','printmbfont'),
('printoptions','printoptions'),
('prompt','prompt'),
('pt','pt'),
('pumheight','pumheight'),
('pvh','pvh'),
('pvw','pvw'),
('qe','qe'),
('quoteescape','quoteescape'),
('rdt','rdt'),
('re','re'),
('readonly','readonly'),
('redrawtime','redrawtime'),
('regexpengine','regexpengine'),
('relativenumber','relativenumber'),
('remap','remap'),
('report','report'),
('restorescreen','restorescreen'),
('revins','revins'),
('ri','ri'),
('rightleft','rightleft'),
('rightleftcmd','rightleftcmd'),
('rl','rl'),
('rlc','rlc'),
('rnu','rnu'),
('ro','ro'),
('rs','rs'),
('rtp','rtp'),
('ru','ru'),
('ruf','ruf'),
('ruler','ruler'),
('rulerformat','rulerformat'),
('runtimepath','runtimepath'),
('sb','sb'),
('sbo','sbo'),
('sbr','sbr'),
('sc','sc'),
('scb','scb'),
('scr','scr'),
('scroll','scroll'),
('scrollbind','scrollbind'),
('scrolljump','scrolljump'),
('scrolloff','scrolloff'),
('scrollopt','scrollopt'),
('scs','scs'),
('sect','sect'),
('sections','sections'),
('secure','secure'),
('sel','sel'),
('selection','selection'),
('selectmode','selectmode'),
('sessionoptions','sessionoptions'),
('sft','sft'),
('sh','sh'),
('shcf','shcf'),
('shell','shell'),
('shellcmdflag','shellcmdflag'),
('shellpipe','shellpipe'),
('shellquote','shellquote'),
('shellredir','shellredir'),
('shellslash','shellslash'),
('shelltemp','shelltemp'),
('shelltype','shelltype'),
('shellxescape','shellxescape'),
('shellxquote','shellxquote'),
('shiftround','shiftround'),
('shiftwidth','shiftwidth'),
('shm','shm'),
('shortmess','shortmess'),
('shortname','shortname'),
('showbreak','showbreak'),
('showcmd','showcmd'),
('showfulltag','showfulltag'),
('showmatch','showmatch'),
('showmode','showmode'),
('showtabline','showtabline'),
('shq','shq'),
('si','si'),
('sidescroll','sidescroll'),
('sidescrolloff','sidescrolloff'),
('siso','siso'),
('sj','sj'),
('slm','slm'),
('sm','sm'),
('smartcase','smartcase'),
('smartindent','smartindent'),
('smarttab','smarttab'),
('smc','smc'),
('smd','smd'),
('sn','sn'),
('so','so'),
('softtabstop','softtabstop'),
('sol','sol'),
('sp','sp'),
('spc','spc'),
('spell','spell'),
('spellcapcheck','spellcapcheck'),
('spellfile','spellfile'),
('spelllang','spelllang'),
('spellsuggest','spellsuggest'),
('spf','spf'),
('spl','spl'),
('splitbelow','splitbelow'),
('splitright','splitright'),
('spr','spr'),
('sps','sps'),
('sr','sr'),
('srr','srr'),
('ss','ss'),
('ssl','ssl'),
('ssop','ssop'),
('st','st'),
('sta','sta'),
('stal','stal'),
('startofline','startofline'),
('statusline','statusline'),
('stl','stl'),
('stmp','stmp'),
('sts','sts'),
('su','su'),
('sua','sua'),
('suffixes','suffixes'),
('suffixesadd','suffixesadd'),
('sw','sw'),
('swapfile','swapfile'),
('swapsync','swapsync'),
('swb','swb'),
('swf','swf'),
('switchbuf','switchbuf'),
('sws','sws'),
('sxe','sxe'),
('sxq','sxq'),
('syn','syn'),
('synmaxcol','synmaxcol'),
('syntax','syntax'),
('t_AB','t_AB'),
('t_AF','t_AF'),
('t_AL','t_AL'),
('t_CS','t_CS'),
('t_CV','t_CV'),
('t_Ce','t_Ce'),
('t_Co','t_Co'),
('t_Cs','t_Cs'),
('t_DL','t_DL'),
('t_EI','t_EI'),
('t_F1','t_F1'),
('t_F2','t_F2'),
('t_F3','t_F3'),
('t_F4','t_F4'),
('t_F5','t_F5'),
('t_F6','t_F6'),
('t_F7','t_F7'),
('t_F8','t_F8'),
('t_F9','t_F9'),
('t_IE','t_IE'),
('t_IS','t_IS'),
('t_K1','t_K1'),
('t_K3','t_K3'),
('t_K4','t_K4'),
('t_K5','t_K5'),
('t_K6','t_K6'),
('t_K7','t_K7'),
('t_K8','t_K8'),
('t_K9','t_K9'),
('t_KA','t_KA'),
('t_KB','t_KB'),
('t_KC','t_KC'),
('t_KD','t_KD'),
('t_KE','t_KE'),
('t_KF','t_KF'),
('t_KG','t_KG'),
('t_KH','t_KH'),
('t_KI','t_KI'),
('t_KJ','t_KJ'),
('t_KK','t_KK'),
('t_KL','t_KL'),
('t_RI','t_RI'),
('t_RV','t_RV'),
('t_SI','t_SI'),
('t_Sb','t_Sb'),
('t_Sf','t_Sf'),
('t_WP','t_WP'),
('t_WS','t_WS'),
('t_ZH','t_ZH'),
('t_ZR','t_ZR'),
('t_al','t_al'),
('t_bc','t_bc'),
('t_cd','t_cd'),
('t_ce','t_ce'),
('t_cl','t_cl'),
('t_cm','t_cm'),
('t_cs','t_cs'),
('t_da','t_da'),
('t_db','t_db'),
('t_dl','t_dl'),
('t_fs','t_fs'),
('t_k1','t_k1'),
('t_k2','t_k2'),
('t_k3','t_k3'),
('t_k4','t_k4'),
('t_k5','t_k5'),
('t_k6','t_k6'),
('t_k7','t_k7'),
('t_k8','t_k8'),
('t_k9','t_k9'),
('t_kB','t_kB'),
('t_kD','t_kD'),
('t_kI','t_kI'),
('t_kN','t_kN'),
('t_kP','t_kP'),
('t_kb','t_kb'),
('t_kd','t_kd'),
('t_ke','t_ke'),
('t_kh','t_kh'),
('t_kl','t_kl'),
('t_kr','t_kr'),
('t_ks','t_ks'),
('t_ku','t_ku'),
('t_le','t_le'),
('t_mb','t_mb'),
('t_md','t_md'),
('t_me','t_me'),
('t_mr','t_mr'),
('t_ms','t_ms'),
('t_nd','t_nd'),
('t_op','t_op'),
('t_se','t_se'),
('t_so','t_so'),
('t_sr','t_sr'),
('t_te','t_te'),
('t_ti','t_ti'),
('t_ts','t_ts'),
('t_u7','t_u7'),
('t_ue','t_ue'),
('t_us','t_us'),
('t_ut','t_ut'),
('t_vb','t_vb'),
('t_ve','t_ve'),
('t_vi','t_vi'),
('t_vs','t_vs'),
('t_xs','t_xs'),
('ta','ta'),
('tabline','tabline'),
('tabpagemax','tabpagemax'),
('tabstop','tabstop'),
('tag','tag'),
('tagbsearch','tagbsearch'),
('taglength','taglength'),
('tagrelative','tagrelative'),
('tags','tags'),
('tagstack','tagstack'),
('tal','tal'),
('tb','tb'),
('tbi','tbi'),
('tbidi','tbidi'),
('tbis','tbis'),
('tbs','tbs'),
('tenc','tenc'),
('term','term'),
('termbidi','termbidi'),
('termencoding','termencoding'),
('terse','terse'),
('textauto','textauto'),
('textmode','textmode'),
('textwidth','textwidth'),
('tf','tf'),
('tgst','tgst'),
('thesaurus','thesaurus'),
('tildeop','tildeop'),
('timeout','timeout'),
('timeoutlen','timeoutlen'),
('title','title'),
('titlelen','titlelen'),
('titleold','titleold'),
('titlestring','titlestring'),
('tl','tl'),
('tm','tm'),
('to','to'),
('toolbar','toolbar'),
('toolbariconsize','toolbariconsize'),
('top','top'),
('tpm','tpm'),
('tr','tr'),
('ts','ts'),
('tsl','tsl'),
('tsr','tsr'),
('ttimeout','ttimeout'),
('ttimeoutlen','ttimeoutlen'),
('ttm','ttm'),
('tty','tty'),
('ttybuiltin','ttybuiltin'),
('ttyfast','ttyfast'),
('ttym','ttym'),
('ttymouse','ttymouse'),
('ttyscroll','ttyscroll'),
('ttytype','ttytype'),
('tw','tw'),
('tx','tx'),
('uc','uc'),
('udf','udf'),
('udir','udir'),
('ul','ul'),
('undodir','undodir'),
('undofile','undofile'),
('undolevels','undolevels'),
('undoreload','undoreload'),
('updatecount','updatecount'),
('updatetime','updatetime'),
('ur','ur'),
('ut','ut'),
('vb','vb'),
('vbs','vbs'),
('vdir','vdir'),
('ve','ve'),
('verbose','verbose'),
('verbosefile','verbosefile'),
('vfile','vfile'),
('vi','vi'),
('viewdir','viewdir'),
('viewoptions','viewoptions'),
('viminfo','viminfo'),
('virtualedit','virtualedit'),
('visualbell','visualbell'),
('vnoremap','vnoremap'),
('vop','vop'),
('wa','wa'),
('wak','wak'),
('warn','warn'),
('wb','wb'),
('wc','wc'),
('wcm','wcm'),
('wd','wd'),
('weirdinvert','weirdinvert'),
('wfh','wfh'),
('wfw','wfw'),
('wh','wh'),
('whichwrap','whichwrap'),
('wi','wi'),
('wic','wic'),
('wig','wig'),
('wildchar','wildchar'),
('wildcharm','wildcharm'),
('wildignore','wildignore'),
('wildignorecase','wildignorecase'),
('wildmenu','wildmenu'),
('wildmode','wildmode'),
('wildoptions','wildoptions'),
('wim','wim'),
('winaltkeys','winaltkeys'),
('window','window'),
('winfixheight','winfixheight'),
('winfixwidth','winfixwidth'),
('winheight','winheight'),
('winminheight','winminheight'),
('winminwidth','winminwidth'),
('winwidth','winwidth'),
('wiv','wiv'),
('wiw','wiw'),
('wm','wm'),
('wmh','wmh'),
('wmnu','wmnu'),
('wmw','wmw'),
('wop','wop'),
('wrap','wrap'),
('wrapmargin','wrapmargin'),
('wrapscan','wrapscan'),
('write','write'),
('writeany','writeany'),
('writebackup','writebackup'),
('writedelay','writedelay'),
('ws','ws'),
('ww','ww'),
)
return var
option = _getoption()
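# Minimal sketch (illustrative, not part of the generated tables above): each pair
# maps a Vim abbreviation to its full name, so a plain lookup dict is enough to
# expand shortened event/command/option names. Only `auto`, `command` and `option`
# come from this module; the helper below is hypothetical.
def _expand(name, table):
    """Return the full form for an abbreviated Vim name, or the name unchanged."""
    mapping = dict(table)  # for duplicate abbreviations the later entry wins
    return mapping.get(name, name)
# e.g. _expand('bd', command) -> 'bdelete', _expand('cn', command) -> 'cnext'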
|
|
import os
import re
import math
from digoie.conf.storage import __elastic_search_dir__, __reverb_input_dir__, REVERB_INPUT_EXT
from digoie.core.files.names import load_names
from digoie.conf.global_settings import TARGET_PERSON_NAME, TARGET_PHONE_NUMBER
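# The functions below assume tab-separated ReVerb extractor output, indexed the way
# it is consumed here (inferred from the code rather than from a format spec):
# column 11 holds the confidence score, columns 5-10 the token index ranges of
# arg1/rel/arg2, columns 13-14 the POS-tag and chunk-tag sequences, and
# columns 15-17 the normalised arg1/rel/arg2 strings.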
def extract(raw, target=TARGET_PERSON_NAME):
print 'extract features...'
featured = []
for line in raw:
line = preprocess_line(line)
# load basic info for reverb output line
confidence = load_confidence_symbol(line[11])
rvd_arg1_val, rvd_rel_val, rvd_arg2_val = load_ar_vals(line, target=target)
rvd_arg1_post_tags, rvd_arg1_ct_tags, rvd_rel_post_tags, rvd_rel_ct_tags, rvd_arg2_post_tags, rvd_arg2_ct_tags = preprocess_tags(line)
var_list = [
confidence,
rvd_arg1_val,
rvd_arg1_post_tags,
rvd_arg1_ct_tags,
rvd_rel_val,
rvd_rel_post_tags,
rvd_rel_ct_tags,
rvd_arg2_val,
rvd_arg2_post_tags,
rvd_arg2_ct_tags
]
rv4fe_data = ' '.join(var_list)
featured.append(rv4fe_data)
return featured
def preprocess_line(line):
line = line[:-1]
line = line.split('\t')
return line
def load_ar_vals(line, target=TARGET_PERSON_NAME):
if target == TARGET_PERSON_NAME:
return load_av4name(line)
elif target == TARGET_PHONE_NUMBER:
return load_av4phoneno(line)
else:
return load_av4name(line)
def load_av4name(line):
rvd_arg1_val = str(line[15]).replace('.', '')
rvd_rel_val = str(line[16]).replace('.', '')
rvd_arg2_val = str(line[17]).replace('.', '')
# filter features
names = load_names()
    feature_filter = FeatureFilter(names=names)
    rvd_arg1_val = feature_filter.filtering(rvd_arg1_val)
    rvd_rel_val = feature_filter.filtering(rvd_rel_val)
    rvd_arg2_val = feature_filter.filtering(rvd_arg2_val)
return rvd_arg1_val, rvd_rel_val, rvd_arg2_val
def load_av4phoneno(line):
rvd_arg1_val = str(line[15]).replace('.', '')
rvd_rel_val = str(line[16]).replace('.', '')
rvd_arg2_val = str(line[17]).replace('.', '')
return rvd_arg1_val, rvd_rel_val, rvd_arg2_val
def preprocess_tags(line):
rvd_post = str(line[13]).split(' ')
rvd_ct = str(line[14]).split(' ')
rvd_arg1_start_idx = int(line[5])
rvd_arg1_end_idx = int(line[6])
rvd_rel_start_idx = int(line[7])
rvd_rel_end_idx = int(line[8])
rvd_arg2_start_idx = int(line[9])
rvd_arg2_end_idx = int(line[10])
# load post and chunk tags
rvd_arg1_post_tags = rvd_post[rvd_arg1_start_idx:rvd_arg1_end_idx]
rvd_arg1_ct_tags = rvd_ct[rvd_arg1_start_idx:rvd_arg1_end_idx]
rvd_rel_post_tags = rvd_post[rvd_rel_start_idx:rvd_rel_end_idx]
rvd_rel_ct_tags = rvd_ct[rvd_rel_start_idx:rvd_rel_end_idx]
rvd_arg2_post_tags = rvd_post[rvd_arg2_start_idx:rvd_arg2_end_idx]
rvd_arg2_ct_tags = rvd_ct[rvd_arg2_start_idx:rvd_arg2_end_idx]
# format chunk tags
rvd_arg1_ct_tags = [tag.replace('-','2') for tag in rvd_arg1_ct_tags]
rvd_rel_ct_tags = [tag.replace('-','2') for tag in rvd_rel_ct_tags]
rvd_arg2_ct_tags = [tag.replace('-','2') for tag in rvd_arg2_ct_tags]
# add prefix for tags
prefix = 'S4'
rvd_arg1_post_tags = [prefix + elt for elt in rvd_arg1_post_tags]
rvd_arg1_ct_tags = [prefix + elt for elt in rvd_arg1_ct_tags]
prefix = 'P4'
rvd_rel_post_tags = [prefix + elt for elt in rvd_rel_post_tags]
rvd_rel_ct_tags = [prefix + elt for elt in rvd_rel_ct_tags]
prefix = 'O4'
rvd_arg2_post_tags = [prefix + elt for elt in rvd_arg2_post_tags]
rvd_arg2_ct_tags = [prefix + elt for elt in rvd_arg2_ct_tags]
# transfer list into string
rvd_arg1_post_tags = ' '.join(rvd_arg1_post_tags)
rvd_arg1_ct_tags = ' '.join(rvd_arg1_ct_tags)
rvd_rel_post_tags = ' '.join(rvd_rel_post_tags)
rvd_rel_ct_tags = ' '.join(rvd_rel_ct_tags)
rvd_arg2_post_tags = ' '.join(rvd_arg2_post_tags)
rvd_arg2_ct_tags = ' '.join(rvd_arg2_ct_tags)
return rvd_arg1_post_tags, rvd_arg1_ct_tags, rvd_rel_post_tags, rvd_rel_ct_tags, rvd_arg2_post_tags, rvd_arg2_ct_tags
# remove names from feature
# rvd_arg1_val = remove_names(rvd_arg1_val)
# rvd_rel_val = remove_names(rvd_rel_val)
# rvd_arg2_val = remove_names(rvd_arg2_val)
def load_confidence_symbol(conf):
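    # `conf` arrives as a string, assumed to look like '0.856'; character index 2
    # is the first decimal digit, so the symbol becomes e.g. 'conf8' (a coarse
    # confidence bucket).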
return 'conf' + str(conf)[2]
# return 'conf' + str(math.floor(float(conf*10)))
class FeatureFilter():
def __init__(self, names=None):
self.names = names
def filtering(self, sentence):
result = []
word_list = sentence.split(' ')
for word in word_list:
word = self.refine_word(word)
if self.is_valid_word(word):
result.append(word)
return ' '.join(result)
def refine_word(self, word):
word = word.lower()
return word
def is_valid_word(self, word):
# if self.is_contain_name(word):
# return False
if len(word) < 2:
return False
        # keep only tokens made up of two or more ASCII letters
        reg = re.compile("^[a-zA-Z]{2,}$")
        if not re.match(reg, word):
            return False
return True
def is_contain_name(self, word):
if word in self.names:
return True
else:
return False
"""
def remove_names(vals):
result = []
# load name
path = os.path.join(__elastic_search_dir__, 'names')
names_file = open(path, 'rU')
names = list([name[:-1] for name in names_file])
names_file.close()
val_list = vals.split(' ')
for val in val_list:
if val.lower() not in names:
result.append(val.lower())
return ' '.join(result)
"""
|
|
"""
Convolution (using **FFT**, **NTT**, **FWHT**), Subset Convolution,
Covering Product, Intersecting Product
"""
from __future__ import print_function, division
from sympy.core import S, sympify
from sympy.core.compatibility import range, as_int, iterable
from sympy.core.function import expand_mul
from sympy.discrete.transforms import (
fft, ifft, ntt, intt, fwht, ifwht,
mobius_transform, inverse_mobius_transform)
def convolution(a, b, cycle=0, dps=None, prime=None, dyadic=None, subset=None):
"""
Performs convolution by determining the type of desired
convolution using hints.
Exactly one of ``dps``, ``prime``, ``dyadic``, ``subset`` arguments
should be specified explicitly for identifying the type of convolution,
and the argument ``cycle`` can be specified optionally.
For the default arguments, linear convolution is performed using **FFT**.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
cycle : Integer
Specifies the length for doing cyclic convolution.
dps : Integer
Specifies the number of decimal digits for precision for
performing **FFT** on the sequence.
prime : Integer
Prime modulus of the form `(m 2^k + 1)` to be used for
performing **NTT** on the sequence.
dyadic : bool
Identifies the convolution type as dyadic (*bitwise-XOR*)
convolution, which is performed using **FWHT**.
subset : bool
Identifies the convolution type as subset convolution.
Examples
========
>>> from sympy import convolution, symbols, S, I
>>> u, v, w, x, y, z = symbols('u v w x y z')
>>> convolution([1 + 2*I, 4 + 3*I], [S(5)/4, 6], dps=3)
[1.25 + 2.5*I, 11.0 + 15.8*I, 24.0 + 18.0*I]
>>> convolution([1, 2, 3], [4, 5, 6], cycle=3)
[31, 31, 28]
>>> convolution([111, 777], [888, 444], prime=19*2**10 + 1)
[1283, 19351, 14219]
>>> convolution([111, 777], [888, 444], prime=19*2**10 + 1, cycle=2)
[15502, 19351]
>>> convolution([u, v], [x, y, z], dyadic=True)
[u*x + v*y, u*y + v*x, u*z, v*z]
>>> convolution([u, v], [x, y, z], dyadic=True, cycle=2)
[u*x + u*z + v*y, u*y + v*x + v*z]
>>> convolution([u, v, w], [x, y, z], subset=True)
[u*x, u*y + v*x, u*z + w*x, v*z + w*y]
>>> convolution([u, v, w], [x, y, z], subset=True, cycle=3)
[u*x + v*z + w*y, u*y + v*x, u*z + w*x]
"""
c = as_int(cycle)
if c < 0:
raise ValueError("The length for cyclic convolution "
"must be non-negative")
dyadic = True if dyadic else None
subset = True if subset else None
if sum(x is not None for x in (prime, dps, dyadic, subset)) > 1:
raise TypeError("Ambiguity in determining the type of convolution")
if prime is not None:
ls = convolution_ntt(a, b, prime=prime)
return ls if not c else [sum(ls[i::c]) % prime for i in range(c)]
if dyadic:
ls = convolution_fwht(a, b)
elif subset:
ls = convolution_subset(a, b)
else:
ls = convolution_fft(a, b, dps=dps)
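    # cyclic convolution of length c folds the linear result: entries
    # i, i + c, i + 2*c, ... are summed into slot i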
return ls if not c else [sum(ls[i::c]) for i in range(c)]
#----------------------------------------------------------------------------#
# #
# Convolution for Complex domain #
# #
#----------------------------------------------------------------------------#
def convolution_fft(a, b, dps=None):
"""
Performs linear convolution using Fast Fourier Transform.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
dps : Integer
Specifies the number of decimal digits for precision.
Examples
========
>>> from sympy import S, I
>>> from sympy.discrete.convolutions import convolution_fft
>>> convolution_fft([2, 3], [4, 5])
[8, 22, 15]
>>> convolution_fft([2, 5], [6, 7, 3])
[12, 44, 41, 15]
>>> convolution_fft([1 + 2*I, 4 + 3*I], [S(5)/4, 6])
[5/4 + 5*I/2, 11 + 63*I/4, 24 + 18*I]
References
==========
.. [1] https://en.wikipedia.org/wiki/Convolution_theorem
.. [2] https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general%29
"""
a, b = a[:], b[:]
n = m = len(a) + len(b) - 1 # convolution size
if n > 0 and n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
a, b = fft(a, dps), fft(b, dps)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = ifft(a, dps)[:m]
return a
#----------------------------------------------------------------------------#
# #
# Convolution for GF(p) #
# #
#----------------------------------------------------------------------------#
def convolution_ntt(a, b, prime):
"""
Performs linear convolution using Number Theoretic Transform.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
prime : Integer
Prime modulus of the form `(m 2^k + 1)` to be used for performing
**NTT** on the sequence.
Examples
========
>>> from sympy.discrete.convolutions import convolution_ntt
>>> convolution_ntt([2, 3], [4, 5], prime=19*2**10 + 1)
[8, 22, 15]
>>> convolution_ntt([2, 5], [6, 7, 3], prime=19*2**10 + 1)
[12, 44, 41, 15]
>>> convolution_ntt([333, 555], [222, 666], prime=19*2**10 + 1)
[15555, 14219, 19404]
References
==========
.. [1] https://en.wikipedia.org/wiki/Convolution_theorem
.. [2] https://en.wikipedia.org/wiki/Discrete_Fourier_transform_(general%29
"""
a, b, p = a[:], b[:], as_int(prime)
n = m = len(a) + len(b) - 1 # convolution size
if n > 0 and n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [0]*(n - len(a))
b += [0]*(n - len(b))
a, b = ntt(a, p), ntt(b, p)
a = [x*y % p for x, y in zip(a, b)]
a = intt(a, p)[:m]
return a
#----------------------------------------------------------------------------#
# #
# Convolution for 2**n-group #
# #
#----------------------------------------------------------------------------#
def convolution_fwht(a, b):
"""
Performs dyadic (*bitwise-XOR*) convolution using Fast Walsh Hadamard
Transform.
The convolution is automatically padded to the right with zeros, as the
*radix-2 FWHT* requires the number of sample points to be a power of 2.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
Examples
========
>>> from sympy import symbols, S, I
>>> from sympy.discrete.convolutions import convolution_fwht
>>> u, v, x, y = symbols('u v x y')
>>> convolution_fwht([u, v], [x, y])
[u*x + v*y, u*y + v*x]
>>> convolution_fwht([2, 3], [4, 5])
[23, 22]
>>> convolution_fwht([2, 5 + 4*I, 7], [6*I, 7, 3 + 4*I])
[56 + 68*I, -10 + 30*I, 6 + 50*I, 48 + 32*I]
>>> convolution_fwht([S(33)/7, S(55)/6, S(7)/4], [S(2)/3, 5])
[2057/42, 1870/63, 7/6, 35/4]
References
==========
.. [1] https://www.radioeng.cz/fulltexts/2002/02_03_40_42.pdf
.. [2] https://en.wikipedia.org/wiki/Hadamard_transform
"""
if not a or not b:
return []
a, b = a[:], b[:]
n = max(len(a), len(b))
if n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
a, b = fwht(a), fwht(b)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = ifwht(a)
return a
#----------------------------------------------------------------------------#
# #
# Subset Convolution #
# #
#----------------------------------------------------------------------------#
def convolution_subset(a, b):
"""
Performs Subset Convolution of given sequences.
The indices of each argument, considered as bit strings, correspond to
subsets of a finite set.
The sequence is automatically padded to the right with zeros, as the
definition of subset based on bitmasks (indices) requires the size of
sequence to be a power of 2.
Parameters
==========
a, b : iterables
The sequences for which convolution is performed.
Examples
========
>>> from sympy import symbols, S, I
>>> from sympy.discrete.convolutions import convolution_subset
>>> u, v, x, y, z = symbols('u v x y z')
>>> convolution_subset([u, v], [x, y])
[u*x, u*y + v*x]
>>> convolution_subset([u, v, x], [y, z])
[u*y, u*z + v*y, x*y, x*z]
>>> convolution_subset([1, S(2)/3], [3, 4])
[3, 6]
>>> convolution_subset([1, 3, S(5)/7], [7])
[7, 21, 5, 0]
References
==========
.. [1] https://people.csail.mit.edu/rrw/presentations/subset-conv.pdf
"""
if not a or not b:
return []
if not iterable(a) or not iterable(b):
raise TypeError("Expected a sequence of coefficients for convolution")
a = [sympify(arg) for arg in a]
b = [sympify(arg) for arg in b]
n = max(len(a), len(b))
if n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
c = [S.Zero]*n
for mask in range(n):
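        # enumerate the non-zero submasks of `mask` with the standard
        # smask -> (smask - 1) & mask trick; the empty submask (smask == 0)
        # is handled by the statement after the while loop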
smask = mask
while smask > 0:
c[mask] += expand_mul(a[smask] * b[mask^smask])
smask = (smask - 1)&mask
c[mask] += expand_mul(a[smask] * b[mask^smask])
return c
#----------------------------------------------------------------------------#
# #
# Covering Product #
# #
#----------------------------------------------------------------------------#
def covering_product(a, b):
"""
Returns the covering product of given sequences.
The indices of each argument, considered as bit strings, correspond to
subsets of a finite set.
The covering product of given sequences is a sequence which contains
the sum of products of the elements of the given sequences grouped by
the *bitwise-OR* of the corresponding indices.
The sequence is automatically padded to the right with zeros, as the
definition of subset based on bitmasks (indices) requires the size of
sequence to be a power of 2.
Parameters
==========
a, b : iterables
The sequences for which covering product is to be obtained.
Examples
========
>>> from sympy import symbols, S, I, covering_product
>>> u, v, x, y, z = symbols('u v x y z')
>>> covering_product([u, v], [x, y])
[u*x, u*y + v*x + v*y]
>>> covering_product([u, v, x], [y, z])
[u*y, u*z + v*y + v*z, x*y, x*z]
>>> covering_product([1, S(2)/3], [3, 4 + 5*I])
[3, 26/3 + 25*I/3]
>>> covering_product([1, 3, S(5)/7], [7, 8])
[7, 53, 5, 40/7]
References
==========
.. [1] https://people.csail.mit.edu/rrw/presentations/subset-conv.pdf
"""
if not a or not b:
return []
a, b = a[:], b[:]
n = max(len(a), len(b))
if n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
a, b = mobius_transform(a), mobius_transform(b)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = inverse_mobius_transform(a)
return a
#----------------------------------------------------------------------------#
# #
# Intersecting Product #
# #
#----------------------------------------------------------------------------#
def intersecting_product(a, b):
"""
Returns the intersecting product of given sequences.
The indices of each argument, considered as bit strings, correspond to
subsets of a finite set.
The intersecting product of given sequences is the sequence which
contains the sum of products of the elements of the given sequences
grouped by the *bitwise-AND* of the corresponding indices.
The sequence is automatically padded to the right with zeros, as the
definition of subset based on bitmasks (indices) requires the size of
sequence to be a power of 2.
Parameters
==========
a, b : iterables
The sequences for which intersecting product is to be obtained.
Examples
========
>>> from sympy import symbols, S, I, intersecting_product
>>> u, v, x, y, z = symbols('u v x y z')
>>> intersecting_product([u, v], [x, y])
[u*x + u*y + v*x, v*y]
>>> intersecting_product([u, v, x], [y, z])
[u*y + u*z + v*y + x*y + x*z, v*z, 0, 0]
>>> intersecting_product([1, S(2)/3], [3, 4 + 5*I])
[9 + 5*I, 8/3 + 10*I/3]
>>> intersecting_product([1, 3, S(5)/7], [7, 8])
[327/7, 24, 0, 0]
References
==========
.. [1] https://people.csail.mit.edu/rrw/presentations/subset-conv.pdf
"""
if not a or not b:
return []
a, b = a[:], b[:]
n = max(len(a), len(b))
if n&(n - 1): # not a power of 2
n = 2**n.bit_length()
# padding with zeros
a += [S.Zero]*(n - len(a))
b += [S.Zero]*(n - len(b))
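    # subset=False applies the Mobius transform over supersets, so the pointwise
    # product groups terms by the bitwise-AND (intersection) of the indices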
a, b = mobius_transform(a, subset=False), mobius_transform(b, subset=False)
a = [expand_mul(x*y) for x, y in zip(a, b)]
a = inverse_mobius_transform(a, subset=False)
return a
|
|
'''
Created on Jun 29, 2009
@author: Torre Wenaus
'''
class ErrorCodes:
errorFields = ('pilot','exe','sup','ddm','brokerage','jobdispatcher','taskbuffer')
errorCodes = {}
errorStages = {}
def __init__(self):
for f in self.errorFields:
self.errorCodes['%serrorcode'%f] = {}
self.errorStages['%serrorcode'%f] = {}
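        # self.errorCodes maps keys such as 'piloterrorcode' to {code: description};
        # self.errorStages maps the same keys to {code: '<component>-<phase>'} labels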
## Panda errors can be found at https://twiki.cern.ch/twiki/bin/view/Atlas/PandaErrorCodes
self.errorCodes['ddmerrorcode'][100] = 'DQ2 server error'
self.errorStages['ddmerrorcode'][100] = 'ddm-start'
self.errorCodes['ddmerrorcode'][200] = 'Could not add output files to dataset'
self.errorStages['ddmerrorcode'][200] = 'ddm-end'
self.errorCodes['ddmerrorcode'][201] = 'Panda server failed to register subscription in DQ2'
self.errorStages['ddmerrorcode'][201] = 'ddm-end'
self.errorCodes['jobdispatchererrorcode'][100] = 'Lost heartbeat'
self.errorStages['jobdispatchererrorcode'][100] = 'time-during'
self.errorCodes['jobdispatchererrorcode'][101] = 'Job recovery failed for three days'
self.errorStages['jobdispatchererrorcode'][101] = 'time-during'
self.errorCodes['jobdispatchererrorcode'][102] = 'No reply to sent job'
self.errorStages['jobdispatchererrorcode'][102] = 'time-during'
self.errorCodes['taskbuffererrorcode'][100] = 'Job expired and killed three days after submission (or killed by user)'
self.errorStages['taskbuffererrorcode'][100] = 'user-during'
        self.errorCodes['taskbuffererrorcode'][101] = 'Transfer timeout (2 weeks)'
self.errorStages['taskbuffererrorcode'][101] = 'time-end'
self.errorCodes['taskbuffererrorcode'][102] = 'Expired three days after submission'
self.errorStages['taskbuffererrorcode'][102] = 'time-end'
self.errorCodes['taskbuffererrorcode'][103] = 'Aborted by executor interface'
self.errorStages['taskbuffererrorcode'][103] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][104] = 'Waiting job timed out'
self.errorStages['taskbuffererrorcode'][104] = 'panda-during'
        self.errorCodes['taskbuffererrorcode'][105] = 'Reassigned by rebrokerage'
self.errorStages['taskbuffererrorcode'][105] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][106] = 'Reassigned by server-side retry'
self.errorStages['taskbuffererrorcode'][106] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][107] = 'Retried by pilot'
self.errorStages['taskbuffererrorcode'][107] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][110] = 'Input file lost in SE'
self.errorStages['taskbuffererrorcode'][110] = 'panda-during'
self.errorCodes['piloterrorcode'][1008] = 'General pilot error, consult batch log'
self.errorStages['piloterrorcode'][1008] = 'ddm-start'
self.errorCodes['piloterrorcode'][1097] = 'Get function can not be called for staging input file'
self.errorStages['piloterrorcode'][1097] = 'ddm-start'
self.errorCodes['piloterrorcode'][1098] = 'No space left on local disk'
self.errorStages['piloterrorcode'][1098] = 'athena-during'
self.errorCodes['piloterrorcode'][1099] = 'Get error: Staging input file failed'
self.errorStages['piloterrorcode'][1099] = 'ddm-start'
self.errorCodes['piloterrorcode'][1100] = 'Get error: Replica not found'
self.errorStages['piloterrorcode'][1100] = 'ddm-start'
self.errorCodes['piloterrorcode'][1101] = 'LRC registration error: Connection refused'
self.errorStages['piloterrorcode'][1101] = 'ddm-end'
self.errorCodes['piloterrorcode'][1102] = 'Expected output file does not exist'
self.errorStages['piloterrorcode'][1102] = 'athena-end'
self.errorCodes['piloterrorcode'][1103] = 'No such file or directory'
self.errorStages['piloterrorcode'][1103] = 'ddm-start'
self.errorCodes['piloterrorcode'][1104] = 'User work directory too large'
self.errorStages['piloterrorcode'][1104] = 'user-during'
self.errorCodes['piloterrorcode'][1105] = 'Put error: Failed to add file size and checksum to LFC'
self.errorStages['piloterrorcode'][1105] = 'ddm-end'
self.errorCodes['piloterrorcode'][1106] = 'Payload stdout file too big'
self.errorStages['piloterrorcode'][1106] = 'user-during'
self.errorCodes['piloterrorcode'][1107] = 'Get error: Missing DBRelease file'
self.errorStages['piloterrorcode'][1107] = 'ddm-start'
self.errorCodes['piloterrorcode'][1108] = 'Put error: LCG registration failed'
self.errorStages['piloterrorcode'][1108] = 'ddm-end'
self.errorCodes['piloterrorcode'][1109] = 'Required CMTCONFIG incompatible with WN'
self.errorStages['piloterrorcode'][1109] = 'ddm-start'
self.errorCodes['piloterrorcode'][1110] = 'Failed during setup'
self.errorStages['piloterrorcode'][1110] = 'ddm-start'
self.errorCodes['piloterrorcode'][1111] = 'Exception caught by runJob'
self.errorStages['piloterrorcode'][1111] = 'ddm-start'
self.errorCodes['piloterrorcode'][1112] = 'Exception caught by pilot'
self.errorStages['piloterrorcode'][1112] = 'ddm-start'
self.errorCodes['piloterrorcode'][1113] = 'Get error: Failed to import LFC python module'
self.errorStages['piloterrorcode'][1113] = 'ddm-start'
self.errorCodes['piloterrorcode'][1114] = 'Put error: Failed to import LFC python module'
self.errorStages['piloterrorcode'][1114] = 'ddm-end'
self.errorCodes['piloterrorcode'][1115] = 'NFS SQLite locking problems'
self.errorStages['piloterrorcode'][1115] = 'athena-end'
self.errorCodes['piloterrorcode'][1116] = 'Pilot could not download queuedata'
self.errorStages['piloterrorcode'][1116] = 'ddm-start'
self.errorCodes['piloterrorcode'][1117] = 'Pilot found non-valid queuedata'
self.errorStages['piloterrorcode'][1117] = 'ddm-start'
self.errorCodes['piloterrorcode'][1118] = 'Pilot could not curl space report'
self.errorStages['piloterrorcode'][1118] = 'ddm-start'
self.errorCodes['piloterrorcode'][1119] = 'Pilot aborted due to DDM space shortage'
self.errorStages['piloterrorcode'][1119] = 'ddm-start'
self.errorCodes['piloterrorcode'][1120] = 'Space token descriptor does not match destination path'
self.errorStages['piloterrorcode'][1120] = 'ddm-end'
self.errorCodes['piloterrorcode'][1121] = 'Can not read the xml file for registering output files to dispatcher'
self.errorStages['piloterrorcode'][1121] = 'athena-end'
self.errorCodes['piloterrorcode'][1122] = 'Bad replica entry returned by lfc_getreplicas(): SFN not set in LFC for this guid'
self.errorStages['piloterrorcode'][1122] = 'ddm-start'
self.errorCodes['piloterrorcode'][1123] = 'Missing guid in output file list'
self.errorStages['piloterrorcode'][1123] = 'ddm-end'
self.errorCodes['piloterrorcode'][1124] = 'Output file too large'
self.errorStages['piloterrorcode'][1124] = 'athena-during'
self.errorCodes['piloterrorcode'][1130] = 'Get error: Failed to get POOL file catalog'
self.errorStages['piloterrorcode'][1130] = 'ddm-start'
self.errorCodes['piloterrorcode'][1131] = 'Put function can not be called for staging out'
self.errorStages['piloterrorcode'][1131] = 'ddm-end'
self.errorCodes['piloterrorcode'][1132] = 'LRC registration error (consult log file)'
self.errorStages['piloterrorcode'][1132] = 'ddm-end'
self.errorCodes['piloterrorcode'][1133] = 'Put error: Fetching default storage URL failed'
self.errorStages['piloterrorcode'][1133] = 'ddm-end'
self.errorCodes['piloterrorcode'][1134] = 'Put error: Error in mkdir on localSE, not allowed or no available space'
self.errorStages['piloterrorcode'][1134] = 'ddm-end'
self.errorCodes['piloterrorcode'][1135] = 'Could not get file size in job workdir'
self.errorStages['piloterrorcode'][1135] = 'ddm-end'
self.errorCodes['piloterrorcode'][1136] = 'Put error: Error running md5sum to the file in job workdir'
self.errorStages['piloterrorcode'][1136] = 'ddm-end'
self.errorCodes['piloterrorcode'][1137] = 'Put error: Error in copying the file from job workdir to localSE'
self.errorStages['piloterrorcode'][1137] = 'ddm-end'
self.errorCodes['piloterrorcode'][1138] = 'Put error: could not get the file size on localSE'
self.errorStages['piloterrorcode'][1138] = 'ddm-end'
self.errorCodes['piloterrorcode'][1139] = 'Put error: Problem with copying from job workdir to local SE: size mismatch'
self.errorStages['piloterrorcode'][1139] = 'ddm-end'
self.errorCodes['piloterrorcode'][1140] = 'Put error: Error running md5sum to the file on localSE'
self.errorStages['piloterrorcode'][1140] = 'ddm-end'
self.errorCodes['piloterrorcode'][1141] = 'Put error: Problem with copying from job workdir to local SE: md5sum mismatch'
self.errorStages['piloterrorcode'][1141] = 'ddm-end'
self.errorCodes['piloterrorcode'][1142] = 'Put error: failed to register the file on local SE'
self.errorStages['piloterrorcode'][1142] = 'ddm-end'
self.errorCodes['piloterrorcode'][1143] = 'Failed to chmod trf'
self.errorStages['piloterrorcode'][1143] = 'ddm-start'
self.errorCodes['piloterrorcode'][1144] = 'Job killed by panda server'
self.errorStages['piloterrorcode'][1144] = 'user-during'
self.errorCodes['piloterrorcode'][1145] = 'Get error: md5sum mismatch on input file'
self.errorStages['piloterrorcode'][1145] = 'ddm-start'
self.errorCodes['piloterrorcode'][1146] = 'Trf installation dir does not exist and could not be installed'
self.errorStages['piloterrorcode'][1146] = 'ddm-start'
self.errorCodes['piloterrorcode'][1147] = 'Put error: dccp returned readOnly'
self.errorStages['piloterrorcode'][1147] = 'ddm-end'
self.errorCodes['piloterrorcode'][1148] = 'Put error: Failed to remove readOnly file in dCache'
self.errorStages['piloterrorcode'][1148] = 'ddm-end'
self.errorCodes['piloterrorcode'][1149] = 'wget command failed to download trf'
self.errorStages['piloterrorcode'][1149] = 'ddm-start'
self.errorCodes['piloterrorcode'][1150] = 'Looping job killed by pilot'
self.errorStages['piloterrorcode'][1150] = 'athena-end'
self.errorCodes['piloterrorcode'][1151] = 'Get error: Input file staging timed out'
self.errorStages['piloterrorcode'][1151] = 'ddm-start'
self.errorCodes['piloterrorcode'][1152] = 'Put error: File copy timed out'
self.errorStages['piloterrorcode'][1152] = 'ddm-end'
self.errorCodes['piloterrorcode'][1153] = 'Lost job was not finished'
self.errorStages['piloterrorcode'][1153] = 'athena-end'
self.errorCodes['piloterrorcode'][1154] = 'Failed to register log file'
self.errorStages['piloterrorcode'][1154] = 'athena-end'
self.errorCodes['piloterrorcode'][1155] = 'Failed to move output files for lost job'
self.errorStages['piloterrorcode'][1155] = 'athena-end'
self.errorCodes['piloterrorcode'][1156] = 'Pilot could not recover job'
self.errorStages['piloterrorcode'][1156] = 'athena-end'
self.errorCodes['piloterrorcode'][1157] = 'Could not create log file'
self.errorStages['piloterrorcode'][1157] = 'athena-end'
self.errorCodes['piloterrorcode'][1158] = 'Reached maximum number of recovery attempts'
self.errorStages['piloterrorcode'][1158] = 'athena-end'
self.errorCodes['piloterrorcode'][1159] = 'Job recovery could not read PoolFileCatalog.xml file (guids lost)'
self.errorStages['piloterrorcode'][1159] = 'athena-end'
self.errorCodes['piloterrorcode'][1160] = 'LRC registration error: file name string limit exceeded 250'
self.errorStages['piloterrorcode'][1160] = 'ddm-end'
self.errorCodes['piloterrorcode'][1161] = 'Job recovery could not generate xml for remaining output files'
self.errorStages['piloterrorcode'][1161] = 'athena-end'
self.errorCodes['piloterrorcode'][1162] = 'LRC registration error: Non-unique LFN'
self.errorStages['piloterrorcode'][1162] = 'ddm-end'
self.errorCodes['piloterrorcode'][1163] = 'Grid proxy not valid'
self.errorStages['piloterrorcode'][1163] = 'ddm-start'
self.errorCodes['piloterrorcode'][1164] = 'Get error: Local input file missing'
self.errorStages['piloterrorcode'][1164] = 'ddm-start'
self.errorCodes['piloterrorcode'][1165] = 'Put error: Local output file missing'
self.errorStages['piloterrorcode'][1165] = 'ddm-end'
self.errorCodes['piloterrorcode'][1166] = 'Put error: File copy broken by SIGPIPE'
self.errorStages['piloterrorcode'][1166] = 'ddm-end'
self.errorCodes['piloterrorcode'][1167] = 'Get error: Input file missing in PoolFileCatalog.xml'
self.errorStages['piloterrorcode'][1167] = 'ddm-start'
self.errorCodes['piloterrorcode'][1168] = 'Get error: Total file size too large'
self.errorStages['piloterrorcode'][1168] = 'user-start'
self.errorCodes['piloterrorcode'][1169] = 'Put error: LFC registration failed'
self.errorStages['piloterrorcode'][1169] = 'ddm-end'
self.errorCodes['piloterrorcode'][1170] = 'Error running adler32 on the file in job workdir'
self.errorStages['piloterrorcode'][1170] = 'ddm-start'
self.errorCodes['piloterrorcode'][1171] = 'Get error: adler32 mismatch on input file'
self.errorStages['piloterrorcode'][1171] = 'ddm-start'
self.errorCodes['piloterrorcode'][1172] = 'Put error: Problem with copying from job workdir to local SE: adler32 mismatch'
self.errorStages['piloterrorcode'][1172] = 'ddm-end'
self.errorCodes['piloterrorcode'][1173] = 'PandaMover staging error: File is not cached'
self.errorStages['piloterrorcode'][1173] = 'athena-end'
self.errorCodes['piloterrorcode'][1174] = 'PandaMover transfer failure'
self.errorStages['piloterrorcode'][1174] = 'athena-end'
self.errorCodes['piloterrorcode'][1175] = 'Get error: Problem with copying from local SE to job workdir: size mismatch'
self.errorStages['piloterrorcode'][1175] = 'ddm-start'
self.errorCodes['piloterrorcode'][1176] = 'Pilot has no child processes (job wrapper has either crashed or did not send final status)'
self.errorStages['piloterrorcode'][1176] = 'panda-end'
self.errorCodes['piloterrorcode'][1177] = 'Voms proxy not valid'
self.errorStages['piloterrorcode'][1177] = 'ddm-start'
self.errorCodes['piloterrorcode'][1178] = 'Get error: No input files are staged'
self.errorStages['piloterrorcode'][1178] = 'ddm-start'
self.errorCodes['piloterrorcode'][1179] = 'Get error: Failed to get LFC replicas'
self.errorStages['piloterrorcode'][1179] = 'ddm-start'
self.errorCodes['piloterrorcode'][1180] = 'Get error: Globus system error'
self.errorStages['piloterrorcode'][1180] = 'ddm-start'
self.errorCodes['piloterrorcode'][1181] = 'Put error: Globus system error'
self.errorStages['piloterrorcode'][1181] = 'ddm-end'
self.errorCodes['piloterrorcode'][1182] = 'Get error: Failed to get LFC replica'
self.errorStages['piloterrorcode'][1182] = 'ddm-start'
self.errorCodes['piloterrorcode'][1183] = 'LRC registration error: Guid-metadata entry already exists'
self.errorStages['piloterrorcode'][1183] = 'ddm-end'
self.errorCodes['piloterrorcode'][1184] = 'Put error: PoolFileCatalog could not be found in workdir'
self.errorStages['piloterrorcode'][1184] = 'ddm-end'
self.errorCodes['piloterrorcode'][1185] = 'Put error: Error running adler32 on the file in job workdir'
self.errorStages['piloterrorcode'][1185] = 'ddm-end'
self.errorCodes['piloterrorcode'][1186] = 'Software directory does not exist'
self.errorStages['piloterrorcode'][1186] = 'panda-start'
self.errorCodes['piloterrorcode'][1187] = 'Athena metadata is not available'
self.errorStages['piloterrorcode'][1187] = 'athena-end'
self.errorCodes['piloterrorcode'][1188] = 'lcg-getturls failed'
self.errorStages['piloterrorcode'][1188] = 'panda-during'
self.errorCodes['piloterrorcode'][1189] = 'lcg-getturls was timed-out'
self.errorStages['piloterrorcode'][1189] = 'panda-during'
self.errorCodes['piloterrorcode'][1190] = 'LFN too long (exceeding limit of 150 characters)'
self.errorStages['piloterrorcode'][1190] = 'panda-during'
self.errorCodes['piloterrorcode'][1191] = 'Illegal zero file size'
self.errorStages['piloterrorcode'][1191] = 'ddm-end'
self.errorCodes['piloterrorcode'][1192] = 'DBRelease file has not been transferred yet'
self.errorStages['piloterrorcode'][1192] = 'ddm-start'
self.errorCodes['piloterrorcode'][1194] = 'File verification failed'
self.errorStages['piloterrorcode'][1194] = 'panda-during'
self.errorCodes['piloterrorcode'][1195] = 'Command timed out'
self.errorStages['piloterrorcode'][1195] = 'panda-during'
self.errorCodes['piloterrorcode'][1198] = 'Can not check the child process status from the heartbeat process'
self.errorStages['piloterrorcode'][1198] = 'panda-during'
self.errorCodes['piloterrorcode'][1199] = 'Could not create the directory'
self.errorStages['piloterrorcode'][1199] = 'panda-start'
self.errorCodes['piloterrorcode'][1200] = 'Job terminated by unknown kill signal'
self.errorStages['piloterrorcode'][1200] = 'panda-during'
self.errorCodes['piloterrorcode'][1201] = 'Job killed from the batch system: SIGTERM'
self.errorStages['piloterrorcode'][1201] = 'panda-during'
self.errorCodes['piloterrorcode'][1202] = 'Job killed from the batch system: SIGQUIT'
self.errorStages['piloterrorcode'][1202] = 'panda-during'
self.errorCodes['piloterrorcode'][1203] = 'Job killed from the batch system: SIGSEGV'
self.errorStages['piloterrorcode'][1203] = 'panda-during'
self.errorCodes['piloterrorcode'][1204] = 'Job killed from the batch system: SIGXCPU'
self.errorStages['piloterrorcode'][1204] = 'panda-during'
self.errorCodes['piloterrorcode'][1205] = 'Job killed by user'
self.errorStages['piloterrorcode'][1205] = 'user-during'
self.errorCodes['piloterrorcode'][1210] = 'No athena output'
self.errorStages['piloterrorcode'][1210] = 'athena-end'
self.errorCodes['piloterrorcode'][1211] = 'Missing installation'
self.errorStages['piloterrorcode'][1211] = 'panda-end'
self.errorCodes['piloterrorcode'][1212] = 'Athena ran out of memory'
self.errorStages['piloterrorcode'][1212] = 'athena-during'
self.errorCodes['piloterrorcode'][1220] = 'Job failed due to unknown reason (consult log file)'
self.errorStages['piloterrorcode'][1220] = 'athena-end'
self.errorCodes['exeerrorcode'][99] = 'Transformation error code mismatch'
self.errorStages['exeerrorcode'][99] = 'athena-end'
self.errorCodes['exeerrorcode'][100] = 'Transformation not found in run directory'
self.errorStages['exeerrorcode'][100] = 'ddm-start'
for code in range(1000, 2000):
if code in self.errorCodes['piloterrorcode'] and code in self.errorStages['piloterrorcode']:
self.errorCodes['exeerrorcode'][code] = self.errorCodes['piloterrorcode'][code]
self.errorStages['exeerrorcode'][code] = self.errorStages['piloterrorcode'][code]
# errors at http://alxr.usatlas.bnl.gov/lxr/source/atlas/Tools/PyJobTransformsCore/share/atlas_error_categories.db?v=current
self.errorCodes['exeerrorcode'][60000] = 'segmentation violation'
self.errorStages['exeerrorcode'][60000] = 'athena-during'
self.errorCodes['exeerrorcode'][60010] = 'segmentation fault'
self.errorStages['exeerrorcode'][60010] = 'athena-during'
self.errorCodes['exeerrorcode'][60100] = 'CBNT_Athena::execute() error'
self.errorStages['exeerrorcode'][60100] = 'athena-during'
self.errorCodes['exeerrorcode'][60101] = 'TRTDigitization::execute() error'
self.errorStages['exeerrorcode'][60101] = 'athena-during'
self.errorCodes['exeerrorcode'][60200] = 'egammaShowerShape: Cluster is neither in Barrel nor in Endcap cannot calculate ShowerShape'
self.errorStages['exeerrorcode'][60200] = 'athena-during'
self.errorCodes['exeerrorcode'][60201] = 'LArEMECEnergyCorrection::CalculateChargeCollection error'
self.errorStages['exeerrorcode'][60201] = 'athena-during'
self.errorCodes['exeerrorcode'][60600] = 'Transform Keyboard interrupt'
self.errorStages['exeerrorcode'][60600] = 'athena-during'
self.errorCodes['exeerrorcode'][60701] = 'CBNT_Audit could not allocate memory'
self.errorStages['exeerrorcode'][60701] = 'athena-during'
self.errorCodes['exeerrorcode'][61000] = 'ApplicationMgr Failed to load modules'
self.errorStages['exeerrorcode'][61000] = 'athena-during'
self.errorCodes['exeerrorcode'][61010] = 'DllClassManager Could not load module'
self.errorStages['exeerrorcode'][61010] = 'athena-during'
self.errorCodes['exeerrorcode'][61020] = 'DllClassManager Entry point failure in module'
self.errorStages['exeerrorcode'][61020] = 'athena-during'
self.errorCodes['exeerrorcode'][61100] = 'EventLoopMgr Unable to initialize Algorithm'
self.errorStages['exeerrorcode'][61100] = 'athena-during'
self.errorCodes['exeerrorcode'][61200] = 'ServiceManager Unable to initialize Service'
self.errorStages['exeerrorcode'][61200] = 'athena-during'
self.errorCodes['exeerrorcode'][62000] = 'ServiceManager Finalization of service AthenaSealSvc failed'
self.errorStages['exeerrorcode'][62000] = 'athena-during'
self.errorCodes['exeerrorcode'][62100] = 'pixelRoI service_i: can not locate service DetectorStore'
self.errorStages['exeerrorcode'][62100] = 'athena-during'
self.errorCodes['exeerrorcode'][62200] = 'pool::PersistencySvc::UserDatabase::connectForRead: PFN is not existing in the catalog'
self.errorStages['exeerrorcode'][62200] = 'athena-during'
self.errorCodes['exeerrorcode'][62300] = 'ServiceManager: unable to initialize Service: EventSelector'
self.errorStages['exeerrorcode'][62300] = 'athena-during'
self.errorCodes['exeerrorcode'][62400] = 'JobOptionsSvc error'
self.errorStages['exeerrorcode'][62400] = 'athena-during'
self.errorCodes['exeerrorcode'][62500] = 'PartPropSvc: could not open PDT file'
self.errorStages['exeerrorcode'][62500] = 'athena-during'
self.errorCodes['exeerrorcode'][62510] = 'PartPropSvc: unable to access any PDT file'
self.errorStages['exeerrorcode'][62510] = 'athena-during'
self.errorCodes['exeerrorcode'][62600] = 'AthenaCrash'
self.errorStages['exeerrorcode'][62600] = 'athena-during'
self.errorCodes['exeerrorcode'][62700] = 'DetectorStore: no valid proxy for default object'
self.errorStages['exeerrorcode'][62700] = 'athena-during'
self.errorCodes['exeerrorcode'][62800] = 'JobOptionsSvc: unable to set property'
self.errorStages['exeerrorcode'][62800] = 'athena-during'
self.errorCodes['exeerrorcode'][62900] = 'DllClassManager: system Error'
self.errorStages['exeerrorcode'][62900] = 'athena-during'
self.errorCodes['exeerrorcode'][62910] = 'ApplicationMgr: failure loading declared DLL\'s'
self.errorStages['exeerrorcode'][62910] = 'athena-during'
self.errorCodes['exeerrorcode'][63000] = 'Transform python errors'
self.errorStages['exeerrorcode'][63000] = 'athena-during'
self.errorCodes['exeerrorcode'][63010] = 'Transform python syntax error'
self.errorStages['exeerrorcode'][63010] = 'athena-during'
self.errorCodes['exeerrorcode'][63020] = 'Transform python import error'
self.errorStages['exeerrorcode'][63020] = 'athena-during'
self.errorCodes['exeerrorcode'][63100] = 'Transform argument errors'
self.errorStages['exeerrorcode'][63100] = 'athena-during'
self.errorCodes['exeerrorcode'][63110] = 'maxEvents argument: Too few events requested'
self.errorStages['exeerrorcode'][63110] = 'user-during'
self.errorCodes['exeerrorcode'][63111] = 'maxEvents argument: Too many events requested'
self.errorStages['exeerrorcode'][63111] = 'user-during'
self.errorCodes['exeerrorcode'][63200] = 'Transform definition errors'
self.errorStages['exeerrorcode'][63200] = 'athena-during'
self.errorCodes['exeerrorcode'][63300] = 'Transform environment errors'
self.errorStages['exeerrorcode'][63300] = 'athena-during'
self.errorCodes['exeerrorcode'][63400] = 'Transform unknown exceptions'
self.errorStages['exeerrorcode'][63400] = 'athena-during'
self.errorCodes['exeerrorcode'][63500] = 'Transform execution timeout'
self.errorStages['exeerrorcode'][63500] = 'athena-during'
self.errorCodes['exeerrorcode'][63600] = 'Transform execution retries exhausted'
self.errorStages['exeerrorcode'][63600] = 'athena-during'
self.errorCodes['exeerrorcode'][63900] = 'Transform file errors'
self.errorStages['exeerrorcode'][63900] = 'athena-during'
self.errorCodes['exeerrorcode'][64000] = 'Transform input file errors'
self.errorStages['exeerrorcode'][64000] = 'athena-during'
self.errorCodes['exeerrorcode'][64010] = 'Transform input file not found'
self.errorStages['exeerrorcode'][64010] = 'athena-during'
self.errorCodes['exeerrorcode'][64020] = 'Transform input file not readable'
self.errorStages['exeerrorcode'][64020] = 'athena-during'
self.errorCodes['exeerrorcode'][64030] = 'Transform input file empty'
self.errorStages['exeerrorcode'][64030] = 'athena-during'
self.errorCodes['exeerrorcode'][64031] = 'Transform input file contains too few events'
self.errorStages['exeerrorcode'][64031] = 'athena-during'
self.errorCodes['exeerrorcode'][64032] = 'Transform input file contains too many events'
self.errorStages['exeerrorcode'][64032] = 'athena-during'
self.errorCodes['exeerrorcode'][64033] = 'Transform input file: Event counting failed'
self.errorStages['exeerrorcode'][64033] = 'athena-during'
self.errorCodes['exeerrorcode'][64040] = 'Transform input file corrupted'
self.errorStages['exeerrorcode'][64040] = 'athena-during'
self.errorCodes['exeerrorcode'][64100] = 'Transform output file errors'
self.errorStages['exeerrorcode'][64100] = 'athena-during'
self.errorCodes['exeerrorcode'][64110] = 'Transform output file not found'
self.errorStages['exeerrorcode'][64110] = 'athena-during'
self.errorCodes['exeerrorcode'][64120] = 'Transform output file not readable'
self.errorStages['exeerrorcode'][64120] = 'athena-during'
self.errorCodes['exeerrorcode'][64130] = 'Transform output file empty'
self.errorStages['exeerrorcode'][64130] = 'athena-during'
self.errorCodes['exeerrorcode'][64131] = 'Transform output file contains too few events'
self.errorStages['exeerrorcode'][64131] = 'athena-during'
self.errorCodes['exeerrorcode'][64132] = 'Transform output file contains too many events'
self.errorStages['exeerrorcode'][64132] = 'athena-during'
self.errorCodes['exeerrorcode'][64133] = 'Transform output file: Event counting failed'
self.errorStages['exeerrorcode'][64133] = 'athena-during'
self.errorCodes['exeerrorcode'][64140] = 'Transform output file corrupted'
self.errorStages['exeerrorcode'][64140] = 'athena-during'
self.errorCodes['exeerrorcode'][64150] = 'Transform output file already exists'
self.errorStages['exeerrorcode'][64150] = 'athena-during'
self.errorCodes['exeerrorcode'][64200] = 'Error in transform configuration file'
self.errorStages['exeerrorcode'][64200] = 'athena-during'
self.errorCodes['exeerrorcode'][65000] = 'Problems with Database'
self.errorStages['exeerrorcode'][65000] = 'athena-during'
self.errorCodes['exeerrorcode'][65100] = 'Problems with DBRelease'
self.errorStages['exeerrorcode'][65100] = 'athena-during'
self.errorCodes['exeerrorcode'][65110] = 'DBRelease not setup'
self.errorStages['exeerrorcode'][65110] = 'athena-during'
self.errorCodes['exeerrorcode'][65120] = 'Wrong version of DBRelease setup'
self.errorStages['exeerrorcode'][65120] = 'athena-during'
self.errorCodes['exeerrorcode'][65130] = 'Problems with the DBRelease tarfile'
self.errorStages['exeerrorcode'][65130] = 'athena-during'
self.errorCodes['exeerrorcode'][65200] = 'Problems with geometry tag'
self.errorStages['exeerrorcode'][65200] = 'athena-during'
self.errorCodes['exeerrorcode'][65210] = 'Mismatch between Geometry Tag in transform argument geometryVersion and in input file'
self.errorStages['exeerrorcode'][65210] = 'athena-during'
self.errorCodes['exeerrorcode'][66000] = 'Bad file descriptor'
self.errorStages['exeerrorcode'][66000] = 'athena-during'
self.errorCodes['exeerrorcode'][69999] = 'Unknown Transform error'
self.errorStages['exeerrorcode'][69999] = 'athena-during'
self.errorCodes['exeerrorcode'][10000] = 'Athena/Transformation error'
self.errorStages['exeerrorcode'][10000] = 'athena-during'
self.errorCodes['exeerrorcode'][10100] = 'At/Tr connection error'
self.errorStages['exeerrorcode'][10100] = 'athena-during'
self.errorCodes['exeerrorcode'][10102] = 'Nova DB problems'
self.errorStages['exeerrorcode'][10102] = 'athena-during'
self.errorCodes['exeerrorcode'][10103] = 'Calibration DB problems'
self.errorStages['exeerrorcode'][10103] = 'athena-during'
self.errorCodes['exeerrorcode'][10104] = 'Oracle error ORA-03113'
self.errorStages['exeerrorcode'][10104] = 'panda-during'
self.errorCodes['exeerrorcode'][10110] = 'Conditions database problems'
self.errorStages['exeerrorcode'][10110] = 'athena-during'
self.errorCodes['exeerrorcode'][10120] = 'nfs lock problems with sqlite database'
self.errorStages['exeerrorcode'][10120] = 'athena-during'
self.errorCodes['exeerrorcode'][10130] = 'Lost connection to MySQL server'
self.errorStages['exeerrorcode'][10130] = 'panda-during'
self.errorCodes['exeerrorcode'][10140] = 'Oracle error ORA-02391: exceeded simultaneous SESSIONS_PER_USER limit'
self.errorStages['exeerrorcode'][10140] = 'panda-during'
self.errorCodes['exeerrorcode'][10200] = 'Athena crashes'
self.errorStages['exeerrorcode'][10200] = 'athena-during'
self.errorCodes['exeerrorcode'][10210] = 'Athena init failed'
self.errorStages['exeerrorcode'][10210] = 'athena-during'
self.errorCodes['exeerrorcode'][10212] = 'Missing PFN in PoolFileCatalog'
self.errorStages['exeerrorcode'][10212] = 'athena-during'
self.errorCodes['exeerrorcode'][10213] = 'AuditorSvc init failed'
self.errorStages['exeerrorcode'][10213] = 'athena-during'
self.errorCodes['exeerrorcode'][10214] = 'Pythia DLL not loaded'
self.errorStages['exeerrorcode'][10214] = 'athena-during'
self.errorCodes['exeerrorcode'][10220] = 'Input file corrupted (Wrong input)'
self.errorStages['exeerrorcode'][10220] = 'athena-during'
self.errorCodes['exeerrorcode'][10300] = 'ApplicationMgr Failed to load modules'
self.errorStages['exeerrorcode'][10300] = 'athena-during'
self.errorCodes['exeerrorcode'][10310] = 'DllClassManager Could not load module'
self.errorStages['exeerrorcode'][10310] = 'athena-during'
self.errorCodes['exeerrorcode'][10400] = 'Problems loading dynamic libraries'
self.errorStages['exeerrorcode'][10400] = 'athena-during'
self.errorCodes['exeerrorcode'][10410] = 'Problem loading shared library'
self.errorStages['exeerrorcode'][10410] = 'athena-during'
self.errorCodes['exeerrorcode'][10420] = 'ApplicationMgr: failure loading declared DLL\'s'
self.errorStages['exeerrorcode'][10420] = 'athena-during'
self.errorCodes['exeerrorcode'][10430] = 'Problems loading shared libraries in LD_PRELOAD '
self.errorStages['exeerrorcode'][10430] = 'athena-during'
self.errorCodes['exeerrorcode'][10500] = 'JobOptions errors'
self.errorStages['exeerrorcode'][10500] = 'user-during'
self.errorCodes['exeerrorcode'][10510] = 'JobOptions file not found'
self.errorStages['exeerrorcode'][10510] = 'user-during'
self.errorCodes['exeerrorcode'][10520] = 'Error in jobOptions'
self.errorStages['exeerrorcode'][10520] = 'user-during'
self.errorCodes['exeerrorcode'][10600] = 'Athena Keyboard interrupt'
self.errorStages['exeerrorcode'][10600] = 'user-during'
self.errorCodes['exeerrorcode'][10700] = 'Athena StoreGateSvc errors'
self.errorStages['exeerrorcode'][10700] = 'athena-during'
self.errorCodes['exeerrorcode'][10710] = 'StoreGateSvc retrieve errors'
self.errorStages['exeerrorcode'][10710] = 'athena-during'
self.errorCodes['exeerrorcode'][10711] = 'StoreGateSvc retrieve(default): No valid proxy for object'
self.errorStages['exeerrorcode'][10711] = 'athena-during'
self.errorCodes['exeerrorcode'][10712] = 'StoreGateSvc retrieve(non-const): No valid proxy for object'
self.errorStages['exeerrorcode'][10712] = 'athena-during'
self.errorCodes['exeerrorcode'][10713] = 'StoreGateSvc retrieve(const): No valid proxy for object'
self.errorStages['exeerrorcode'][10713] = 'athena-during'
self.errorCodes['exeerrorcode'][10720] = 'StoreGateSvc record: object not added to store'
self.errorStages['exeerrorcode'][10720] = 'athena-during'
self.errorCodes['exeerrorcode'][10800] = 'Athena DetectorStore errors'
self.errorStages['exeerrorcode'][10800] = 'athena-during'
self.errorCodes['exeerrorcode'][10810] = 'DetectorStore retrieve errors'
self.errorStages['exeerrorcode'][10810] = 'athena-during'
self.errorCodes['exeerrorcode'][10811] = 'DetectorStore retrieve(default): No valid proxy for object'
self.errorStages['exeerrorcode'][10811] = 'athena-during'
self.errorCodes['exeerrorcode'][10812] = 'DetectorStore retrieve(non-const): No valid proxy for object'
self.errorStages['exeerrorcode'][10812] = 'athena-during'
self.errorCodes['exeerrorcode'][10813] = 'DetectorStore retrieve(const): No valid proxy for object'
self.errorStages['exeerrorcode'][10813] = 'athena-during'
self.errorCodes['exeerrorcode'][10820] = 'DetectorStore record: object not added to store'
self.errorStages['exeerrorcode'][10820] = 'athena-during'
self.errorCodes['exeerrorcode'][10900] = 'Problems with software installation'
self.errorStages['exeerrorcode'][10900] = 'athena-during'
self.errorCodes['exeerrorcode'][10910] = 'Missing system libraries'
self.errorStages['exeerrorcode'][10910] = 'athena-during'
self.errorCodes['exeerrorcode'][10920] = 'Missing libraries'
self.errorStages['exeerrorcode'][10920] = 'athena-during'
self.errorCodes['exeerrorcode'][11000] = 'Athena non-zero exit'
self.errorStages['exeerrorcode'][11000] = 'athena-during'
self.errorCodes['exeerrorcode'][13400] = 'Athena unknown exception'
self.errorStages['exeerrorcode'][13400] = 'athena-during'
self.errorCodes['exeerrorcode'][13410] = 'Athena python exception'
self.errorStages['exeerrorcode'][13410] = 'athena-during'
self.errorCodes['exeerrorcode'][13420] = 'Athena C++ exception'
self.errorStages['exeerrorcode'][13420] = 'athena-during'
self.errorCodes['exeerrorcode'][14100] = 'Athena output file errors'
self.errorStages['exeerrorcode'][14100] = 'athena-during'
self.errorCodes['exeerrorcode'][14110] = 'Athena pool.root file too large (root opened second file)'
self.errorStages['exeerrorcode'][14110] = 'athena-during'
self.errorCodes['exeerrorcode'][15010] = 'Geant4 got stuck in event'
self.errorStages['exeerrorcode'][15010] = 'athena-during'
self.errorCodes['exeerrorcode'][15000] = 'Problems with ElementLink'
self.errorStages['exeerrorcode'][15000] = 'athena-during'
self.errorCodes['transexitcode'] = {}
self.errorStages['transexitcode'] = {}
self.errorCodes['transexitcode'][1] = 'Unspecified error, consult log file'
self.errorStages['transexitcode'][1] = 'athena-during'
self.errorCodes['transexitcode'][2] = 'Athena core dump'
self.errorStages['transexitcode'][2] = 'athena-during'
self.errorCodes['transexitcode'][6] = 'TRF_SEGVIO - Segmentation violation'
self.errorStages['transexitcode'][6] = 'athena-during'
self.errorCodes['transexitcode'][10] = 'ATH_FAILURE - Athena non-zero exit'
self.errorStages['transexitcode'][10] = 'athena-during'
self.errorCodes['transexitcode'][26] = 'TRF_ATHENACRASH - Athena crash'
self.errorStages['transexitcode'][26] = 'athena-during'
self.errorCodes['transexitcode'][30] = 'TRF_PYT - transformation python error'
self.errorStages['transexitcode'][30] = 'athena-during'
self.errorCodes['transexitcode'][31] = 'TRF_ARG - transformation argument error'
self.errorStages['transexitcode'][31] = 'athena-during'
self.errorCodes['transexitcode'][32] = 'TRF_DEF - transformation definition error'
self.errorStages['transexitcode'][32] = 'athena-during'
self.errorCodes['transexitcode'][33] = 'TRF_ENV - transformation environment error'
self.errorStages['transexitcode'][33] = 'athena-during'
self.errorCodes['transexitcode'][34] = 'TRF_EXC - transformation exception'
self.errorStages['transexitcode'][34] = 'athena-during'
self.errorCodes['transexitcode'][40] = 'Athena crash - consult log file'
self.errorStages['transexitcode'][40] = 'athena-during'
self.errorCodes['transexitcode'][41] = 'TRF_OUTFILE - output file error'
self.errorStages['transexitcode'][41] = 'athena-during'
self.errorCodes['transexitcode'][42] = 'TRF_CONFIG - transform config file error'
self.errorStages['transexitcode'][42] = 'athena-during'
self.errorCodes['transexitcode'][50] = 'TRF_DB - problems with database'
self.errorStages['transexitcode'][50] = 'athena-during'
self.errorCodes['transexitcode'][51] = 'TRF_DBREL_TARFILE - Problems with the DBRelease tarfile'
self.errorStages['transexitcode'][51] = 'athena-during'
self.errorCodes['transexitcode'][60] = 'TRF_GBB_TIME - GriBB - output limit exceeded (time, memory, CPU)'
self.errorStages['transexitcode'][60] = 'athena-during'
self.errorCodes['transexitcode'][79] = 'Copying input file failed'
self.errorStages['transexitcode'][79] = 'ddm-start'
self.errorCodes['transexitcode'][80] = 'file in trf definition not found, using the expandable syntax'
self.errorStages['transexitcode'][80] = 'athena-during'
self.errorCodes['transexitcode'][81] = 'file in trf definition not found, using the expandable syntax -- pileup case'
self.errorStages['transexitcode'][81] = 'athena-during'
self.errorCodes['transexitcode'][98] = 'Oracle error - session limit reached'
self.errorStages['transexitcode'][98] = 'panda-during'
self.errorCodes['transexitcode'][99] = 'TRF_UNKNOWN - unknown transformation error'
self.errorStages['transexitcode'][99] = 'athena-during'
self.errorCodes['transexitcode'][102] = 'One of the output files did not get produced by the job'
self.errorStages['transexitcode'][102] = 'athena-end'
self.errorCodes['transexitcode'][104] = 'Copying the output file to local SE failed (md5sum or size mismatch, or LFNnonunique)'
self.errorStages['transexitcode'][104] = 'ddm-end'
self.errorCodes['transexitcode'][126] = 'Transformation not executable - consult log file'
self.errorStages['transexitcode'][126] = 'athena-start'
self.errorCodes['transexitcode'][127] = 'Transformation not installed in CE'
self.errorStages['transexitcode'][127] = 'panda-start'
self.errorCodes['transexitcode'][134] = 'Athena core dump or timeout, or conddb DB connect exception'
self.errorStages['transexitcode'][134] = 'athena-during'
self.errorCodes['transexitcode'][141] = "No input file available - check availability of input dataset at site"
self.errorStages['transexitcode'][141] = 'ddm-start'
self.errorCodes['transexitcode'][200] = 'Log file not transferred to destination'
self.errorStages['transexitcode'][200] = 'ddm-end'
self.errorCodes['transexitcode'][220] = 'Proot: An exception occurred in the user analysis code'
self.errorStages['transexitcode'][220] = 'athena-during'
self.errorCodes['transexitcode'][221] = 'Proot: Framework decided to abort the job due to an internal problem'
self.errorStages['transexitcode'][221] = 'athena-during'
self.errorCodes['transexitcode'][222] = 'Proot: Job completed without reading all input files'
self.errorStages['transexitcode'][222] = 'athena-during'
self.errorCodes['transexitcode'][223] = 'Proot: Input files cannot be opened'
self.errorStages['transexitcode'][223] = 'athena-during'
for code in ( 1008, 1098, 1112, 1116, 1117, 1118, 1119, 1163, 1177, 1178 ):
self.errorCodes['transexitcode'][code] = self.errorCodes['piloterrorcode'][code]
self.errorStages['transexitcode'][code] = self.errorStages['piloterrorcode'][code]
self.errorCodes['transexitcode'][1198] = 'Can\'t check the child process status from the heartbeat process'
self.errorStages['transexitcode'][1198] = 'athena-during'
self.errorCodes['transexitcode'][2100] = "MyProxyError 2100: server name not specified"
self.errorStages['transexitcode'][2100] = 'panda-start'
self.errorCodes['transexitcode'][2101] = "MyProxyError 2101: voms attributes not specified"
self.errorStages['transexitcode'][2101] = 'panda-start'
self.errorCodes['transexitcode'][2102] = "MyProxyError 2102: user DN not specified"
self.errorStages['transexitcode'][2102] = 'panda-start'
self.errorCodes['transexitcode'][2103] = "MyProxyError 2103: pilot owner DN not specified"
self.errorStages['transexitcode'][2103] = 'panda-start'
self.errorCodes['transexitcode'][2104] = "MyProxyError 2104: invalid path for the delegated proxy"
self.errorStages['transexitcode'][2104] = 'panda-start'
self.errorCodes['transexitcode'][2105] = "MyProxyError 2105: invalid pilot proxy path"
self.errorStages['transexitcode'][2105] = 'panda-start'
self.errorCodes['transexitcode'][2106] = "MyProxyError 2106: no path to delegated proxy specified"
self.errorStages['transexitcode'][2106] = 'panda-start'
self.errorCodes['transexitcode'][2200] = "MyProxyError 2200: myproxy-init not available in PATH"
self.errorStages['transexitcode'][2200] = 'panda-start'
self.errorCodes['transexitcode'][2201] = "MyProxyError 2201: myproxy-logon not available in PATH"
self.errorStages['transexitcode'][2201] = 'panda-start'
self.errorCodes['transexitcode'][2202] = "MyProxyError 2202: myproxy-init version not valid"
self.errorStages['transexitcode'][2202] = 'panda-start'
self.errorCodes['transexitcode'][2203] = "MyProxyError 2203: myproxy-logon version not valid"
self.errorStages['transexitcode'][2203] = 'panda-start'
self.errorCodes['transexitcode'][2300] = "MyProxyError 2300: proxy delegation failed"
self.errorStages['transexitcode'][2300] = 'panda-start'
self.errorCodes['transexitcode'][2301] = "MyProxyError 2301: proxy retrieval failed"
self.errorStages['transexitcode'][2301] = 'panda-start'
self.errorCodes['transexitcode'][2400] = "MyProxyError 2400: security violation. Logname and DN do not match"
self.errorStages['transexitcode'][2400] = 'panda-start'
self.errorCodes['transexitcode'][2500] = "MyProxyError 2500: there is no a valid proxy"
self.errorStages['transexitcode'][2500] = 'panda-start'
self.errorCodes['transexitcode'][2501] = "MyProxyError 2501: voms-proxy-info not available in PATH"
self.errorStages['transexitcode'][2501] = 'panda-start'
self.errorCodes['transexitcode'][3000] = 'curl failed to download pilot wrapper'
self.errorStages['transexitcode'][3000] = 'panda-start'
self.errorCodes['transexitcode'][3001] = 'Failed to download pilot code'
self.errorStages['transexitcode'][3001] = 'panda-start'
# dq2_cr error codes
self.errorCodes['transexitcode'][10020] = 'dq2_cr environment variables not properly defined'
self.errorStages['transexitcode'][10020] = 'ddm-end'
self.errorCodes['transexitcode'][10030] = 'dq2_cr getVUID error'
self.errorStages['transexitcode'][10030] = 'ddm-end'
self.errorCodes['transexitcode'][10040] = 'dq2_cr queryFilesInDataset error'
self.errorStages['transexitcode'][10040] = 'ddm-end'
self.errorCodes['transexitcode'][10050] = 'dq2_cr getLocation error'
self.errorStages['transexitcode'][10050] = 'ddm-end'
self.errorCodes['transexitcode'][10060] = 'dq2_cr requested protocol is not supported'
self.errorStages['transexitcode'][10060] = 'ddm-end'
self.errorCodes['transexitcode'][10070] = 'dq2_cr EC_MAIN error, check logfile'
self.errorStages['transexitcode'][10070] = 'ddm-end'
self.errorCodes['transexitcode'][10080] = 'dq2_cr PFNfromLFC error'
self.errorStages['transexitcode'][10080] = 'ddm-end'
self.errorCodes['transexitcode'][10090] = 'dq2_cr file size check failed'
self.errorStages['transexitcode'][10090] = 'ddm-end'
self.errorCodes['transexitcode'][10100] = 'dq2_cr could not create LFC directory'
self.errorStages['transexitcode'][10100] = 'ddm-end'
self.errorCodes['transexitcode'][10110] = 'dq2_cr LS error'
self.errorStages['transexitcode'][10110] = 'ddm-end'
self.errorCodes['transexitcode'][10120] = 'dq2_cr could not get dataset state from DQ2 server'
self.errorStages['transexitcode'][10120] = 'ddm-end'
self.errorCodes['transexitcode'][10130] = 'dq2_cr could not load ToA'
self.errorStages['transexitcode'][10130] = 'ddm-end'
self.errorCodes['transexitcode'][10140] = 'dq2_cr could not parse XML'
self.errorStages['transexitcode'][10140] = 'ddm-end'
self.errorCodes['transexitcode'][10150] = 'dq2_cr FileNotFound error'
self.errorStages['transexitcode'][10150] = 'ddm-end'
# ----------------------------------------------------------------------
# D A Y A B A Y E R R O R S
# ----------------------------------------------------------------------
self.errorCodes['transexitcode'][1000001] = 'ERROR message'
self.errorStages['transexitcode'][1000001] = 'panda-start'
self.errorCodes['transexitcode'][1000002] = 'FATAL message'
self.errorStages['transexitcode'][1000002] = 'panda-start'
self.errorCodes['transexitcode'][1000003] = 'segmentation violation message'
self.errorStages['transexitcode'][1000003] = 'panda-start'
self.errorCodes['transexitcode'][1000004] = 'IOError message'
self.errorStages['transexitcode'][1000004] = 'panda-start'
self.errorCodes['transexitcode'][1000005] = 'ValueError message'
self.errorStages['transexitcode'][1000005] = 'panda-start'
def getErrorCodes(self):
return self.errorFields, self.errorCodes, self.errorStages
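# Hedged usage sketch (not part of the original module; the enclosing class is
# defined earlier in this file and its name is assumed to be ErrorCodes here):
#
#   fields, codes, stages = ErrorCodes().getErrorCodes()
#   desc = codes.get('piloterrorcode', {}).get(1099, 'unknown error')
#   stage = stages.get('piloterrorcode', {}).get(1099, 'unknown stage')
#   print('%s [%s]' % (desc, stage))
#   # -> Get error: Staging input file failed [ddm-start]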
|
|
import re
import time
import urllib
import lxml.html
import feedparser
import logging
from util import *
from dateutil.parser import parse
from lxml.cssselect import CSSSelector
from lxml.etree import XMLSyntaxError
from urlparse import urlparse, parse_qs
from twisted.internet import reactor, defer, task
from twisted.internet.defer import inlineCallbacks, Deferred, DeferredList
feedparser._HTMLSanitizer.acceptable_elements.update(['iframe'])
SOURCES_MAX_CONCURRENCY = 4
SOURCES_CHECK_INTERVAL = 24*3600
YOUTUBE_CHANNEL_URL = u'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&id={id}&key={api_key}'
YOUTUBE_PLAYLISTITEMS_URL = u'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId={id}&page_token&pageToken={token}&maxResults=50&key={api_key}'
YOUTUBE_PLAYLIST_URL = u'https://www.googleapis.com/youtube/v3/playlistItems?key={api_key}&playlistId={id}&part=snippet&pageToken={token}&maxResults=50&order=date'
SOUNDCLOUD_RESOLVE_URL = u'https://api.soundcloud.com/resolve.json?url={url}&client_id={api_key}'
SOUNDCLOUD_FAVORITES_URL = 'http://api.soundcloud.com/users/{user_id}/favorites?client_id={api_key}'
LASTFM_LOGIN_URL = u'https://secure.last.fm/login'
LASTFM_TRACKS_URL = u'http://www.last.fm/music/{artist}/+tracks'
LASTFM_SETTINGS_URL = u'http://www.last.fm/settings/website'
def extract_youtube_id(url):
o = urlparse(url)
query = parse_qs(o.query)
if 'v' in query:
return query['v'][0]
else:
path_split = o.path.split('/')
if 'v' in path_split or 'e' in path_split or 'embed' in path_split:
return o.path.split('/')[-1]
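# Hedged examples of the URL shapes handled above (ids assumed for illustration):
#   extract_youtube_id('https://www.youtube.com/watch?v=abc123')  -> 'abc123'
#   extract_youtube_id('https://www.youtube.com/embed/abc123')    -> 'abc123'
# Any other URL falls through and the function returns None.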
def extract_soundcloud_id(url):
urls = [url, urllib.unquote(url)]
for url in urls:
index = url.find('/tracks/')
if index > 0:
return str(re.findall(r'\d+', url[index + 8:])[0])
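# Hedged example (track id assumed): both plain and percent-encoded widget URLs
# work, because the unquoted variant of the URL is also tried:
#   extract_soundcloud_id('https://api.soundcloud.com/tracks/12345/stream') -> '12345'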
@inlineCallbacks
def request_soundcloud_id(url, api_key):
url = url.replace('https://w.soundcloud.com/player/?url=', '')
response = yield get_request(SOUNDCLOUD_RESOLVE_URL.format(url=url, api_key=api_key))
try:
response_dict = response.json
except ValueError:
logger = logging.getLogger(__name__)
logger.error('Could not get soundcloud id for: %s', url)
else:
if isinstance(response_dict, dict) and response_dict.get('kind', None) == 'track':
defer.returnValue(response_dict['id'])
class SourceChecker(object):
def __init__(self, database, config):
self.logger = logging.getLogger(__name__)
self.database = database
self.config = config
self.checking = False
self.sources = {}
self.load_sources()
self.check_loop()
def load_sources(self):
sources = self.database.get_all_sources()
for source_dict in sources:
source = self.create_source(source_dict)
if source is None:
self.logger.error('Incorrect source found in database, skipping')
else:
self.sources[source_dict['_id']] = source
def create_source(self, source_dict):
if 'site' not in source_dict or 'type' not in source_dict:
return
last_check = source_dict.get('last_check', 0)
if source_dict['site'] == 'youtube':
return YoutubeSource(source_dict['type'], source_dict['data'], self.config, last_check)
elif source_dict['site'] == 'lastfm':
return LastfmSource(source_dict['type'], source_dict['data'], self.config, last_check)
elif source_dict['site'] == 'soundcloud':
return SoundcloudSource(source_dict['type'], source_dict['data'], self.config, last_check)
elif source_dict['type'] == 'rss':
return RSSSource(source_dict['data'], self.config, last_check)
@inlineCallbacks
def check_source(self, source_id, source):
tracks = yield source.fetch(source.last_check)
for track in tracks:
track['sources'] = [source_id]
count = self.database.add_tracks(tracks)
self.logger.info('Got %s track(s) for source %s (%s are new)', len(tracks), source, count)
self.database.set_source_last_check(source_id, source.last_check)
@inlineCallbacks
def check_loop(self):
self.checking = True
now = int(time.time())
sources = [(source_id, source) for source_id, source in self.sources.iteritems() if now - source.last_check >= SOURCES_CHECK_INTERVAL]
self.logger.info('Checking %s sources', len(sources))
deferreds = []
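# Bound concurrency: every coiterate() call below pulls work from the same
# generator, so at most SOURCES_MAX_CONCURRENCY check_source() calls run at once.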
coop = task.Cooperator()
work = (self.check_source(*source) for source in sources)
for i in xrange(SOURCES_MAX_CONCURRENCY):
d = coop.coiterate(work)
deferreds.append(d)
dl = defer.DeferredList(deferreds)
yield dl
self.logger.info('Finished checking sources')
self.checking = False
reactor.callLater(SOURCES_CHECK_INTERVAL, self.check_loop)
class RSSSource(object):
def __init__(self, url, config, last_check=0):
self.logger = logging.getLogger(__name__)
self.url = url
self.config = config
self.last_check = last_check
@inlineCallbacks
def fetch(self, since=0):
results = []
response = yield get_request(self.url)
feed = feedparser.parse(response.content)
for entry in feed.get('entries', []):
if entry.get('published_parsed', None) is not None:
epoch_time = int(time.mktime(entry['published_parsed']))
else:
epoch_time = -1
if epoch_time < since:
continue
audio_links = []
for link in entry['links']:
if link.get('type', None) == 'audio/mpeg':
audio_links.append(link['href'])
else:
response = yield get_request(link['href'])
more_links = yield self.extract_audio_links(response.content)
audio_links.extend(more_links)
if 'description' in entry:
more_links = yield self.extract_audio_links(entry['description'])
audio_links.extend(more_links)
for audio_link in audio_links:
self.logger.debug('Found link in RSS source: %s - %s', entry['title'], audio_link)
item = {'title': entry['title'],
'link': audio_link,
'ts': epoch_time}
if 'image' in entry:
item['image'] = entry['image']['href']
results.append(item)
self.last_check = int(time.time())
defer.returnValue(results)
@inlineCallbacks
def extract_audio_links(self, text):
# Extract Youtube/Soundcloud id's from iframes/anchors
audio_links = []
try:
tree = lxml.html.fromstring(text)
except Exception:
tree = None
if tree is not None:
urls = []
# Find iframes/anchors urls
iframe_sel = CSSSelector('iframe')
for iframe in iframe_sel(tree):
url = iframe.get('src')
if url:
urls.append(url)
anchor_sel = CSSSelector('a')
for anchor in anchor_sel(tree):
url = anchor.get('href')
if url:
urls.append(url)
# Process urls
for url in urls:
url_split = url.split('/')
if len(url_split) < 3:
continue
if url_split[2].endswith('youtube.com'):
youtube_id = extract_youtube_id(url)
if youtube_id:
audio_links.append('youtube:' + youtube_id)
elif url_split[2].endswith('soundcloud.com'):
api_key = self.config.get('sources', 'soundcloud_api_key')
soundcloud_id = extract_soundcloud_id(url) or (yield request_soundcloud_id(url, api_key))
if soundcloud_id:
audio_links.append('soundcloud:' + str(soundcloud_id))
defer.returnValue(audio_links)
def __str__(self):
return "RSSSource_%s" % self.url
class YoutubeSource(object):
def __init__(self, type, id, config, last_check=0):
self.logger = logging.getLogger(__name__)
self.type = type
self.id = id
self.config = config
self.last_check = last_check
def has_error(self, response_dict):
if 'error' in response_dict:
reason = response_dict['error']['errors'][0].get('reason', 'no reason given')
self.logger.error('Error from Youtube %s : %s (%s)', self.type, response_dict['error']['message'], reason)
return True
return False
def fetch(self, since=0):
results = None
if self.type == 'channel':
results = self._fetch_channel(since)
elif self.type == 'playlist':
results = self._fetch_playlist(since)
if results is not None:
self.last_check = int(time.time())
return results
@inlineCallbacks
def _fetch_channel(self, since=0):
results = []
api_key = self.config.get('sources', 'youtube_api_key')
response = yield get_request(YOUTUBE_CHANNEL_URL.format(api_key=api_key, id=self.id))
response_dict = response.json
if self.has_error(response_dict) or len(response_dict['items']) == 0:
defer.returnValue(results)
uploads = response_dict['items'][0]['contentDetails']['relatedPlaylists']['uploads']
page_token = ''
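# Page through the uploads playlist via nextPageToken. Items are assumed to
# arrive newest-first, which is what allows the early return below once an
# item older than `since` is seen.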
while page_token is not None:
response = yield get_request(YOUTUBE_PLAYLIST_URL.format(api_key=api_key, id=uploads, token=page_token))
response_dict = response.json
if self.has_error(response_dict):
break
items = response_dict['items']
for item in items:
snippet = item['snippet']
ts = datetime_to_ts(parse(snippet['publishedAt']))
if ts < since:
# We don't care about anything older than this
defer.returnValue(results)
results.append({'title': snippet['title'],
'link': 'youtube:' + snippet['resourceId']['videoId'],
'ts': ts,
'image': snippet['thumbnails']['default']['url']})
page_token = response_dict.get('nextPageToken', None)
defer.returnValue(results)
@inlineCallbacks
def _fetch_playlist(self, since=0):
results = []
page_token = ''
api_key = self.config.get('sources', 'youtube_api_key')
while page_token is not None:
url = YOUTUBE_PLAYLIST_URL.format(api_key=api_key, id=self.id, token=page_token)
response = yield get_request(url)
response_dict = response.json
if self.has_error(response_dict):
break
items = response_dict['items']
for item in items:
snippet = item['snippet']
if snippet['title'] in ['Deleted video', 'Private video']:
continue
ts = datetime_to_ts(parse(snippet['publishedAt']))
if ts < since:
# We don't care about anything older than this
defer.returnValue(results)
results.append({'title': snippet['title'],
'link': 'youtube:' + snippet['resourceId']['videoId'],
'ts': ts,
'image': snippet['thumbnails']['default']['url']})
page_token = response_dict.get('nextPageToken', None)
defer.returnValue(results)
def __str__(self):
return "YoutubeSource_%s_%s" % (self.type, self.id)
class LastfmSource(object):
def __init__(self, type, data, config, last_check=0):
self.logger = logging.getLogger(__name__)
self.type = type
self.data = data
self.config = config
self.last_check = last_check
@inlineCallbacks
def fetch(self, since=0):
results = []
username = self.config.get('sources', 'lastfm_username')
password = self.config.get('sources', 'lastfm_password')
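# Web-form login (no official API involved): fetch the login page to obtain a
# CSRF token, post the credentials, then set the preferred affiliate to
# YouTube so the artist's track pages expose data-youtube-id attributes.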
response = yield get_request(LASTFM_LOGIN_URL)
if not response.content:
defer.returnValue(results)
cookies = response.cookies
token = cookies['csrftoken']
data = {'csrfmiddlewaretoken': token,
'username': username,
'password': password}
response = yield post_request(LASTFM_LOGIN_URL, data=data, headers={'Referer': LASTFM_LOGIN_URL}, cookies=cookies)
cookies['sessionid'] = response.cookies['sessionid']
data = {'csrfmiddlewaretoken': token,
'preferred_affiliate': 'youtube'}
response = yield post_request(LASTFM_SETTINGS_URL, data=data, headers={'Referer': LASTFM_SETTINGS_URL}, cookies=cookies)
if not response.content:
defer.returnValue(results)
encoded_data = urllib.quote(self.data.encode('utf-8'))
response = yield get_request(LASTFM_TRACKS_URL.format(artist=encoded_data), cookies=cookies)
if not response.content:
defer.returnValue(results)
try:
tree = lxml.html.fromstring(response.content)
except Exception:
tree = None
results = []
if tree is not None:
# Find youtube ids
youtube_sel = CSSSelector('a[data-youtube-id]')
for youtube_video in youtube_sel(tree):
artist_name = youtube_video.get('data-artist-name')
track_name = youtube_video.get('data-track-name')
youtube_id = youtube_video.get('data-youtube-id')
results.append({'title': artist_name + ' - ' + track_name,
'link': 'youtube:' + youtube_id,
'ts': -1,
'image': 'https://i.ytimg.com/vi/' + youtube_id + '/default.jpg',
'musicinfo': {'artist_name': artist_name,
'track_name': track_name}})
self.last_check = int(time.time())
defer.returnValue(results)
def __str__(self):
return "LastfmSource_%s_%s" % (self.type, self.data)
class SoundcloudSource(object):
def __init__(self, type, data, config, last_check=0):
self.logger = logging.getLogger(__name__)
self.type = type
self.data = data
self.config = config
self.last_check = last_check
@inlineCallbacks
def fetch(self, since=0):
results = []
api_key = self.config.get('sources', 'soundcloud_api_key')
response = yield get_request(SOUNDCLOUD_FAVORITES_URL.format(user_id=self.data, api_key=api_key))
try:
response_dict = response.json
except ValueError:
response_dict = None
if response_dict:
for track in response_dict:
results.append({'title': track['title'],
'link': 'soundcloud:%s' % track['id'],
'ts': datetime_to_ts(parse(track['created_at'])),
'image': track['artwork_url']})
defer.returnValue(results)
def __str__(self):
return "SoundcloudSource_%s_%s" % (self.type, self.data)
|
|
# Copyright (c) 2015 Thales Services SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from concurrent import futures
import contextlib
import functools
import os
import random
import re
import select
import shlex
import signal
import subprocess
import fixtures
import netaddr
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from neutron.agent.common import config
from neutron.agent.common import ovs_lib
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.db import db_base_plugin_common
from neutron.tests import base as tests_base
from neutron.tests.common import base as common_base
from neutron.tests import tools
UNDEFINED = object()
NS_PREFIX = 'test-'
BR_PREFIX = 'test-br'
PORT_PREFIX = 'test-port'
VETH0_PREFIX = 'test-veth0'
VETH1_PREFIX = 'test-veth1'
PATCH_PREFIX = 'patch'
SS_SOURCE_PORT_PATTERN = re.compile(
r'^.*\s+\d+\s+.*:(?P<port>\d+)\s+[0-9:].*')
READ_TIMEOUT = os.environ.get('OS_TEST_READ_TIMEOUT', 5)
CHILD_PROCESS_TIMEOUT = os.environ.get('OS_TEST_CHILD_PROCESS_TIMEOUT', 20)
CHILD_PROCESS_SLEEP = os.environ.get('OS_TEST_CHILD_PROCESS_SLEEP', 0.5)
TRANSPORT_PROTOCOLS = (n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP)
def increment_ip_cidr(ip_cidr, offset=1):
"""Increment ip_cidr offset times.
example: increment_ip_cidr("1.2.3.4/24", 2) ==> "1.2.3.6/24"
"""
net0 = netaddr.IPNetwork(ip_cidr)
net = netaddr.IPNetwork(ip_cidr)
net.value += offset
if not net0.network < net.ip < net0[-1]:
tools.fail(
'Incorrect ip_cidr,offset tuple (%s,%s): "incremented" ip_cidr is '
'outside ip_cidr' % (ip_cidr, offset))
return str(net)
def set_namespace_gateway(port_dev, gateway_ip):
"""Set gateway for the namespace associated to the port."""
if not port_dev.namespace:
tools.fail('tests should not change test machine gateway')
port_dev.route.add_gateway(gateway_ip)
def assert_ping(src_namespace, dst_ip, timeout=1, count=1):
ipversion = netaddr.IPAddress(dst_ip).version
ping_command = 'ping' if ipversion == 4 else 'ping6'
ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)
ns_ip_wrapper.netns.execute([ping_command, '-c', count, '-W', timeout,
dst_ip])
@contextlib.contextmanager
def async_ping(namespace, ips):
with futures.ThreadPoolExecutor(max_workers=len(ips)) as executor:
fs = [executor.submit(assert_ping, namespace, ip, count=10)
for ip in ips]
yield lambda: all(f.done() for f in fs)
futures.wait(fs)
for f in fs:
f.result()
def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1):
try:
assert_ping(src_namespace, dst_ip, timeout, count)
except RuntimeError:
pass
else:
tools.fail("destination ip %(destination)s is replying to ping from "
"namespace %(ns)s, but it shouldn't" %
{'ns': src_namespace, 'destination': dst_ip})
def assert_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
"""Send arp request using arping executable.
NOTE: ARP protocol is used in IPv4 only. IPv6 uses Neighbour Discovery
Protocol instead.
"""
ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)
arping_cmd = ['arping', '-c', count, '-w', timeout]
if source:
arping_cmd.extend(['-s', source])
arping_cmd.append(dst_ip)
ns_ip_wrapper.netns.execute(arping_cmd)
def assert_no_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
try:
assert_arping(src_namespace, dst_ip, source, timeout, count)
except RuntimeError:
pass
else:
tools.fail("destination ip %(destination)s is replying to arp from "
"namespace %(ns)s, but it shouldn't" %
{'ns': src_namespace, 'destination': dst_ip})
def _get_source_ports_from_ss_output(output):
ports = set()
for line in output.splitlines():
match = SS_SOURCE_PORT_PATTERN.match(line)
if match:
ports.add(match.group('port'))
return ports
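# Hedged example of a matching `ss -tna` line (addresses assumed for illustration):
#   'ESTAB 0 0 192.168.0.10:54321 10.0.0.1:80' -> the local port '54321' is collected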
def get_unused_port(used, start=1024, end=65535):
candidates = set(range(start, end + 1))
return random.choice(list(candidates - used))
def get_free_namespace_port(protocol, namespace=None):
"""Return an unused port from given namespace
WARNING: This function returns a port that is free at the execution time of
this function. If this port is used later for binding then there
is a potential danger that port will be no longer free. It's up to
the programmer to handle error if port is already in use.
:param protocol: Return free port for given protocol. Supported protocols
are 'tcp' and 'udp'.
"""
if protocol == n_const.PROTO_NAME_TCP:
param = '-tna'
elif protocol == n_const.PROTO_NAME_UDP:
param = '-una'
else:
raise ValueError("Unsupported procotol %s" % protocol)
ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
output = ip_wrapper.netns.execute(['ss', param])
used_ports = _get_source_ports_from_ss_output(output)
return get_unused_port(used_ports)
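# Hedged usage sketch (namespace name assumed):
#   port = get_free_namespace_port(n_const.PROTO_NAME_TCP, namespace='test-ns')
# The port is only free at call time; binding it later can still race.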
def create_patch_ports(source, destination):
"""Hook up two OVS bridges.
The result is two patch ports, each end connected to a bridge.
The two patch port names will start with 'patch-', followed by identical
four characters. For example patch-xyzw-fedora and patch-xyzw-ubuntu,
where fedora and ubuntu are random strings.
:param source: Instance of OVSBridge
:param destination: Instance of OVSBridge
"""
common = tests_base.get_rand_name(max_length=4, prefix='')
prefix = '%s-%s-' % (PATCH_PREFIX, common)
source_name = tests_base.get_rand_device_name(prefix=prefix)
destination_name = tests_base.get_rand_device_name(prefix=prefix)
source.add_patch_port(source_name, destination_name)
destination.add_patch_port(destination_name, source_name)
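# Illustrative sketch (not part of the original module): connect two OVS
# bridges created through OVSBridgeFixture (defined further below) with a
# patch port pair. Assumes it runs inside a fixtures-aware test case.
def _example_patch_two_bridges(test_case):
    br_source = test_case.useFixture(OVSBridgeFixture()).bridge
    br_destination = test_case.useFixture(OVSBridgeFixture()).bridge
    create_patch_ports(br_source, br_destination)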
class RootHelperProcess(subprocess.Popen):
def __init__(self, cmd, *args, **kwargs):
for arg in ('stdin', 'stdout', 'stderr'):
kwargs.setdefault(arg, subprocess.PIPE)
self.namespace = kwargs.pop('namespace', None)
self.cmd = cmd
if self.namespace is not None:
cmd = ['ip', 'netns', 'exec', self.namespace] + cmd
root_helper = config.get_root_helper(utils.cfg.CONF)
cmd = shlex.split(root_helper) + cmd
self.child_pid = None
super(RootHelperProcess, self).__init__(cmd, *args, **kwargs)
self._wait_for_child_process()
def kill(self, sig=signal.SIGKILL):
pid = self.child_pid or str(self.pid)
utils.execute(['kill', '-%d' % sig, pid], run_as_root=True)
def read_stdout(self, timeout=None):
return self._read_stream(self.stdout, timeout)
@staticmethod
def _read_stream(stream, timeout):
if timeout:
poller = select.poll()
poller.register(stream.fileno())
poll_predicate = functools.partial(poller.poll, 1)
utils.wait_until_true(poll_predicate, timeout, 0.1,
RuntimeError(
'No output in %.2f seconds' % timeout))
return stream.readline()
def writeline(self, data):
self.stdin.write(data + os.linesep)
self.stdin.flush()
def _wait_for_child_process(self, timeout=CHILD_PROCESS_TIMEOUT,
sleep=CHILD_PROCESS_SLEEP):
def child_is_running():
child_pid = utils.get_root_helper_child_pid(
self.pid, run_as_root=True)
if utils.pid_invoked_with_cmdline(child_pid, self.cmd):
return True
utils.wait_until_true(
child_is_running,
timeout,
exception=RuntimeError("Process %s hasn't been spawned "
"in %d seconds" % (self.cmd, timeout)))
self.child_pid = utils.get_root_helper_child_pid(
self.pid, run_as_root=True)
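# Illustrative sketch (not part of the original module): run `cat` as root
# inside a namespace and echo one line through it. The namespace name is
# hypothetical and must already exist.
def _example_root_helper_echo(namespace='test-ns'):
    proc = RootHelperProcess(['cat'], namespace=namespace)
    proc.writeline('hello')
    line = proc.read_stdout(READ_TIMEOUT)
    proc.kill()
    return line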
class NetcatTester(object):
TCP = n_const.PROTO_NAME_TCP
UDP = n_const.PROTO_NAME_UDP
def __init__(self, client_namespace, server_namespace, address,
dst_port, protocol, server_address='0.0.0.0', src_port=None):
"""
Tool for testing connectivity on transport layer using netcat
executable.
The processes are spawned lazily.
:param client_namespace: Namespace in which netcat process that
connects to other netcat will be spawned
:param server_namespace: Namespace in which listening netcat process
will be spawned
:param address: Server address from client point of view
:param dst_port: Port on which netcat listens
:param protocol: Transport protocol, either 'tcp' or 'udp'
:param server_address: Address in server namespace on which netcat
should listen
:param src_port: Source port of netcat process spawned in client
namespace - packet will have src_port in TCP/UDP
header with this value
"""
self.client_namespace = client_namespace
self.server_namespace = server_namespace
self._client_process = None
self._server_process = None
self.address = address
self.server_address = server_address
self.dst_port = str(dst_port)
self.src_port = str(src_port) if src_port else None
if protocol not in TRANSPORT_PROTOCOLS:
raise ValueError("Unsupported protocol %s" % protocol)
self.protocol = protocol
@property
def client_process(self):
if not self._client_process:
self.establish_connection()
return self._client_process
@property
def server_process(self):
if not self._server_process:
self._spawn_server_process()
return self._server_process
def _spawn_server_process(self):
self._server_process = self._spawn_nc_in_namespace(
self.server_namespace,
address=self.server_address,
listen=True)
@property
def is_established(self):
return bool(self._client_process and not self._client_process.poll())
def establish_connection(self):
if self._client_process:
raise RuntimeError('%(proto)s connection to %(ip_addr)s is already'
' established' %
{'proto': self.protocol,
'ip_addr': self.address})
if not self._server_process:
self._spawn_server_process()
self._client_process = self._spawn_nc_in_namespace(
self.client_namespace,
address=self.address)
if self.protocol == self.UDP:
# Create an ASSURED entry in conntrack table for UDP packets,
# that requires 3-way communication
# 1st transmission creates UNREPLIED
# 2nd transmission removes UNREPLIED
# 3rd transmission creates ASSURED
data = 'foo'
self.client_process.writeline(data)
self.server_process.read_stdout(READ_TIMEOUT)
self.server_process.writeline(data)
self.client_process.read_stdout(READ_TIMEOUT)
self.client_process.writeline(data)
self.server_process.read_stdout(READ_TIMEOUT)
def test_connectivity(self, respawn=False):
testing_string = uuidutils.generate_uuid()
if respawn:
self.stop_processes()
self.client_process.writeline(testing_string)
message = self.server_process.read_stdout(READ_TIMEOUT).strip()
self.server_process.writeline(message)
message = self.client_process.read_stdout(READ_TIMEOUT).strip()
return message == testing_string
def _spawn_nc_in_namespace(self, namespace, address, listen=False):
cmd = ['nc', address, self.dst_port]
if self.protocol == self.UDP:
cmd.append('-u')
if listen:
cmd.append('-l')
if self.protocol == self.TCP:
cmd.append('-k')
else:
cmd.extend(['-w', '20'])
if self.src_port:
cmd.extend(['-p', self.src_port])
proc = RootHelperProcess(cmd, namespace=namespace)
return proc
def stop_processes(self):
for proc_attr in ('_client_process', '_server_process'):
proc = getattr(self, proc_attr)
if proc:
if proc.poll() is None:
proc.kill()
proc.wait()
setattr(self, proc_attr, None)
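# Illustrative sketch (not part of the original module): check TCP
# connectivity between two existing namespaces with NetcatTester. The
# namespace names and address below are hypothetical.
def _example_netcat_connectivity():
    nc = NetcatTester('ns-client', 'ns-server', '192.168.0.2', 3333,
                      NetcatTester.TCP)
    try:
        return nc.test_connectivity()
    finally:
        nc.stop_processes()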
class NamespaceFixture(fixtures.Fixture):
"""Create a namespace.
:ivar ip_wrapper: created namespace
:type ip_wrapper: IPWrapper
:ivar name: created namespace name
:type name: str
"""
def __init__(self, prefix=NS_PREFIX):
super(NamespaceFixture, self).__init__()
self.prefix = prefix
def _setUp(self):
ip = ip_lib.IPWrapper()
self.name = self.prefix + uuidutils.generate_uuid()
self.addCleanup(self.destroy)
self.ip_wrapper = ip.ensure_namespace(self.name)
def destroy(self):
if self.ip_wrapper.netns.exists(self.name):
self.ip_wrapper.netns.delete(self.name)
class VethFixture(fixtures.Fixture):
"""Create a veth.
:ivar ports: created veth ports
:type ports: IPDevice 2-uplet
"""
def _setUp(self):
ip_wrapper = ip_lib.IPWrapper()
self.ports = common_base.create_resource(
VETH0_PREFIX,
lambda name: ip_wrapper.add_veth(name, self.get_peer_name(name)))
self.addCleanup(self.destroy)
def destroy(self):
for port in self.ports:
ip_wrapper = ip_lib.IPWrapper(port.namespace)
if ip_wrapper.netns.exists(port.namespace):
try:
ip_wrapper.del_veth(port.name)
break
except RuntimeError:
# NOTE(cbrandily): It seems a veth is automagically deleted
# when a namespace owning a veth endpoint is deleted.
pass
@staticmethod
def get_peer_name(name):
if name.startswith(VETH0_PREFIX):
return name.replace(VETH0_PREFIX, VETH1_PREFIX)
elif name.startswith(VETH1_PREFIX):
return name.replace(VETH1_PREFIX, VETH0_PREFIX)
else:
tools.fail('%s is not a valid VethFixture veth endpoint' % name)
@six.add_metaclass(abc.ABCMeta)
class PortFixture(fixtures.Fixture):
"""Create a port.
:ivar port: created port
:type port: IPDevice
:ivar bridge: port bridge
"""
def __init__(self, bridge=None, namespace=None, mac=None, port_id=None):
super(PortFixture, self).__init__()
self.bridge = bridge
self.namespace = namespace
self.mac = (
mac or db_base_plugin_common.DbBasePluginCommon._generate_mac())
self.port_id = port_id or uuidutils.generate_uuid()
@abc.abstractmethod
def _create_bridge_fixture(self):
pass
@abc.abstractmethod
def _setUp(self):
super(PortFixture, self)._setUp()
if not self.bridge:
self.bridge = self.useFixture(self._create_bridge_fixture()).bridge
@classmethod
def get(cls, bridge, namespace=None, mac=None, port_id=None):
"""Deduce PortFixture class from bridge type and instantiate it."""
if isinstance(bridge, ovs_lib.OVSBridge):
return OVSPortFixture(bridge, namespace, mac, port_id)
if isinstance(bridge, bridge_lib.BridgeDevice):
return LinuxBridgePortFixture(bridge, namespace)
if isinstance(bridge, VethBridge):
return VethPortFixture(bridge, namespace)
tools.fail('Unexpected bridge type: %s' % type(bridge))
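# Illustrative sketch (not part of the original module): plug a port into an
# existing bridge from inside a test, letting PortFixture.get pick the
# concrete fixture class for the bridge type.
def _example_plug_port(test_case, bridge, namespace):
    port_fixture = test_case.useFixture(PortFixture.get(bridge, namespace))
    return port_fixture.port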
class OVSBridgeFixture(fixtures.Fixture):
"""Create an OVS bridge.
:ivar prefix: bridge name prefix
:type prefix: str
:ivar bridge: created bridge
:type bridge: OVSBridge
"""
def __init__(self, prefix=BR_PREFIX):
super(OVSBridgeFixture, self).__init__()
self.prefix = prefix
def _setUp(self):
ovs = ovs_lib.BaseOVS()
self.bridge = common_base.create_resource(self.prefix, ovs.add_bridge)
self.addCleanup(self.bridge.destroy)
class OVSPortFixture(PortFixture):
def _create_bridge_fixture(self):
return OVSBridgeFixture()
def _setUp(self):
super(OVSPortFixture, self)._setUp()
interface_config = cfg.ConfigOpts()
interface_config.register_opts(interface.OPTS)
ovs_interface = interface.OVSInterfaceDriver(interface_config)
port_name = tests_base.get_rand_device_name(PORT_PREFIX)
ovs_interface.plug_new(
None,
self.port_id,
port_name,
self.mac,
bridge=self.bridge.br_name,
namespace=self.namespace)
self.addCleanup(self.bridge.delete_port, port_name)
self.port = ip_lib.IPDevice(port_name, self.namespace)
class LinuxBridgeFixture(fixtures.Fixture):
"""Create a linux bridge.
:ivar bridge: created bridge
:type bridge: BridgeDevice
:ivar namespace: created bridge namespace
:type namespace: str
"""
def __init__(self, prefix=BR_PREFIX, namespace=UNDEFINED):
super(LinuxBridgeFixture, self).__init__()
self.prefix = prefix
self.namespace = namespace
def _setUp(self):
if self.namespace is UNDEFINED:
self.namespace = self.useFixture(NamespaceFixture()).name
self.bridge = common_base.create_resource(
self.prefix,
bridge_lib.BridgeDevice.addbr,
namespace=self.namespace)
self.addCleanup(self.bridge.delbr)
self.bridge.link.set_up()
self.addCleanup(self.bridge.link.set_down)
class LinuxBridgePortFixture(PortFixture):
"""Create a linux bridge port.
:ivar port: created port
:type port: IPDevice
:ivar br_port: bridge side veth peer port
:type br_port: IPDevice
"""
def _create_bridge_fixture(self):
return LinuxBridgeFixture()
def _setUp(self):
super(LinuxBridgePortFixture, self)._setUp()
self.port, self.br_port = self.useFixture(VethFixture()).ports
# bridge side
br_ip_wrapper = ip_lib.IPWrapper(self.bridge.namespace)
br_ip_wrapper.add_device_to_namespace(self.br_port)
self.bridge.addif(self.br_port)
self.br_port.link.set_up()
# port side
ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
ns_ip_wrapper.add_device_to_namespace(self.port)
self.port.link.set_up()
class VethBridge(object):
def __init__(self, ports):
self.ports = ports
self.unallocated_ports = set(self.ports)
def allocate_port(self):
try:
return self.unallocated_ports.pop()
except KeyError:
            tools.fail('All VethBridge ports (%s) are already allocated.' %
                       len(self.ports))
class VethBridgeFixture(fixtures.Fixture):
"""Simulate a bridge with a veth.
:ivar bridge: created bridge
    :type bridge: VethBridge
"""
def _setUp(self):
ports = self.useFixture(VethFixture()).ports
self.bridge = VethBridge(ports)
class VethPortFixture(PortFixture):
"""Create a veth bridge port.
:ivar port: created port
:type port: IPDevice
"""
def _create_bridge_fixture(self):
return VethBridgeFixture()
def _setUp(self):
super(VethPortFixture, self)._setUp()
self.port = self.bridge.allocate_port()
ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
ns_ip_wrapper.add_device_to_namespace(self.port)
self.port.link.set_up()
|
|
# ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
import pandas as pd
from activitysim.core import logit
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import tracing
from activitysim.core import chunk
from activitysim.core import pipeline
from activitysim.core import expressions
from activitysim.core import simulate
from .util import estimation
logger = logging.getLogger(__name__)
PROBS_JOIN_COLUMNS = ['primary_purpose', 'outbound', 'person_type']
def map_coefficients(spec, coefficients):
if isinstance(coefficients, pd.DataFrame):
assert ('value' in coefficients.columns)
coefficients = coefficients['value'].to_dict()
assert isinstance(coefficients, dict), \
"map_coefficients doesn't grok type of coefficients: %s" % (type(coefficients))
for c in spec.columns:
if c == simulate.SPEC_LABEL_NAME:
continue
spec[c] = spec[c].map(coefficients).astype(np.float32)
assert not spec.isnull().any()
return spec
def choose_intermediate_trip_purpose(trips, probs_spec, estimator, trace_hh_id, trace_label):
"""
    Choose a purpose for intermediate trips based on probs_spec,
    which assigns relative weights (summing to 1) to the possible purpose choices
Returns
-------
purpose: pandas.Series of purpose (str) indexed by trip_id
"""
non_purpose_cols = PROBS_JOIN_COLUMNS + ['depart_range_start', 'depart_range_end']
purpose_cols = [c for c in probs_spec.columns if c not in non_purpose_cols]
num_trips = len(trips.index)
have_trace_targets = trace_hh_id and tracing.has_trace_targets(trips)
# probs should sum to 1 across rows
sum_probs = probs_spec[purpose_cols].sum(axis=1)
probs_spec.loc[:, purpose_cols] = probs_spec.loc[:, purpose_cols].div(sum_probs, axis=0)
# left join trips to probs (there may be multiple rows per trip for multiple depart ranges)
choosers = pd.merge(trips.reset_index(), probs_spec, on=PROBS_JOIN_COLUMNS,
how='left').set_index('trip_id')
chunk.log_df(trace_label, 'choosers', choosers)
    # select the matching depart range (this should result in exactly one chooser row per trip)
chooser_probs = \
(choosers.start >= choosers['depart_range_start']) & (choosers.start <= choosers['depart_range_end'])
# if we failed to match a row in probs_spec
if chooser_probs.sum() < num_trips:
        # this can happen if probs_spec has no row matching a trip's probs_join_cols values
missing_trip_ids = trips.index[~trips.index.isin(choosers.index[chooser_probs])].values
unmatched_choosers = choosers[choosers.index.isin(missing_trip_ids)]
unmatched_choosers = unmatched_choosers[['person_id', 'start'] + non_purpose_cols]
# join to persons for better diagnostics
persons = inject.get_table('persons').to_frame()
persons_cols = ['age', 'is_worker', 'is_student', 'is_gradeschool', 'is_highschool', 'is_university']
unmatched_choosers = pd.merge(unmatched_choosers, persons[persons_cols],
left_on='person_id', right_index=True, how='left')
file_name = '%s.UNMATCHED_PROBS' % trace_label
logger.error("%s %s of %s intermediate trips could not be matched to probs based on join columns %s" %
(trace_label, len(unmatched_choosers), len(choosers), PROBS_JOIN_COLUMNS))
logger.info("Writing %s unmatched choosers to %s" % (len(unmatched_choosers), file_name,))
tracing.write_csv(unmatched_choosers, file_name=file_name, transpose=False)
raise RuntimeError("Some trips could not be matched to probs based on join columns %s." % PROBS_JOIN_COLUMNS)
    # select the matching depart range (this should result in exactly one chooser row per trip)
choosers = choosers[chooser_probs]
# choosers should now match trips row for row
assert choosers.index.identical(trips.index)
if estimator:
probs_cols = list(probs_spec.columns)
print(choosers[probs_cols])
estimator.write_table(choosers[probs_cols], 'probs', append=True)
choices, rands = logit.make_choices(
choosers[purpose_cols],
trace_label=trace_label, trace_choosers=choosers)
if have_trace_targets:
tracing.trace_df(choices, '%s.choices' % trace_label, columns=[None, 'trip_purpose'])
tracing.trace_df(rands, '%s.rands' % trace_label, columns=[None, 'rand'])
choices = choices.map(pd.Series(purpose_cols))
return choices
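# Illustrative sketch (not part of the original module): after the left join,
# each trip keeps only the probs_spec row whose depart range brackets its
# start period. The column values below are made up.
def _example_depart_range_match():
    choosers = pd.DataFrame({
        'start': [7, 7, 15],
        'depart_range_start': [5, 11, 11],
        'depart_range_end': [10, 23, 23]})
    chooser_probs = ((choosers.start >= choosers['depart_range_start']) &
                     (choosers.start <= choosers['depart_range_end']))
    return choosers[chooser_probs]  # keeps rows 0 and 2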
def run_trip_purpose(
trips_df,
estimator,
chunk_size,
trace_hh_id,
trace_label):
"""
trip purpose - main functionality separated from model step so it can be called iteratively
    For each intermediate stop on a tour (i.e. any trip other than the last outbound or inbound trip),
    a purpose is assigned based on an observed frequency distribution.
    The distribution is segmented by tour purpose, tour direction, person type,
    and, optionally, trip depart time.
Returns
-------
purpose: pandas.Series of purpose (str) indexed by trip_id
"""
# uniform across trip_purpose
chunk_tag = 'trip_purpose'
model_settings_file_name = 'trip_purpose.yaml'
model_settings = config.read_model_settings(model_settings_file_name)
spec_file_name = model_settings.get('PROBS_SPEC', 'trip_purpose_probs.csv')
probs_spec = pd.read_csv(config.config_file_path(spec_file_name), comment='#')
# FIXME for now, not really doing estimation for probabilistic model - just overwriting choices
# besides, it isn't clear that named coefficients would be helpful if we had some form of estimation
# coefficients_df = simulate.read_model_coefficients(model_settings)
# probs_spec = map_coefficients(probs_spec, coefficients_df)
if estimator:
estimator.write_spec(model_settings, tag='PROBS_SPEC')
estimator.write_model_settings(model_settings, model_settings_file_name)
# estimator.write_coefficients(coefficients_df, model_settings)
result_list = []
# - last trip of outbound tour gets primary_purpose
last_trip = (trips_df.trip_num == trips_df.trip_count)
purpose = trips_df.primary_purpose[last_trip & trips_df.outbound]
result_list.append(purpose)
logger.info("assign purpose to %s last outbound trips", purpose.shape[0])
# - last trip of inbound tour gets home (or work for atwork subtours)
purpose = trips_df.primary_purpose[last_trip & ~trips_df.outbound]
# FIXME should be lower case for consistency?
purpose = pd.Series(np.where(purpose == 'atwork', 'Work', 'Home'), index=purpose.index)
result_list.append(purpose)
logger.info("assign purpose to %s last inbound trips", purpose.shape[0])
# - intermediate stops (non-last trips) purpose assigned by probability table
trips_df = trips_df[~last_trip]
logger.info("assign purpose to %s intermediate trips", trips_df.shape[0])
preprocessor_settings = model_settings.get('preprocessor', None)
if preprocessor_settings:
locals_dict = config.get_model_constants(model_settings)
expressions.assign_columns(
df=trips_df,
model_settings=preprocessor_settings,
locals_dict=locals_dict,
trace_label=trace_label)
for i, trips_chunk, chunk_trace_label in \
chunk.adaptive_chunked_choosers(trips_df, chunk_size, chunk_tag, trace_label):
choices = choose_intermediate_trip_purpose(
trips_chunk,
probs_spec,
estimator,
trace_hh_id,
trace_label=chunk_trace_label)
result_list.append(choices)
        chunk.log_df(trace_label, 'result_list', result_list)
if len(result_list) > 1:
choices = pd.concat(result_list)
return choices
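# Illustrative sketch (not part of the original module): the last trip of each
# tour leg is the one whose trip_num equals trip_count; everything else is an
# intermediate stop handled by the probability table above.
def _example_last_trip_mask():
    trips = pd.DataFrame({'trip_num': [1, 2, 1, 2, 3],
                          'trip_count': [2, 2, 3, 3, 3]})
    last_trip = trips.trip_num == trips.trip_count
    return trips[~last_trip]  # the intermediate trips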
@inject.step()
def trip_purpose(
trips,
chunk_size,
trace_hh_id):
"""
trip purpose model step - calls run_trip_purpose to run the actual model
adds purpose column to trips
"""
trace_label = "trip_purpose"
trips_df = trips.to_frame()
estimator = estimation.manager.begin_estimation('trip_purpose')
if estimator:
chooser_cols_for_estimation = ['person_id', 'household_id', 'tour_id', 'trip_num']
estimator.write_choosers(trips_df[chooser_cols_for_estimation])
choices = run_trip_purpose(
trips_df,
estimator,
chunk_size=chunk_size,
trace_hh_id=trace_hh_id,
trace_label=trace_label
)
if estimator:
estimator.write_choices(choices)
choices = estimator.get_survey_values(choices, 'trips', 'purpose') # override choices
estimator.write_override_choices(choices)
estimator.end_estimation()
trips_df['purpose'] = choices
# we should have assigned a purpose to all trips
assert not trips_df.purpose.isnull().any()
pipeline.replace_table("trips", trips_df)
if trace_hh_id:
tracing.trace_df(trips_df,
label=trace_label,
slicer='trip_id',
index_label='trip_id',
warn_if_empty=True)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""for_loop and pfor ops."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.parallel_for.pfor import PFor
from tensorflow.python.ops.parallel_for.pfor import PForConfig
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):
"""Runs `loop_fn` `iters` times and stacks the outputs.
Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
stacks corresponding outputs of the different runs.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and returns a possibly nested structure of tensor
objects. The shape of these outputs should not depend on the input.
loop_fn_dtypes: dtypes for the outputs of loop_fn.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: The number of iterations that can be dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked output tensor objects with the same
nested structure as the output of `loop_fn`.
"""
flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
is_none_list = []
def while_body(i, *ta_list):
"""Body of while loop."""
fn_output = nest.flatten(loop_fn(i))
if len(fn_output) != len(flat_loop_fn_dtypes):
raise ValueError(
"Number of expected outputs, %d, does not match the number of "
"actual outputs, %d, from loop_fn" % (len(flat_loop_fn_dtypes),
len(fn_output)))
outputs = []
del is_none_list[:]
is_none_list.extend([x is None for x in fn_output])
for out, ta in zip(fn_output, ta_list):
# TODO(agarwal): support returning Operation objects from loop_fn.
if out is not None:
# out may be a ref tensor, wrap it in identity to get a non-ref tensor.
ta = ta.write(i, array_ops.expand_dims(out, 0))
outputs.append(ta)
return tuple([i + 1] + outputs)
if parallel_iterations is not None:
extra_args = {"parallel_iterations": parallel_iterations}
else:
extra_args = {}
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters,
while_body,
[0] + [tensor_array_ops.TensorArray(dtype.base_dtype, iters)
for dtype in flat_loop_fn_dtypes],
**extra_args)[1:]
# TODO(rachelim): enable this for sparse tensors
output = [None if is_none else ta.concat()
for ta, is_none in zip(ta_list, is_none_list)]
assert len(output) in (0, len(flat_loop_fn_dtypes))
if not output:
# This may happen for the case where iters == 0.
return None
else:
return nest.pack_sequence_as(loop_fn_dtypes, output)
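# Illustrative sketch (not part of the original module): stack the squares of
# 0..4 into a float32 tensor of shape [5] using for_loop.
def _example_for_loop():
  from tensorflow.python.framework import dtypes
  return for_loop(
      lambda i: math_ops.square(math_ops.cast(i, dtypes.float32)),
      dtypes.float32, 5)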
def _flatten_first_two_dims(x):
"""Flattens the first two dimensions of x into a single dimension."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[old_shape[0] * old_shape[1]], old_shape[2:]],
axis=0)
return array_ops.reshape(x, new_shape)
PFOR_CONFIG_ARG = "pfor_config"
def _is_under_xla_context():
"""Check if we are currently inside an XLA compile context."""
g = ops.get_default_graph()
while g is not None:
control_flow_context = g._get_control_flow_context() # pylint: disable=protected-access
while control_flow_context is not None:
if control_flow_context.IsXLAContext():
return True
else:
control_flow_context = control_flow_context.outer_context
# If g is a FuncGraph, get its outer_graph.
g = getattr(g, "outer_graph", None)
return False
def pfor(loop_fn, iters, parallel_iterations=None):
"""Equivalent to running `loop_fn` `iters` times and stacking the outputs.
`pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
times, with input from 0 to `iters - 1`, and stacking corresponding output of
each iteration. However the implementation does not use a tf.while_loop.
Instead it adds new operations to the graph that collectively compute the same
value as what running `loop_fn` in a loop would compute.
This is an experimental feature and currently has a lot of limitations:
- There should be no data dependency between the different iterations. For
example, a future iteration should not depend on a value or side-effect of
a previous iteration.
- Stateful kernels may mostly not be supported since these often imply a
data dependency or ordering of the iterations. We do support a limited set
of such stateful kernels though (like RandomFoo, Variable operations like
reads, etc).
- Conversion works only on a limited set of kernels for which a converter
has been registered.
- loop_fn has limited support for control flow operations. tf.cond in
particular is not supported.
- `loop_fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of `loop_fn` outputs should not depend on the input
to loop_fn.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and optionally a keyword argument `pfor_config` set
to a PForConfig object. It returns a possibly nested structure of Tensor
or Operation objects. Note that if setting `parallel_iterations` argument
to something other than None, `loop_fn` may be called more than once
during graph construction. So it may need to avoid mutating global state.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None corresponds to
vectorizing all the iterations. If `parallel_iterations` is smaller than
`iters`, then chunks of at most that many iterations are dispatched in
sequence. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked tensor objects with the same nested
structure as the output of `loop_fn`.
Raises:
ValueError: If parallel_iterations is not None and not an integer > 1.
"""
def f():
return _pfor_impl(loop_fn, iters, parallel_iterations=parallel_iterations)
# Note that we wrap into a tf.function if in eager execution mode or under
# XLA compilation. The latter is so that we don't compile operations like
# tf.placeholder that are created by the loop body.
if context.executing_eagerly() or _is_under_xla_context():
f = function.defun(f)
return f()
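# Illustrative sketch (not part of the original module): vectorize squaring of
# the iteration index; equivalent to stacking i * i for i in 0..7.
def _example_pfor_squares():
  return pfor(lambda i: math_ops.square(i), 8)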
def _loop_fn_has_config(loop_fn):
"""Test if `loop_fn` has a `pfor_config` argument."""
if tf_inspect.isfunction(loop_fn):
argspec = tf_inspect.getargspec(loop_fn)
return PFOR_CONFIG_ARG in argspec.args
elif isinstance(loop_fn, functools.partial):
fn = loop_fn.func
argspec = tf_inspect.getargspec(fn)
return (PFOR_CONFIG_ARG in argspec.args and
PFOR_CONFIG_ARG not in loop_fn.keywords)
else:
loop_class = tf_decorator.unwrap(loop_fn)[1]
if not hasattr(loop_class, "__call__"):
raise ValueError("loop_fn object did not have a __call__ method")
argspec = tf_inspect.getargspec(loop_class.__call__)
return PFOR_CONFIG_ARG in argspec.args
def _pfor_impl(loop_fn, iters, parallel_iterations=None, pfor_config=None):
"""Implementation of pfor."""
loop_fn_has_config = _loop_fn_has_config(loop_fn)
existing_ops = set(ops.get_default_graph().get_operations())
# Run the loop body
with ops.name_scope("loop_body"):
loop_var = array_ops.placeholder_with_default(0, shape=[])
if loop_fn_has_config:
if pfor_config is None:
pfor_config = PForConfig()
pfor_config._set_iters(iters) # pylint: disable=protected-access
loop_fn_outputs = loop_fn(loop_var, **{PFOR_CONFIG_ARG: pfor_config})
else:
assert pfor_config is None
loop_fn_outputs = loop_fn(loop_var)
# Convert outputs to Tensor if needed.
tmp_loop_fn_outputs = []
for loop_fn_output in nest.flatten(loop_fn_outputs):
if (loop_fn_output is not None and not isinstance(
loop_fn_output,
(ops.Operation, ops.Tensor, sparse_tensor.SparseTensor))):
if isinstance(loop_fn_output, indexed_slices.IndexedSlices):
logging.warn("Converting %s to a dense representation may make it slow."
" Alternatively, output the indices and values of the"
" IndexedSlices separately, and handle the vectorized"
" outputs directly." % loop_fn_output)
loop_fn_output = ops.convert_to_tensor(loop_fn_output)
tmp_loop_fn_outputs.append(loop_fn_output)
loop_fn_outputs = nest.pack_sequence_as(loop_fn_outputs, tmp_loop_fn_outputs)
new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
iters = ops.convert_to_tensor(iters)
if parallel_iterations is not None:
if parallel_iterations < 1:
raise ValueError("parallel_iterations must be None or a positive integer")
if parallel_iterations == 1:
raise ValueError("Found parallel_iterations == 1. Use for_loop instead.")
iters_value = tensor_util.constant_value(iters)
if iters_value is not None and iters_value < parallel_iterations:
parallel_iterations = None
if parallel_iterations is None:
with ops.name_scope("pfor"):
converter = PFor(loop_var, iters, new_ops, pfor_config=pfor_config)
outputs = []
for loop_fn_output in nest.flatten(loop_fn_outputs):
outputs.append(converter.convert(loop_fn_output))
return nest.pack_sequence_as(loop_fn_outputs, outputs)
else:
if pfor_config is not None and pfor_config._has_reductions(): # pylint: disable=protected-access
raise ValueError("Setting parallel_iterations currently unsupported if"
" reductions across iterations are performed.")
num_tiled_iterations = iters // parallel_iterations
num_remaining_iterations = iters % parallel_iterations
# TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside
# a tf.function and extract the graph from there to vectorize it.
with ops.name_scope("pfor_untiled"):
converter = PFor(loop_var, num_remaining_iterations, new_ops,
pfor_config=pfor_config)
remaining_outputs = []
flattened_loop_fn_outputs = nest.flatten(loop_fn_outputs)
for loop_fn_output in flattened_loop_fn_outputs:
remaining_outputs.append(converter.convert(loop_fn_output))
with ops.name_scope("pfor_tiled"):
loop_fn_dtypes = [ops.convert_to_tensor(x).dtype
for x in flattened_loop_fn_outputs]
def tiled_loop_body(j):
offset = j * parallel_iterations + num_remaining_iterations
def tiled_loop_fn(i, pfor_config=None):
if loop_fn_has_config:
return nest.flatten(loop_fn(i + offset, pfor_config=pfor_config))
else:
return nest.flatten(loop_fn(i + offset))
return _pfor_impl(
tiled_loop_fn, parallel_iterations, pfor_config=pfor_config)
tiled_outputs = for_loop(tiled_loop_body, loop_fn_dtypes,
num_tiled_iterations, parallel_iterations=1)
tiled_outputs = [_flatten_first_two_dims(y) for y in tiled_outputs]
with ops.name_scope("pfor"):
iters_value = tensor_util.constant_value(iters)
if iters_value is None or iters_value % parallel_iterations:
outputs = control_flow_ops.cond(
math_ops.equal(num_remaining_iterations, 0),
lambda: tiled_outputs,
lambda: [array_ops.concat([x, y], axis=0)
for x, y in zip(remaining_outputs, tiled_outputs)])
else:
outputs = tiled_outputs
return nest.pack_sequence_as(loop_fn_outputs, nest.flatten(outputs))
@tf_export("vectorized_map")
def vectorized_map(fn, elems):
"""Parallel map on the list of tensors unpacked from `elems` on dimension 0.
  This method works similarly to tf.map_fn but is optimized to run much faster,
possibly with a much larger memory footprint. The speedups are obtained by
vectorization (see https://arxiv.org/pdf/1903.04243.pdf). The idea behind
vectorization is to semantically launch all the invocations of `fn` in
parallel and fuse corresponding operations across all these invocations. This
fusion is done statically at graph generation time and the generated code is
often similar in performance to a manually fused version.
Because `tf.vectorized_map` fully parallelizes the batch, this method will
generally be significantly faster than using `tf.map_fn`, especially in eager
mode. However this is an experimental feature and currently has a lot of
limitations:
- There should be no data dependency between the different semantic
invocations of `fn`, i.e. it should be safe to map the elements of the
inputs in any order.
- Stateful kernels may mostly not be supported since these often imply a
data dependency. We do support a limited set of such stateful kernels
though (like RandomFoo, Variable operations like reads, etc).
- `fn` has limited support for control flow operations. `tf.cond` in
particular is not supported.
- `fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of any intermediate or output tensors in the
computation of `fn` should not depend on the input to `fn`.
Examples:
```python
def outer_product(a):
return tf.tensordot(a, a, 0)
batch_size = 100
a = tf.ones((batch_size, 32, 32))
c = tf.vectorized_map(outer_product, a)
assert c.shape == (batch_size, 32, 32, 32, 32)
```
```python
# Computing per-example gradients
batch_size = 10
num_features = 32
layer = tf.keras.layers.Dense(1)
def model_fn(arg):
with tf.GradientTape() as g:
inp, label = arg
inp = tf.expand_dims(inp, 0)
label = tf.expand_dims(label, 0)
prediction = layer(inp)
loss = tf.nn.l2_loss(label - prediction)
return g.gradient(loss, (layer.kernel, layer.bias))
inputs = tf.random.uniform([batch_size, num_features])
labels = tf.random.uniform([batch_size, 1])
per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels))
assert per_example_gradients[0].shape == (batch_size, num_features, 1)
assert per_example_gradients[1].shape == (batch_size, 1)
```
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same (possibly nested) structure as `elems`, and returns a possibly
nested structure of Tensors and Operations, which may be different than
the structure of `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be mapped over by `fn`.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
"""
def loop_fn(i):
gathered_elems = nest.map_structure(lambda x: array_ops.gather(x, i), elems)
return fn(gathered_elems)
batch_size = None
first_elem = ops.convert_to_tensor(nest.flatten(elems)[0])
if first_elem.shape.rank is not None:
batch_size = first_elem.shape.as_list()[0]
if batch_size is None:
batch_size = array_ops.shape(first_elem)[0]
return pfor(loop_fn, batch_size)
|
|
import os
import imp
import json
from sqlalchemy import or_
from framework.db import models
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import DBPluginInterface
from framework.utils import FileOperations
TEST_GROUPS = ['web', 'network', 'auxiliary']
class PluginDB(BaseComponent, DBPluginInterface):
COMPONENT_NAME = "db_plugin"
def __init__(self):
self.register_in_service_locator()
self.config = self.get_component("config")
self.db = self.get_component("db")
self.error_handler = self.get_component("error_handler")
self.LoadTestGroups(self.config.select_user_or_default_config_path(
self.config.FrameworkConfigGet("WEB_TEST_GROUPS"), self.config.FrameworkConfigGet("WEB_PLUGIN_CONFIG_DIR")),
"web")
self.LoadTestGroups(self.config.select_user_or_default_config_path(
self.config.FrameworkConfigGet("NET_TEST_GROUPS"), self.config.FrameworkConfigGet("NET_PLUGIN_CONFIG_DIR")),
"network")
self.LoadTestGroups(self.config.select_user_or_default_config_path(
self.config.FrameworkConfigGet("AUX_TEST_GROUPS"), self.config.FrameworkConfigGet("AUX_PLUGIN_CONFIG_DIR")),
"auxiliary")
# After loading the test groups then load the plugins, because of many-to-one relationship
self.LoadFromFileSystem() # Load plugins :P
def init(self):
self.timer = self.get_component("timer")
def GetTestGroupsFromFile(self, file_path):
# This needs to be a list instead of a dictionary to preserve order in python < 2.7
TestGroups = []
ConfigFile = FileOperations.open(file_path, 'r').read().splitlines()
for line in ConfigFile:
            if not line or line.startswith('#'):
                continue  # Skip blank lines and comments
try:
Code, Priority, Descrip, Hint, URL = line.strip().split(' | ')
except ValueError:
self.error_handler.FrameworkAbort("Problem in Test Groups file: '%s' -> Cannot parse line: %s" %
(file_path, line))
if len(Descrip) < 2:
Descrip = Hint
if len(Hint) < 2:
Hint = ""
TestGroups.append({'code': Code, 'priority': Priority, 'descrip': Descrip, 'hint': Hint, 'url': URL})
return TestGroups
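    # Illustrative sketch (not part of the original class): each non-comment
    # line of a test groups file is expected to hold five ' | '-separated
    # fields, e.g. (hypothetical values):
    #   OWTF-XX-000 | 1 | Example description | Example hint | http://example.com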
def LoadTestGroups(self, test_groups_file, plugin_group):
TestGroups = self.GetTestGroupsFromFile(test_groups_file)
for group in TestGroups:
self.db.session.merge(
models.TestGroup(
code=group['code'],
priority=group['priority'],
descrip=group['descrip'],
hint=group['hint'],
url=group['url'],
group=plugin_group)
)
self.db.session.commit()
def LoadFromFileSystem(self):
"""Loads the plugins from the filesystem and updates their info.
Walks through each sub-directory of `PLUGINS_DIR`.
        For each file, loads it using the imp module.
Updates the database with the information for each plugin:
+ 'title': the title of the plugin
+ 'name': the name of the plugin
+ 'code': the internal code of the plugin
+ 'group': the group of the plugin (ex: web)
+ 'type': the type of the plugin (ex: active, passive, ...)
+ 'descrip': the description of the plugin
+ 'file': the filename of the plugin
+ 'internet_res': does the plugin use internet resources?
"""
# TODO: When the -t, -e or -o is given to OWTF command line, only load
# the specific plugins (and not all of them like below).
# Retrieve the list of the plugins (sorted) from the directory given by
        # 'PLUGINS_DIR'.
plugins = []
for root, _, files in os.walk(self.config.FrameworkConfigGet('PLUGINS_DIR')):
plugins.extend([os.path.join(root, filename) for filename in files if filename.endswith('py')])
plugins = sorted(plugins)
# Retrieve the information of the plugin.
for plugin_path in plugins:
# Only keep the relative path to the plugin
plugin = plugin_path.replace(self.config.FrameworkConfigGet('PLUGINS_DIR'), '')
# TODO: Using os.path.sep might not be portable especially on
# Windows platform since it allows '/' and '\' in the path.
# Retrieve the group, the type and the file of the plugin.
chunks = plugin.split(os.path.sep)
# TODO: Ensure that the variables group, type and file exist when
# the length of chunks is less than 3.
if len(chunks) == 3:
group, type, file = chunks
# Retrieve the internal name and code of the plugin.
name, code = os.path.splitext(file)[0].split('@')
# Only load the plugin if in XXX_TEST_GROUPS configuration (e.g. web_testgroups.cfg)
if self.db.session.query(models.TestGroup).get(code) is None:
continue
# Load the plugin as a module.
filename, pathname, desc = imp.find_module(os.path.splitext(os.path.basename(plugin_path))[0],
[os.path.dirname(plugin_path)])
plugin_module = imp.load_module(os.path.splitext(file)[0], filename, pathname, desc)
            # Try to retrieve the `attr` dictionary from the module and convert
# it to json in order to save it into the database.
attr = None
try:
attr = json.dumps(plugin_module.ATTR)
except AttributeError: # The plugin didn't define an attr dict.
pass
# Save the plugin into the database.
self.db.session.merge(
models.Plugin(
key='%s@%s' % (type, code),
group=group,
type=type,
title=name.title().replace('_', ' '),
name=name,
code=code,
file=file,
descrip=plugin_module.DESCRIPTION,
attr=attr
)
)
self.db.session.commit()
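    # Illustrative sketch (not part of the original class): a plugin path such
    # as 'web/active/example_plugin@OWTF-XX-000.py' (hypothetical) is split
    # into group='web', type='active', file='example_plugin@OWTF-XX-000.py',
    # and the file name in turn yields name='example_plugin' and
    # code='OWTF-XX-000'.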
def DeriveTestGroupDict(self, obj):
if obj:
pdict = dict(obj.__dict__)
pdict.pop("_sa_instance_state")
return pdict
def DeriveTestGroupDicts(self, obj_list):
dict_list = []
for obj in obj_list:
dict_list.append(self.DeriveTestGroupDict(obj))
return dict_list
def GetTestGroup(self, code):
group = self.db.session.query(models.TestGroup).get(code)
return self.DeriveTestGroupDict(group)
def GetAllTestGroups(self):
test_groups = self.db.session.query(models.TestGroup).order_by(models.TestGroup.priority.desc()).all()
return self.DeriveTestGroupDicts(test_groups)
def GetAllGroups(self):
groups = self.db.session.query(models.Plugin.group).distinct().all()
groups = [i[0] for i in groups]
return groups
def GetAllTypes(self):
plugin_types = self.db.session.query(models.Plugin.type).distinct().all()
plugin_types = [i[0] for i in plugin_types] # Necessary because of sqlalchemy
return plugin_types
def GetTypesForGroup(self, PluginGroup):
plugin_types = self.db.session.query(models.Plugin.type).filter_by(group=PluginGroup).distinct().all()
plugin_types = [i[0] for i in plugin_types]
return plugin_types
def DerivePluginDict(self, obj):
if obj:
pdict = dict(obj.__dict__)
pdict.pop("_sa_instance_state")
# Remove outputs array if present
if "outputs" in pdict.keys():
pdict.pop("outputs")
pdict["min_time"] = None
min_time = obj.min_time
if min_time is not None:
pdict["min_time"] = self.timer.get_time_as_str(min_time)
return pdict
def DerivePluginDicts(self, obj_list):
plugin_dicts = []
for obj in obj_list:
plugin_dicts.append(self.DerivePluginDict(obj))
return plugin_dicts
def GenerateQueryUsingSession(self, criteria):
query = self.db.session.query(models.Plugin).join(models.TestGroup)
if criteria.get("type", None):
if isinstance(criteria["type"], (str, unicode)):
query = query.filter(models.Plugin.type == criteria["type"])
if isinstance(criteria["type"], list):
query = query.filter(models.Plugin.type.in_(criteria["type"]))
if criteria.get("group", None):
if isinstance(criteria["group"], (str, unicode)):
query = query.filter_by(group=criteria["group"])
if isinstance(criteria["group"], list):
query = query.filter(models.Plugin.group.in_(criteria["group"]))
if criteria.get("code", None):
if isinstance(criteria["code"], (str, unicode)):
query = query.filter_by(code=criteria["code"])
if isinstance(criteria["code"], list):
query = query.filter(models.Plugin.code.in_(criteria["code"]))
if criteria.get("name", None):
if isinstance(criteria["name"], (str, unicode)):
query = query.filter(models.Plugin.name == criteria["name"])
if isinstance(criteria["name"], list):
query = query.filter(models.Plugin.name.in_(criteria["name"]))
return query.order_by(models.TestGroup.priority.desc())
def PluginNametoCode(self, codes):
checklist = ["OWTF-", "PTES-"]
query = self.db.session.query(models.Plugin.code)
for count, name in enumerate(codes):
if all(check not in name for check in checklist):
code = query.filter(models.Plugin.name == name).first()
codes[count] = str(code[0])
return codes
def GetAll(self, Criteria={}):
if "code" in Criteria:
Criteria["code"] = self.PluginNametoCode(Criteria["code"])
query = self.GenerateQueryUsingSession(Criteria)
plugin_obj_list = query.all()
return self.DerivePluginDicts(plugin_obj_list)
def GetPluginsByType(self, PluginType):
return self.GetAll({"type": PluginType})
def GetPluginsByGroup(self, PluginGroup):
return self.GetAll({"group": PluginGroup})
def GetPluginsByGroupType(self, PluginGroup, PluginType):
return self.GetAll({"type": PluginType, "group": PluginGroup})
def GetGroupsForPlugins(self, Plugins):
groups = self.db.session.query(models.Plugin.group).filter(or_(models.Plugin.code.in_(Plugins),
models.Plugin.name.in_(Plugins))).distinct().all()
groups = [i[0] for i in groups]
return groups
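# Illustrative sketch (not part of the original module): querying plugins
# through an initialized PluginDB instance. The group and type values are
# hypothetical examples.
def _example_plugin_queries(plugin_db):
    web_plugins = plugin_db.GetPluginsByGroup('web')
    active_web = plugin_db.GetAll({'group': 'web', 'type': 'active'})
    return web_plugins, active_web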
|
|
# -*- coding: utf-8 -*-
import django
from django.test import SimpleTestCase
from django.test.utils import override_settings
from .forms import (
ChildModelFormset,
CustomNamingForm,
DefaultNamingForm,
FieldWidgetWithClassNameForm,
MixedNamingForm,
MultipleNamingForm,
WidgetsWidgetWithClassNameForm,
WithDataForm,
)
class TestWidget(SimpleTestCase):
def test_custom_naming(self):
html = CustomNamingForm().as_p()
self.assertIn('name="location"', html)
self.assertIn('data-lat-field="latitude"', html)
self.assertIn('data-lon-field="longitude"', html)
self.assertIn('name="latitude"', html)
self.assertIn('name="longitude"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_location").osmfield();});'
"</script>",
html,
)
def test_default_naming(self):
html = DefaultNamingForm().as_p()
self.assertIn('name="location"', html)
self.assertIn('data-lat-field="location_lat"', html)
self.assertIn('data-lon-field="location_lon"', html)
self.assertIn('name="location_lat"', html)
self.assertIn('name="location_lon"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_location").osmfield();});'
"</script>",
html,
)
def test_mixed_naming(self):
html = MixedNamingForm().as_p()
self.assertIn('name="location"', html)
self.assertIn('data-lat-field="location_lat"', html)
self.assertIn('data-lon-field="longitude"', html)
self.assertIn('name="location_lat"', html)
self.assertIn('name="longitude"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_location").osmfield();});'
"</script>",
html,
)
def test_multiple_naming(self):
html = MultipleNamingForm().as_p()
self.assertIn('name="default_location"', html)
self.assertIn('data-lat-field="default_location_lat"', html)
self.assertIn('data-lon-field="default_location_lon"', html)
self.assertIn('name="default_location_lat"', html)
self.assertIn('name="default_location_lon"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_default_location").osmfield();});'
"</script>",
html,
)
self.assertIn('name="custom_location"', html)
self.assertIn('data-lat-field="custom_latitude"', html)
self.assertIn('data-lon-field="custom_longitude"', html)
self.assertIn('name="custom_latitude"', html)
self.assertIn('name="custom_longitude"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_custom_location").osmfield();});'
"</script>",
html,
)
def test_field_widget_contains_class(self):
html = FieldWidgetWithClassNameForm().as_p()
self.assertIn('class="custom-class osmfield"', html)
def test_widgets_widget_contains_class(self):
html = WidgetsWidgetWithClassNameForm().as_p()
self.assertIn('class="custom-class osmfield"', html)
def test_widget_prefix_in_formset(self):
html = ChildModelFormset().as_p()
# Check for form 0
self.assertIn('id="id_children-0-location"', html)
self.assertIn('name="children-0-location"', html)
self.assertIn('data-lat-field="children-0-location_lat"', html)
self.assertIn('data-lon-field="children-0-location_lon"', html)
self.assertIn('id="id_children-0-location_lat"', html)
self.assertIn('id="id_children-0-location_lon"', html)
self.assertIn('name="children-0-location_lat"', html)
self.assertIn('name="children-0-location_lon"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_children-0-location").osmfield();});'
"</script>",
html,
)
# Check for form 1
self.assertIn('id="id_children-1-location"', html)
self.assertIn('name="children-1-location"', html)
self.assertIn('data-lat-field="children-1-location_lat"', html)
self.assertIn('data-lon-field="children-1-location_lon"', html)
self.assertIn('id="id_children-1-location_lat"', html)
self.assertIn('id="id_children-1-location_lon"', html)
self.assertIn('name="children-1-location_lat"', html)
self.assertIn('name="children-1-location_lon"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_children-1-location").osmfield();});'
"</script>",
html,
)
def test_widget_location_data_field(self):
html = WithDataForm().as_p()
self.assertIn('name="location"', html)
self.assertIn('data-lat-field="latitude"', html)
self.assertIn('data-lon-field="longitude"', html)
self.assertIn('data-data-field="location_data"', html)
self.assertIn('name="latitude"', html)
self.assertIn('name="longitude"', html)
self.assertIn('name="location_data"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_location").osmfield();});'
"</script>",
html,
)
class TestMedia(SimpleTestCase):
@override_settings(DEBUG=True)
def test_css_debug(self):
css = DefaultNamingForm().media.render_css()
self.assertIn(
'<link href="css/vendor/leaflet.css" type="text/css" media="screen" '
'rel="stylesheet"',
next(css),
)
self.assertIn(
'<link href="css/osm_field.css" type="text/css" media="screen" '
'rel="stylesheet"',
next(css),
)
def test_css_no_debug(self):
css = DefaultNamingForm().media.render_css()
self.assertIn(
'<link href="css/vendor/leaflet.css" type="text/css" media="screen" '
'rel="stylesheet"',
next(css),
)
self.assertIn(
'<link href="css/osm_field.min.css" type="text/css" media="screen" '
'rel="stylesheet"',
next(css),
)
@override_settings(DEBUG=True)
def test_js_debug(self):
js = DefaultNamingForm().media.render_js()
if django.VERSION[:2] >= (3, 1):
self.assertEqual(
'<script src="js/vendor/leaflet.js"></script>'
'<script src="js/osm_field.js"></script>',
"".join(js),
)
else:
self.assertEqual(
'<script type="text/javascript" src="js/vendor/leaflet.js"></script>'
'<script type="text/javascript" src="js/osm_field.js"></script>',
"".join(js),
)
def test_js_no_debug(self):
js = DefaultNamingForm().media.render_js()
if django.VERSION[:2] >= (3, 1):
self.assertEqual(
'<script src="js/vendor/leaflet.js"></script>'
'<script src="js/osm_field.min.js"></script>',
"".join(js),
)
else:
self.assertEqual(
'<script type="text/javascript" src="js/vendor/leaflet.js"></script>'
'<script type="text/javascript" src="js/osm_field.min.js"></script>',
"".join(js),
)
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
from neutronclient.common import exceptions as neutron_client_exc
from oslo.config import cfg
from nova.api.openstack import extensions
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import conductor
from nova import exception
from nova.i18n import _, _LE, _LW
from nova.network import base_api
from nova.network import model as network_model
from nova.network import neutronv2
from nova.network.neutronv2 import constants
from nova.network.security_group import openstack_driver
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
neutron_opts = [
cfg.StrOpt('url',
default='http://127.0.0.1:9696',
help='URL for connecting to neutron',
deprecated_group='DEFAULT',
deprecated_name='neutron_url'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds',
deprecated_group='DEFAULT',
deprecated_name='neutron_url_timeout'),
cfg.StrOpt('admin_user_id',
help='User id for connecting to neutron in admin context'),
cfg.StrOpt('admin_username',
help='Username for connecting to neutron in admin context',
deprecated_group='DEFAULT',
deprecated_name='neutron_admin_username'),
cfg.StrOpt('admin_password',
help='Password for connecting to neutron in admin context',
secret=True,
deprecated_group='DEFAULT',
deprecated_name='neutron_admin_password'),
cfg.StrOpt('admin_tenant_id',
help='Tenant id for connecting to neutron in admin context',
deprecated_group='DEFAULT',
deprecated_name='neutron_admin_tenant_id'),
cfg.StrOpt('admin_tenant_name',
help='Tenant name for connecting to neutron in admin context. '
'This option will be ignored if neutron_admin_tenant_id '
'is set. Note that with Keystone V3 tenant names are '
'only unique within a domain.',
deprecated_group='DEFAULT',
deprecated_name='neutron_admin_tenant_name'),
cfg.StrOpt('region_name',
help='Region name for connecting to neutron in admin context',
deprecated_group='DEFAULT',
deprecated_name='neutron_region_name'),
cfg.StrOpt('admin_auth_url',
default='http://localhost:5000/v2.0',
help='Authorization URL for connecting to neutron in admin '
'context',
deprecated_group='DEFAULT',
deprecated_name='neutron_admin_auth_url'),
cfg.BoolOpt('api_insecure',
default=False,
help='If set, ignore any SSL validation issues',
deprecated_group='DEFAULT',
deprecated_name='neutron_api_insecure'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Authorization strategy for connecting to '
'neutron in admin context',
deprecated_group='DEFAULT',
deprecated_name='neutron_auth_strategy'),
# TODO(berrange) temporary hack until Neutron can pass over the
# name of the OVS bridge it is configured with
cfg.StrOpt('ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch',
deprecated_group='DEFAULT',
deprecated_name='neutron_ovs_bridge'),
cfg.IntOpt('extension_sync_interval',
default=600,
help='Number of seconds before querying neutron for'
' extensions',
deprecated_group='DEFAULT',
deprecated_name='neutron_extension_sync_interval'),
cfg.StrOpt('ca_certificates_file',
help='Location of CA certificates file to use for '
'neutron client requests.',
deprecated_group='DEFAULT',
deprecated_name='neutron_ca_certificates_file'),
cfg.BoolOpt('allow_duplicate_networks',
default=False,
help='Allow an instance to have multiple vNICs attached to '
'the same Neutron network.'),
]
CONF = cfg.CONF
# neutron_opts options in the DEFAULT group were deprecated in Juno
CONF.register_opts(neutron_opts, 'neutron')
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('flat_injected', 'nova.network.manager')
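# Illustrative sketch (not part of the original module): the options above are
# read from the [neutron] group of nova.conf, e.g. (credentials hypothetical):
#
#   [neutron]
#   url = http://127.0.0.1:9696
#   admin_username = neutron
#   admin_password = secret
#   admin_tenant_name = service
#   admin_auth_url = http://localhost:5000/v2.0
#   auth_strategy = keystone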
LOG = logging.getLogger(__name__)
soft_external_network_attach_authorize = extensions.soft_core_authorizer(
'network', 'attach_external_network')
class API(base_api.NetworkAPI):
"""API for interacting with the neutron 2.x API."""
def __init__(self):
super(API, self).__init__()
self.last_neutron_extension_sync = None
self.extensions = {}
self.conductor_api = conductor.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures."""
def _get_available_networks(self, context, project_id,
net_ids=None, neutron=None):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
        If net_ids is specified, it searches for networks with the requested IDs only.
"""
if not neutron:
neutron = neutronv2.get_client(context)
if net_ids:
# If user has specified to attach instance only to specific
# networks then only add these to **search_opts. This search will
# also include 'shared' networks.
search_opts = {'id': net_ids}
nets = neutron.list_networks(**search_opts).get('networks', [])
else:
# (1) Retrieve non-public network list owned by the tenant.
search_opts = {'tenant_id': project_id, 'shared': False}
nets = neutron.list_networks(**search_opts).get('networks', [])
# (2) Retrieve public network list.
search_opts = {'shared': True}
nets += neutron.list_networks(**search_opts).get('networks', [])
_ensure_requested_network_ordering(
lambda x: x['id'],
nets,
net_ids)
if not soft_external_network_attach_authorize(context):
for net in nets:
# Perform this check here rather than in validate_networks to
# ensure the check is performed every time
# allocate_for_instance is invoked
if net.get('router:external'):
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
return nets
def _create_port(self, port_client, instance, network_id, port_req_body,
fixed_ip=None, security_group_ids=None,
available_macs=None, dhcp_opts=None):
"""Attempts to create a port for the instance on the given network.
:param port_client: The client to use to create the port.
:param instance: Create the port for the given instance.
:param network_id: Create the port on the given network.
:param port_req_body: Pre-populated port request. Should have the
device_id, device_owner, and any required neutron extension values.
:param fixed_ip: Optional fixed IP to use from the given network.
:param security_group_ids: Optional list of security group IDs to
apply to the port.
:param available_macs: Optional set of available MAC addresses to use.
:param dhcp_opts: Optional DHCP options.
:returns: ID of the created port.
:raises PortLimitExceeded: If neutron fails with an OverQuota error.
:raises NoMoreFixedIps: If neutron fails with
IpAddressGenerationFailure error.
"""
try:
if fixed_ip:
port_req_body['port']['fixed_ips'] = [{'ip_address': fixed_ip}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
if security_group_ids:
port_req_body['port']['security_groups'] = security_group_ids
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance['uuid'])
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
if dhcp_opts is not None:
port_req_body['port']['extra_dhcp_opts'] = dhcp_opts
port_id = port_client.create_port(port_req_body)['port']['id']
LOG.debug('Successfully created port: %s', port_id,
instance=instance)
return port_id
except neutron_client_exc.OverQuotaClient:
LOG.warning(_LW(
'Neutron error: Port quota exceeded in tenant: %s'),
port_req_body['port']['tenant_id'], instance=instance)
raise exception.PortLimitExceeded()
except neutron_client_exc.IpAddressGenerationFailureClient:
LOG.warning(_LW('Neutron error: No more fixed IPs in network: %s'),
network_id, instance=instance)
raise exception.NoMoreFixedIps()
except neutron_client_exc.MacAddressInUseClient:
LOG.warning(_LW('Neutron error: MAC address %(mac)s is already '
'in use on network %(network)s.') %
{'mac': mac_address, 'network': network_id},
instance=instance)
raise exception.PortInUse(port_id=mac_address)
except neutron_client_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Neutron error creating port on network %s'),
network_id, instance=instance)
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocate network resources for the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param requested_networks: optional value containing
network_id, fixed_ip, and port_id
:param security_groups: security groups to allocate for instance
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
:param dhcp_options: None or a set of key/value pairs that should
determine the DHCP BOOTP response, eg. for PXE booting an instance
configured with the baremetal hypervisor. It is expected that these
are already formatted for the neutron v2 api.
See nova/virt/driver.py:dhcp_options_for_instance for an example.
"""
hypervisor_macs = kwargs.get('macs', None)
available_macs = None
if hypervisor_macs is not None:
# Make a copy we can mutate: records macs that have not been used
# to create a port on a network. If we find a mac with a
# pre-allocated port we also remove it from this set.
available_macs = set(hypervisor_macs)
neutron = neutronv2.get_client(context)
LOG.debug('allocate_for_instance()', instance=instance)
if not instance.project_id:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance.uuid)
requested_networks = kwargs.get('requested_networks')
dhcp_opts = kwargs.get('dhcp_options', None)
ports = {}
net_ids = []
ordered_networks = []
if requested_networks:
for network_id, fixed_ip, port_id in requested_networks:
if port_id:
port = neutron.show_port(port_id)['port']
if port.get('device_id'):
raise exception.PortInUse(port_id=port_id)
if hypervisor_macs is not None:
if port['mac_address'] not in hypervisor_macs:
raise exception.PortNotUsable(port_id=port_id,
instance=instance.uuid)
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
# configured by users into multiple ports so we
# discard rather than popping.
available_macs.discard(port['mac_address'])
network_id = port['network_id']
ports[port_id] = port
if network_id:
net_ids.append(network_id)
ordered_networks.append((network_id, fixed_ip, port_id))
nets = self._get_available_networks(context, instance.project_id,
net_ids)
if not nets:
LOG.warn(_LW("No network configured!"), instance=instance)
return network_model.NetworkInfo([])
# if this function is directly called without a requested_networks param
# or if it is indirectly called through allocate_port_for_instance()
# with None params=(network_id=None, requested_ip=None, port_id=None):
if (not requested_networks
or requested_networks == [(None, None, None)]):
# bug/1267723 - if no network is requested and more
# than one is available then raise NetworkAmbiguous Exception
if len(nets) > 1:
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
ordered_networks.append((nets[0]['id'], None, None))
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
search_opts = {'tenant_id': instance.project_id}
user_security_groups = neutron.list_security_groups(
**search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
if name_match:
raise exception.NoUniqueMatch(
_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific.") %
security_group)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another's security groups uuid, the name takes priority.
if not name_match and not uuid_match:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
elif name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
touched_port_ids = []
created_port_ids = []
ports_in_requested_order = []
nets_in_requested_order = []
for network_id, fixed_ip, port_id in ordered_networks:
# Network lookup for available network_id
network = None
for net in nets:
if net['id'] == network_id:
network = net
break
# if network_id did not pass validate_networks() and is not
# available here, then skip it safely rather than continuing
# with a None network
else:
continue
nets_in_requested_order.append(network)
# If security groups are requested on an instance then the
# network must have a subnet associated with it. Some plugins
# implement the port-security extension, which requires
# 'port_security_enabled' to be True for security groups to be
# applied. That is why the check below treats a missing
# 'port_security_enabled' field as True.
if (security_groups and not (
network['subnets']
and network.get('port_security_enabled', True))):
raise exception.SecurityGroupCannotBeApplied()
network_id = network['id']
zone = 'compute:%s' % instance.availability_zone
port_req_body = {'port': {'device_id': instance.uuid,
'device_owner': zone}}
try:
self._populate_neutron_extension_values(context, instance,
port_req_body)
# Requires admin creds to set port bindings
port_client = (neutron if not
self._has_port_binding_extension(context) else
neutronv2.get_client(context, admin=True))
if port_id:
port = ports[port_id]
port_client.update_port(port['id'], port_req_body)
touched_port_ids.append(port['id'])
ports_in_requested_order.append(port['id'])
else:
created_port = self._create_port(
port_client, instance, network_id,
port_req_body, fixed_ip,
security_group_ids, available_macs, dhcp_opts)
created_port_ids.append(created_port)
ports_in_requested_order.append(created_port)
except Exception:
with excutils.save_and_reraise_exception():
for port_id in touched_port_ids:
try:
port_req_body = {'port': {'device_id': ''}}
# Requires admin creds to set port bindings
if self._has_port_binding_extension(context):
port_req_body['port']['binding:host_id'] = None
port_client = neutronv2.get_client(
context, admin=True)
else:
port_client = neutron
port_client.update_port(port_id, port_req_body)
except Exception:
msg = _LE("Failed to update port %s")
LOG.exception(msg, port_id)
for port_id in created_port_ids:
try:
neutron.delete_port(port_id)
except Exception:
msg = _LE("Failed to delete port %s")
LOG.exception(msg, port_id)
nw_info = self.get_instance_nw_info(context, instance,
networks=nets_in_requested_order,
port_ids=ports_in_requested_order)
# NOTE(danms): Only return info about ports we created in this run.
# In the initial allocation case, this will be everything we created,
# and in later runs will only be what was created that time. Thus,
# this only affects the attach case, not the original use for this
# method.
return network_model.NetworkInfo([vif for vif in nw_info
if vif['id'] in created_port_ids +
touched_port_ids])
def _refresh_neutron_extensions_cache(self, context):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync)
>= CONF.neutron.extension_sync_interval)):
neutron = neutronv2.get_client(context)
extensions_list = neutron.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = dict((ext['name'], ext)
for ext in extensions_list)
def _has_port_binding_extension(self, context, refresh_cache=False):
if refresh_cache:
self._refresh_neutron_extensions_cache(context)
return constants.PORTBINDING_EXT in self.extensions
def _populate_neutron_extension_values(self, context, instance,
port_req_body):
"""Populate neutron extension values for the instance.
If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.
"""
self._refresh_neutron_extensions_cache(context)
if constants.QOS_QUEUE in self.extensions:
flavor = flavors.extract_flavor(instance)
rxtx_factor = flavor.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
if self._has_port_binding_extension(context):
port_req_body['port']['binding:host_id'] = instance.get('host')
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug('deallocate_for_instance()', instance=instance)
search_opts = {'device_id': instance.uuid}
neutron = neutronv2.get_client(context)
data = neutron.list_ports(**search_opts)
ports = [port['id'] for port in data.get('ports', [])]
requested_networks = kwargs.get('requested_networks') or {}
ports_to_skip = [port_id for nets, fips, port_id in requested_networks]
ports = set(ports) - set(ports_to_skip)
# Reset device_id and device_owner for the ports that are skipped
for port in ports_to_skip:
port_req_body = {'port': {'device_id': '', 'device_owner': ''}}
try:
neutronv2.get_client(context).update_port(port,
port_req_body)
except Exception:
LOG.info(_('Unable to reset device ID for port %s'), port,
instance=instance)
for port in ports:
try:
neutron.delete_port(port)
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
LOG.warning(_LW("Port %s does not exist"), port)
else:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to delete neutron port %s"),
port)
# NOTE(arosen): This clears out the network_cache only if the instance
# hasn't already been deleted. This is needed when an instance fails to
# launch and is rescheduled onto another compute node. If the instance
# has already been deleted this call does nothing.
base_api.update_instance_cache_with_nw_info(self, context, instance,
network_model.NetworkInfo([]))
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None):
"""Allocate a port for the instance."""
return self.allocate_for_instance(context, instance,
requested_networks=[(network_id, requested_ip, port_id)])
def deallocate_port_for_instance(self, context, instance, port_id):
"""Remove a specified port from the instance.
Return network information for the instance
"""
try:
neutronv2.get_client(context).delete_port(port_id)
except Exception:
LOG.exception(_LE("Failed to delete neutron port %s"),
port_id)
return self.get_instance_nw_info(context, instance)
def list_ports(self, context, **search_opts):
"""List ports for the client based on search options."""
return neutronv2.get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
"""Return the port for the client given the port id."""
return neutronv2.get_client(context).show_port(port_id)
def get_instance_nw_info(self, context, instance, networks=None,
port_ids=None, use_slave=False):
"""Return network information for specified instance
and update cache.
"""
# NOTE(geekinutah): It would be nice if use_slave had us call
# special APIs that pummeled slaves instead of
# the master. For now we just ignore this arg.
result = self._get_instance_nw_info(context, instance, networks,
port_ids)
base_api.update_instance_cache_with_nw_info(self, context, instance,
result, update_cells=False)
return result
def _get_instance_nw_info(self, context, instance, networks=None,
port_ids=None):
# keep this caching-free version of the get_instance_nw_info method
# because it is used by the caching logic itself.
LOG.debug('get_instance_nw_info()', instance=instance)
nw_info = self._build_network_info_model(context, instance, networks,
port_ids)
return network_model.NetworkInfo.hydrate(nw_info)
def _gather_port_ids_and_networks(self, context, instance, networks=None,
port_ids=None):
"""Return an instance's complete list of port_ids and networks."""
if ((networks is None and port_ids is not None) or
(port_ids is None and networks is not None)):
message = ("This method needs to be called with either "
"networks=None and port_ids=None or port_ids and "
" networks as not none.")
raise exception.NovaException(message=message)
ifaces = compute_utils.get_nw_info_for_instance(instance)
# This code path is only done when refreshing the network_cache
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
if networks is None:
networks = self._get_available_networks(context,
instance['project_id'],
net_ids)
# an interface was added/removed from instance.
else:
# Since networks does not contain the existing networks on the
# instance, we take their values from the cache and add them.
networks = networks + [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
# Include existing interfaces so they are not removed from the db.
port_ids = [iface['id'] for iface in ifaces] + port_ids
return networks, port_ids
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Add a fixed ip to the instance from specified network."""
search_opts = {'network_id': network_id}
data = neutronv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
if not ipam_subnets:
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'network_id': network_id}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
for subnet in ipam_subnets:
fixed_ips = p['fixed_ips']
fixed_ips.append({'subnet_id': subnet['id']})
port_req_body = {'port': {'fixed_ips': fixed_ips}}
try:
neutronv2.get_client(context).update_port(p['id'],
port_req_body)
return self._get_instance_nw_info(context, instance)
except Exception as ex:
msg = ("Unable to update port %(portid)s on subnet "
"%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex})
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed ip from the instance."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
neutronv2.get_client(context).update_port(p['id'],
port_req_body)
except Exception as ex:
msg = ("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex})
return self._get_instance_nw_info(context, instance)
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance['uuid'], ip=address)
def validate_networks(self, context, requested_networks, num_instances):
"""Validate that the tenant can use the requested networks.
Return the number of instances that can be successfully allocated
with the requested network configuration.
"""
LOG.debug('validate_networks() for %s',
requested_networks)
neutron = neutronv2.get_client(context)
ports_needed_per_instance = 0
if not requested_networks:
nets = self._get_available_networks(context, context.project_id,
neutron=neutron)
if len(nets) > 1:
# Attaching to more than one network by default doesn't
# make sense, as the order will be arbitrary and the guest OS
# won't know which to configure
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
else:
ports_needed_per_instance = 1
else:
instance_on_net_ids = []
net_ids_requested = []
for (net_id, fixed_ip, port_id) in requested_networks:
if port_id:
try:
port = neutron.show_port(port_id).get('port')
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
port = None
else:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to access port %s"),
port_id)
if not port:
raise exception.PortNotFound(port_id=port_id)
if port.get('device_id', None):
raise exception.PortInUse(port_id=port_id)
if not port.get('fixed_ips'):
raise exception.PortRequiresFixedIP(port_id=port_id)
net_id = port['network_id']
else:
ports_needed_per_instance += 1
net_ids_requested.append(net_id)
# NOTE(jecarey) There is currently a race condition.
# If more than one request asks for the same specific
# fixed IP at the same time, the IP is allocated to only
# one of the instances; the other instance fails on spawn
# and goes into an error state.
# TODO(jecarey) Need to address this race condition once we
# have the ability to update mac addresses in Neutron.
if fixed_ip:
# TODO(jecarey) Need to look at consolidating list_port
# calls once able to OR filters.
search_opts = {'network_id': net_id,
'fixed_ips': 'ip_address=%s' % fixed_ip,
'fields': 'device_id'}
existing_ports = neutron.list_ports(
**search_opts)['ports']
if existing_ports:
i_uuid = existing_ports[0]['device_id']
raise exception.FixedIpAlreadyInUse(
address=fixed_ip,
instance_uuid=i_uuid)
if (not CONF.neutron.allow_duplicate_networks and
net_id in instance_on_net_ids):
raise exception.NetworkDuplicated(network_id=net_id)
instance_on_net_ids.append(net_id)
# Now check to see if all requested networks exist
if net_ids_requested:
nets = self._get_available_networks(
context, context.project_id, net_ids_requested,
neutron=neutron)
for net in nets:
if not net.get('subnets'):
raise exception.NetworkRequiresSubnet(
network_uuid=net['id'])
if len(nets) != len(net_ids_requested):
requested_netid_set = set(net_ids_requested)
returned_netid_set = set([net['id'] for net in nets])
lostid_set = requested_netid_set - returned_netid_set
if lostid_set:
id_str = ''
for _id in lostid_set:
id_str = id_str and id_str + ', ' + _id or _id
raise exception.NetworkNotFound(network_id=id_str)
# Note(PhilD): Ideally Nova would create all required ports as part of
# network validation, but port creation requires some details
# from the hypervisor. So we just check the quota and return
# how many of the requested number of instances can be created
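# Worked example (not from the original source): with a port quota of
# 50, 46 ports already in use and 2 ports needed per instance, a
# request for 3 instances gives free_ports = 4 and ports_needed = 6,
# so 4 // 2 = 2 instances are reported as possible.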
if ports_needed_per_instance:
ports = neutron.list_ports(tenant_id=context.project_id)['ports']
quotas = neutron.show_quota(tenant_id=context.project_id)['quota']
if quotas.get('port', -1) == -1:
# Unlimited Port Quota
return num_instances
else:
free_ports = quotas.get('port') - len(ports)
ports_needed = ports_needed_per_instance * num_instances
if free_ports >= ports_needed:
return num_instances
else:
return free_ports // ports_needed_per_instance
return num_instances
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given ip address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Return a list of dicts in the form of
[{'instance_uuid': uuid}] that matched the ip filter.
"""
# filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
ip = filters.get('ip')
# strip the ^, $ and backslash escapes from the ip filter
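# e.g. (illustrative) the filter '^10\.0\.0\.2$' is reduced to '10.0.0.2'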
if ip[0] == '^':
ip = ip[1:]
if ip[-1] == '$':
ip = ip[:-1]
ip = ip.replace('\\.', '.')
return self._get_instance_uuids_by_ip(context, ip)
def _get_port_id_by_fixed_address(self, client,
instance, address):
"""Return port_id from a fixed address."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associate a floating ip with a fixed ip."""
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
# not find why this parameter exists.
client = neutronv2.get_client(context)
port_id = self._get_port_id_by_fixed_address(client, instance,
fixed_address)
fip = self._get_floating_ip_by_address(client, floating_address)
param = {'port_id': port_id,
'fixed_ip_address': fixed_address}
client.update_floatingip(fip['id'], {'floatingip': param})
if fip['port_id']:
port = client.show_port(fip['port_id'])['port']
orig_instance_uuid = port['device_id']
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info(_('re-assign floating IP %(address)s from '
'instance %(instance_id)s') % msg_dict)
orig_instance = objects.Instance.get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
orig_instance)
def get_all(self, context):
"""Get all networks for client."""
client = neutronv2.get_client(context)
networks = client.list_networks().get('networks')
for network in networks:
network['label'] = network['name']
return networks
def get(self, context, network_uuid):
"""Get specific network for client."""
client = neutronv2.get_client(context)
try:
network = client.show_network(network_uuid).get('network') or {}
except neutron_client_exc.NetworkNotFoundClient:
raise exception.NetworkNotFound(network_id=network_uuid)
network['label'] = network['name']
return network
def delete(self, context, network_uuid):
"""Delete a network for client."""
raise NotImplementedError()
def disassociate(self, context, network_uuid):
"""Disassociate a network for client."""
raise NotImplementedError()
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate a network for client."""
raise NotImplementedError()
def get_fixed_ip(self, context, id):
"""Get a fixed ip from the id."""
raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
"""Return instance uuids given an address."""
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, client, port_id):
if not port_id:
return {}
port = client.show_port(port_id)['port']
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
def get_floating_ip(self, context, id):
"""Return floating ip object given the floating ip id."""
client = neutronv2.get_client(context)
try:
fip = client.show_floatingip(id)['floatingip']
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
raise exception.FloatingIpNotFound(id=id)
else:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to access floating IP %s'), id)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {constants.NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
"""Return floating ip pool names."""
client = neutronv2.get_client(context)
pools = self._get_floating_ip_pools(client)
# Note(salv-orlando): Return a list of names to be consistent with
# nova.network.api.get_floating_ip_pools
return [n['name'] or n['id'] for n in pools]
def _format_floating_ip_model(self, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
result = {'id': fip['id'],
'address': fip['floating_ip_address'],
'pool': pool['name'] or pool['id'],
'project_id': fip['tenant_id'],
# In Neutron v2, an exact fixed_ip_id does not exist.
'fixed_ip_id': fip['port_id'],
}
# In Neutron v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
result['fixed_ip'] = {'address': fip['fixed_ip_address']}
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
result['instance'] = {'uuid': instance_uuid}
else:
result['instance'] = None
return result
def get_floating_ip_by_address(self, context, address):
"""Return a floating ip given an address."""
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = neutronv2.get_client(context)
project_id = context.project_id
fips = client.list_floatingips(tenant_id=project_id)['floatingips']
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._format_floating_ip_model(fip, pool_dict, port_dict)
for fip in fips]
def get_floating_ips_by_fixed_address(self, context, fixed_address):
raise NotImplementedError()
def get_instance_id_by_floating_address(self, context, address):
"""Return the instance id a floating ip's fixed ip is allocated to."""
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
port = client.show_port(fip['port_id'])['port']
return port['device_id']
def get_vifs_by_instance(self, context, instance):
raise NotImplementedError()
def get_vif_by_mac_address(self, context, mac_address):
raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def allocate_floating_ip(self, context, pool=None):
"""Add a floating ip to a project from a pool."""
client = neutronv2.get_client(context)
pool = pool or CONF.default_floating_pool
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
param = {'floatingip': {'floating_network_id': pool_id}}
try:
fip = client.create_floatingip(param)
except (neutron_client_exc.IpAddressGenerationFailureClient,
neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
raise exception.NoMoreFloatingIps(unicode(e))
except neutron_client_exc.OverQuotaClient as e:
raise exception.FloatingIpLimitExceeded(unicode(e))
return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
"""Get floatingip from floating ip address."""
if not address:
raise exception.FloatingIpNotFoundForAddress(address=address)
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floatingips from fixed ip and port."""
try:
data = client.list_floatingips(fixed_ip_address=fixed_ip,
port_id=port)
# If a neutron plugin does not implement the L3 API a 404 from
# list_floatingips will be raised.
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
return []
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unable to access floating IP %(fixed_ip)s '
'for port %(port_id)s'),
{'fixed_ip': fixed_ip, 'port_id': port})
return data['floatingips']
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating ip with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
# not find why this parameter exists.
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
client.delete_floatingip(fip['id'])
@base_api.refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociate a floating ip from the instance."""
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
# not find why this parameter exists.
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
# NOTE(wenjianhn): just pass so that migrating an instance
# does not raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
if not self._has_port_binding_extension(context, refresh_cache=True):
return
neutron = neutronv2.get_client(context, admin=True)
search_opts = {'device_id': instance['uuid'],
'tenant_id': instance['project_id']}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
port_req_body = {'port': {'binding:host_id':
migration['dest_compute']}}
try:
neutron.update_port(p['id'], port_req_body)
except Exception:
with excutils.save_and_reraise_exception():
msg = _LE("Unable to update host of port %s")
LOG.exception(msg, p['id'])
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force add a network to the project."""
raise NotImplementedError()
def _nw_info_get_ips(self, client, port):
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
return network_IPs
def _nw_info_get_subnets(self, context, port, network_IPs):
subnets = self._get_subnets_from_port(context, port)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
return subnets
def _nw_info_build_network(self, port, networks, subnets):
network_name = None
for net in networks:
if port['network_id'] == net['id']:
network_name = net['name']
tenant_id = net['tenant_id']
break
else:
tenant_id = port['tenant_id']
LOG.warning(_LW("Network %(id)s not matched with the tenants "
"network! The ports tenant %(tenant_id)s will be "
"used."),
{'id': port['network_id'], 'tenant_id': tenant_id})
bridge = None
ovs_interfaceid = None
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
# TODO(berrange) Neutron should pass the bridge name
# in another binding metadata field
if vif_type == network_model.VIF_TYPE_OVS:
bridge = CONF.neutron.ovs_bridge
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = "brq" + port['network_id']
should_create_bridge = True
elif vif_type == network_model.VIF_TYPE_DVS:
if network_name is None:
bridge = port['network_id']
else:
bridge = '%s-%s' % (network_name, port['network_id'])
# Prune the bridge name if necessary. For the DVS this is not done
# as the bridge is a '<network-name>-<network-UUID>'.
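# Illustrative note (not from the original source): for the Linux
# bridge case above, 'brq' + a full network UUID would exceed the
# kernel interface-name limit, so the name is clipped to
# NIC_NAME_LEN characters below; the DVS name is kept whole because
# it is consumed as '<network-name>-<network-UUID>', not as a device.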
if bridge is not None and vif_type != network_model.VIF_TYPE_DVS:
bridge = bridge[:network_model.NIC_NAME_LEN]
network = network_model.Network(
id=port['network_id'],
bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=tenant_id
)
network['subnets'] = subnets
port_profile = port.get('binding:profile')
if port_profile:
physical_network = port_profile.get('physical_network')
if physical_network:
network['physical_network'] = physical_network
if should_create_bridge is not None:
network['should_create_bridge'] = should_create_bridge
return network, ovs_interfaceid
def _build_network_info_model(self, context, instance, networks=None,
port_ids=None):
"""Return list of ordered VIFs attached to instance.
:param context - request context.
:param instance - instance we are returning network info for.
:param networks - List of networks being attached to an instance.
If value is None this value will be populated
from the existing cached value.
:param port_ids - List of port_ids that are being attached to an
instance in order of attachment. If value is None
this value will be populated from the existing
cached value.
"""
search_opts = {'tenant_id': instance['project_id'],
'device_id': instance['uuid'], }
client = neutronv2.get_client(context, admin=True)
data = client.list_ports(**search_opts)
current_neutron_ports = data.get('ports', [])
networks, port_ids = self._gather_port_ids_and_networks(
context, instance, networks, port_ids)
nw_info = network_model.NetworkInfo()
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
vif_active = False
if (current_neutron_port['admin_state_up'] is False
or current_neutron_port['status'] == 'ACTIVE'):
vif_active = True
network_IPs = self._nw_info_get_ips(client,
current_neutron_port)
subnets = self._nw_info_get_subnets(context,
current_neutron_port,
network_IPs)
devname = "tap" + current_neutron_port['id']
devname = devname[:network_model.NIC_NAME_LEN]
network, ovs_interfaceid = (
self._nw_info_build_network(current_neutron_port,
networks, subnets))
nw_info.append(network_model.VIF(
id=current_neutron_port['id'],
address=current_neutron_port['mac_address'],
network=network,
type=current_neutron_port.get('binding:vif_type'),
details=current_neutron_port.get('binding:vif_details'),
ovs_interfaceid=ovs_interfaceid,
devname=devname,
active=vif_active))
return nw_info
def _get_subnets_from_port(self, context, port):
"""Return the subnets for a given port."""
fixed_ips = port['fixed_ips']
# No fixed_ips for the port means there is no subnet associated
# with the network the port is created on.
# Since list_subnets(id=[]) returns all subnets visible for the
# current tenant, the returned subnets may contain subnets that are
# not related to the port. To avoid this, the method returns early here.
if not fixed_ips:
return []
search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
data = neutronv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway_ip'],
type='gateway'),
}
# attempt to populate DHCP server field
search_opts = {'network_id': subnet['network_id'],
'device_owner': 'network:dhcp'}
data = neutronv2.get_client(context).list_ports(**search_opts)
dhcp_ports = data.get('ports', [])
for p in dhcp_ports:
for ip_pair in p['fixed_ips']:
if ip_pair['subnet_id'] == subnet['id']:
subnet_dict['dhcp_server'] = ip_pair['ip_address']
break
subnet_object = network_model.Subnet(**subnet_dict)
for dns in subnet.get('dns_nameservers', []):
subnet_object.add_dns(
network_model.IP(address=dns, type='dns'))
for route in subnet.get('host_routes', []):
subnet_object.add_route(
network_model.Route(cidr=route['destination'],
gateway=network_model.IP(
address=route['nexthop'],
type='gateway')))
subnets.append(subnet_object)
return subnets
def get_dns_domains(self, context):
"""Return a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
raise NotImplementedError()
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
|
|
"""
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
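# A minimal sketch (not part of the original example; names are
# illustrative) of the core "sparse vectorizer + linear classifier"
# workflow that the benchmark below runs many variants of:
#
#   from sklearn.feature_extraction.text import TfidfVectorizer
#   from sklearn.svm import LinearSVC
#   vec = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
#                         stop_words='english')
#   X_tr = vec.fit_transform(train_docs)        # scipy.sparse matrix
#   clf = LinearSVC().fit(X_tr, train_labels)
#   accuracy = clf.score(vec.transform(test_docs), test_labels)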
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, ComplementNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
# #############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
# order of labels in `target_names` can be different from `categories`
target_names = data_train.target_names
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
# #############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, label in enumerate(target_names):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s" % (label, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=target_names))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(max_iter=50, tol=1e-3), "Perceptron"),
(PassiveAggressiveClassifier(max_iter=50, tol=1e-3),
"Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(penalty=penalty, dual=False,
tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
results.append(benchmark(ComplementNB(alpha=.1)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
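# (Illustrative: with penalty="l1", a smaller C such as C=0.1 would
# zero out far more coefficients than the default C=1.0 used below.)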
results.append(benchmark(Pipeline([
('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
|
|
#!/usr/bin/env python
#
# Copyright 2014 Infoxchange Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script to install and start an SSH daemon in a Docker image, enabling the
user to log on to it.
"""
import argparse
import logging
import os
import pwd
import socket
import subprocess
import sys
import time
import uuid
import yaml
# pylint:disable=no-name-in-module,import-error
from distutils.spawn import find_executable
# pylint:enable=no-name-in-module,import-error
from xdg.BaseDirectory import xdg_config_home
import pkg_resources
from forklift.arguments import argument_factory, convert_to_args, project_args
from forklift.base import DEVNULL, ImproperlyConfigured
import forklift.drivers
import forklift.services
LOG_LEVELS = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
LOGGER = logging.getLogger(__name__)
try:
# pylint:disable=maybe-no-member
__version__ = pkg_resources.get_distribution('docker-forklift').version
except pkg_resources.DistributionNotFound:
__version__ = 'dev'
def create_parser(services, drivers, command_required=True):
"""
Collect all options from services and drivers in an argparse format.
"""
parser = argparse.ArgumentParser(
usage="%(prog)s [options]",
)
add_argument = parser.add_argument
add_argument('--application_id',
help="Application name to derive resource names from")
add_argument('--driver', default=None, choices=drivers.keys(),
help="Driver to execute the application with")
add_argument('--services', default=[], nargs='*', choices=services.keys(),
help="Services to provide to the application")
add_argument('--transient', action='store_true',
help="Force services to use a transisent provider, where "
"one is available")
add_argument('--rm', action='store_true',
help="When done, clean up and transient providers that were "
"created")
add_argument('--unique', action='store_true',
help="Add to the application ID to make it unique for this"
"invocation")
add_argument('--cleanroom', action='store_true',
help="Synonym for --unique --transient --rm")
add_argument('--environment', default=[], nargs='*',
type=lambda pair: pair.split('=', 1),
help="Additional environment variables to pass")
add_argument('--loglevel', default='WARNING', choices=LOG_LEVELS,
metavar='LEVEL', type=lambda strlevel: strlevel.upper(),
help="Set the minimum log level to ouput")
add_argument('--version', '-v', action='version', version=__version__)
for name, service in services.items():
service_options = parser.add_argument_group(name)
service.add_arguments(
argument_factory(service_options.add_argument, name))
add_argument('command', nargs='+' if command_required else '*',
help="Command to run")
# Drivers inherit all the common options from their base class, so
# allow conflicts for this group of options
driver_options = parser.add_argument_group('Driver options')
driver_options.conflict_handler = 'resolve'
for name, driver in drivers.items():
driver.add_arguments(driver_options.add_argument)
# Dummy option to separate command line arguments from the ones
# generated from configuration files
add_argument('--zzzz', action='store_const', const=None,
help=argparse.SUPPRESS)
return parser
class Forklift(object):
"""
The main class.
"""
services = forklift.services.register
drivers = forklift.drivers.register
CONFIG_DIR = os.path.join(xdg_config_home, 'forklift')
def __init__(self, argv):
"""
Parse the command line and set up the class.
"""
# Parse the configuration from:
# - implicit defaults
# - project configuration file
# - user configuration file
# - user per-project configuration file
# - command line
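# Illustrative example (assuming convert_to_args maps top-level YAML
# keys to flags): a forklift.yaml containing "driver: docker" is folded
# into the options list as ['--driver', 'docker'] ahead of argv, so
# command-line values override configuration-file values.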
options = self.implicit_configuration()
# Get application_id
initial_parser = create_parser({}, {}, command_required=False)
conf, _ = initial_parser.parse_known_args(options)
for conffile in self.configuration_files(conf):
options.extend(self.file_configuration(conffile))
options.append('--zzzz')
options.extend(argv[1:])
parser = create_parser(self.services, self.drivers)
conf = parser.parse_args(options)
if conf.cleanroom:
args_idx = options.index('--zzzz')
left, right = (options[:args_idx], options[args_idx:])
options = left + ['--unique', '--transient', '--rm'] + right
# Once the driver and services are known, parse the arguments again
# with only the needed options
driver = self.get_driver(conf)
# enabled_services = {
# name: service
# for name, service in self.services.items()
# if name in conf.services
# }
# FIXME: creating a parser with only the enabled_services (see above)
# causes problems because we then cannot parse the arguments for
# disabled services. Because services are separately namespaced
# including arguments for non-enabled services is sufficient for now
parser = create_parser(self.services, # FIXME: enabled_services
{driver: self.drivers[driver]})
self.conf = parser.parse_args(options)
# As soon as we have parsed conf
self.setup_logging()
if self.conf.unique:
self.unique_application_id()
def implicit_configuration(self):
"""
Implicit configuration based on the current directory.
"""
application_id = os.path.basename(os.path.abspath(os.curdir))
return [
'--application_id', application_id,
]
def configuration_files(self, conf):
"""
A list of configuration files to look for settings in.
"""
application_id = conf.application_id
return (
'forklift.yaml',
os.path.join(self.CONFIG_DIR, '_default.yaml'),
os.path.join(self.CONFIG_DIR, '{0}.yaml'.format(application_id)),
)
def file_configuration(self, name):
"""
Parse settings from a configuration file.
"""
try:
with open(name) as conffile:
return convert_to_args(yaml.load(conffile))
except IOError:
return []
def unique_application_id(self):
"""
Set the application id in config to a (probably) unique value
"""
self.conf.application_id += '-%s' % uuid.uuid4()
LOGGER.info("New application ID is '%s'", self.conf.application_id)
@staticmethod
def _readme_stream():
"""
Get the README file as a stream.
"""
# pylint:disable=no-name-in-module,import-error
from pkg_resources import resource_stream
# pylint:enable=no-name-in-module,import-error
return resource_stream(__name__, 'README.md')
def help(self):
"""
Render the help file.
"""
readme = self._readme_stream()
# Try to format the README nicely if Pandoc is installed
pagers = [
'pandoc -s -f markdown -t man | man -l -',
os.environ.get('PAGER', ''),
'less',
'more',
]
pager = None
for pager in pagers:
if find_executable(pager.split(' ')[0]):
break
process = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE)
process.communicate(input=readme.read())
readme.close()
process.wait()
def get_driver(self, conf):
"""
Find out what driver to use given the configuration.
If no driver is explicitly specified, choose one which states
the command is its valid target or fall back to Docker driver.
"""
if conf.driver:
return conf.driver
target = conf.command[0]
for driver_name, driver_class in self.drivers.items():
if driver_class.valid_target(target):
return driver_name
return 'docker'
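    # Illustrative sketch (not part of the original module): given the command
    # ['some/image:tag', 'bash'] and no explicit --driver, each registered
    # driver is asked whether 'some/image:tag' is a valid target for it, and
    # the first driver that claims it is used; otherwise the Docker driver is
    # the fallback.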
def main(self):
"""
Run the specified application command.
"""
if self.conf.command == ['help']:
self.help()
return 0
driver_name = self.get_driver(self.conf)
driver_class = self.drivers[driver_name]
(target, *command) = self.conf.command
services = []
try:
try:
                # This loop is structured so that, even if an exception is
                # raised mid-loop, we still keep the list of services that
                # were successfully started (otherwise we would end up with
                # an empty list)
services_gen = (
self.services[service].provide(
self.conf.application_id,
overrides=project_args(self.conf, service),
transient=self.conf.transient,
)
for service in self.conf.services
)
for service in services_gen:
services.append(service)
environment = dict(self.conf.environment)
driver = driver_class(
target=target,
services=services,
environment=environment,
conf=self.conf,
)
except ImproperlyConfigured as ex:
print(ex)
return 1
return driver.run(*command)
finally:
if self.conf.rm:
for service in services:
# pylint:disable=undefined-loop-variable
service.cleanup()
def setup_logging(self):
"""
Setup the root logger
"""
logging.basicConfig(level=self.conf.loglevel)
def main():
"""
Main entry point.
"""
return Forklift(sys.argv).main()
if __name__ == '__main__':
main()
|
|
import os
import hashlib
import json
import time
import re
import shutil
import glob
from textwrap import dedent
import sublime
import sublime_plugin
# Metaclass for singletons (TODO refactor)
class Singleton(type):
_instance = None
def __call__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instance
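# Illustrative sketch of the metaclass behaviour (not part of the plugin):
# any class using it hands out a single shared instance, which is why
# FileHistory() can be called freely throughout this file.
#
#   class Example(metaclass=Singleton):
#       pass
#
#   assert Example() is Example()  # both calls return the same object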
class FileHistory(metaclass=Singleton):
SETTINGS_CALLBACK_KEY = 'FileHistory-reload'
PRINT_DEBUG = False
SETTINGS_FILE = 'FileHistory.sublime-settings'
INDENT_SIZE = 2
DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d @ %H:%M:%S'
OLD_DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
def __init__(self):
"""Class to manage the file-access history"""
self.__load_settings()
self.__load_history()
self.__clear_context()
if self.DELETE_ALL_ON_STARTUP:
sublime.set_timeout_async(lambda: self.delete_all_history(), 0)
elif self.CLEANUP_ON_STARTUP:
sublime.set_timeout_async(lambda: self.clean_history(False), 0)
def __load_settings(self):
"""Load the plugin settings from FileHistory.sublime-settings"""
self.app_settings = sublime.load_settings(self.SETTINGS_FILE)
self.__refresh_settings(True)
# The settings may change during execution so we need to listen for changes
self.app_settings.add_on_change(self.SETTINGS_CALLBACK_KEY, self.__refresh_settings)
def __refresh_settings(self, first_load=False):
if not first_load:
print('[FileHistory] Reloading the settings file "%s".' % (self.SETTINGS_FILE))
self.PRINT_DEBUG = self.__ensure_setting('debug', False)
self.GLOBAL_MAX_ENTRIES = self.__ensure_setting('global_max_entries', 100)
self.PROJECT_MAX_ENTRIES = self.__ensure_setting('project_max_entries', 50)
self.USE_SAVED_POSITION = self.__ensure_setting('use_saved_position', True)
self.NEW_TAB_POSITION = self.__ensure_setting('new_tab_position', 'next')
self.REOPEN_IN_CURRENT_GROUP = self.__ensure_setting('reopen_file_in_current_group', False)
self.REMOVE_NON_EXISTENT_FILES = self.__ensure_setting('remove_non_existent_files_on_preview', True)
self.CLEANUP_ON_STARTUP = self.__ensure_setting('cleanup_on_startup', True)
self.DELETE_ALL_ON_STARTUP = self.__ensure_setting('delete_all_on_startup', False)
history_path = self.__ensure_setting('history_file', os.path.join('User', 'FileHistory.json'))
self.HISTORY_FILE = os.path.normpath(os.path.join(sublime.packages_path(), history_path))
self.USE_MONOSPACE = self.__ensure_setting('monospace_font', False)
self.REAL_PATH = self.__ensure_setting('real_path', False)
self.TIMESTAMP_SHOW = self.__ensure_setting('timestamp_show', True)
self.TIMESTAMP_FORMAT = self.__ensure_setting('timestamp_format', self.DEFAULT_TIMESTAMP_FORMAT)
self.TIMESTAMP_MODE = self.__ensure_setting('timestamp_mode', 'history_access')
self.TIMESTAMP_RELATIVE = self.__ensure_setting('timestamp_relative', True)
self.PRETTIFY_HISTORY = self.__ensure_setting('prettify_history', False)
self.PATH_EXCLUDE_PATTERNS = self.__ensure_setting('path_exclude_patterns', [])
self.PATH_REINCLUDE_PATTERNS = self.__ensure_setting('path_reinclude_patterns', [])
self.MAX_BACKUP_COUNT = self.__ensure_setting('max_backup_count', 3)
# Test if the specified format string is valid
try:
time.strftime(self.TIMESTAMP_FORMAT)
except ValueError:
            print('[FileHistory] Invalid timestamp_format string. Falling back to default.')
self.TIMESTAMP_FORMAT = self.DEFAULT_TIMESTAMP_FORMAT
self.SHOW_FILE_PREVIEW = self.__ensure_setting('show_file_preview', True)
def get_history_timestamp(self, history_entry, action):
timestamp = None
filepath = history_entry['filename']
if 'timestamp' in history_entry and self.TIMESTAMP_MODE == 'history_access':
timestamp = history_entry['timestamp']
elif filepath and os.path.exists(filepath):
action = 'modified'
timestamp = int(os.path.getmtime(filepath))
return (action, timestamp)
def timestamp_from_string(self, timestamp):
"""try with the user-defined timestamp then try the default timestamp."""
formats = (self.TIMESTAMP_FORMAT,
self.DEFAULT_TIMESTAMP_FORMAT,
self.OLD_DEFAULT_TIMESTAMP_FORMAT)
for format_string in formats:
try:
history_time = time.strptime(timestamp, format_string)
except ValueError:
pass
else:
return int(time.mktime(history_time))
self.debug('The timestamp "%s" does not match any of the formats %s' % (timestamp, formats))
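    # Illustrative sketch (not part of the plugin): with the default format
    # '%Y-%m-%d @ %H:%M:%S', a stored string such as '2017-03-01 @ 09:30:00'
    # is parsed by time.strptime and converted to a POSIX timestamp via
    # time.mktime; a string that matches none of the three formats yields None
    # and is reported through self.debug().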
def __ensure_setting(self, key, default_value):
value = default_value
if self.app_settings.has(key):
value = self.app_settings.get(key)
self.debug('Setting "%s" = %r' % (key, value))
else:
# no need to persist this setting - just use the default
self.debug('Setting "%s" not found. Using the default value of %r' % (key, default_value))
return value
def debug(self, text):
"""Helper method for "logging" to the console."""
if self.PRINT_DEBUG:
print('[FileHistory] ' + text)
def get_current_project_key(self):
return self.get_project_key(sublime.active_window())
def get_project_key(self, window):
m = hashlib.md5()
for path in window.folders():
m.update(path.encode('utf-8'))
project_key = m.hexdigest()
# Try to use project_file_name (available in ST3 build 3014)
        # Note: the workspace name would be more appropriate here, but it is not available
if hasattr(window, 'project_file_name'):
project_filename = window.project_file_name()
if not project_filename:
return project_key
# migrate the history entry based on the "old" project key (if it exists)
if project_key in self.history:
self.history[project_filename] = self.history[project_key]
del self.history[project_key]
# use the new project key
project_key = project_filename
return project_key
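    # Illustrative sketch (not part of the plugin): a window's folder paths are
    # concatenated into an md5 hex digest which serves as the history key; once
    # the window has an associated *.sublime-project file, the existing entry
    # is migrated and the project file path becomes the key instead.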
def __load_history(self):
self.history = {}
if not os.path.exists(self.HISTORY_FILE):
self.debug("History file '%s' doesn't exist" % self.HISTORY_FILE)
return
self.debug('Loading the history from file ' + self.HISTORY_FILE)
try:
with open(self.HISTORY_FILE, 'r') as f:
updated_history = json.load(f)
except Exception as e:
updated_history = {}
sublime.error_message(
dedent("""\
File History could not read your history file at '%s'.
%s: %s""")
% (self.HISTORY_FILE, e.__class__.__name__, e)
)
self.history = updated_history
# Do cleanup on the history file
self.__ensure_project('global')
trigger_save = False
# Migrate old formatted timestamps and convert to POSIX
hlist = updated_history['global']['closed'] or updated_history['global']['opened']
if hlist and 'timestamp' in hlist[0] and not isinstance(hlist[0]['timestamp'], int):
# Found an old timestamp. Likely that all others are old too
self.debug("Found an old-style formatted timestamp. Migrating to POSIX")
for project in updated_history.values():
for key in ('closed', 'opened'):
for entry in project[key]:
if not isinstance(entry.get('timestamp', 0), int):
new_stamp = self.timestamp_from_string(entry['timestamp'])
if not new_stamp:
del entry['timestamp']
else:
entry['timestamp'] = new_stamp
trigger_save = True
# Remove actions keys
if hlist and 'action' in hlist[0]:
self.debug("Found an old-style action field. Cleaning up")
trigger_save = True
for project in updated_history.values():
for key in ('closed', 'opened'):
for entry in project[key]:
if 'action' in entry:
del entry['action']
if trigger_save:
# Save the changes
self.__save_history()
def __save_history(self):
self.debug('Saving the history to file ' + self.HISTORY_FILE)
with open(self.HISTORY_FILE, mode='w+') as f:
indent = self.INDENT_SIZE if self.PRETTIFY_HISTORY else None
json.dump(self.history, f, indent=indent)
f.flush()
sublime.set_timeout_async(lambda: self.__manage_backups(), 0)
def __manage_backups(self):
# Only keep backups if the user wants them
if self.MAX_BACKUP_COUNT <= 0:
return
# Make sure there is a backup of the history for today
(root, ext) = os.path.splitext(self.HISTORY_FILE)
datestamp = time.strftime('%Y%m%d')
backup = '%s_%s%s' % (root, datestamp, ext)
if not os.path.exists(backup):
self.debug('Backing up the history file for %s' % datestamp)
shutil.copy(self.HISTORY_FILE, backup)
# Limit the number of backup files to keep
listing = sorted(glob.glob('%s_*%s' % (root, ext)), reverse=True)
if len(listing) > self.MAX_BACKUP_COUNT:
for discard_file in listing[self.MAX_BACKUP_COUNT:]:
self.debug('Discarding old backup %s' % discard_file)
os.remove(discard_file)
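    # Illustrative sketch (not part of the plugin): with the default history
    # file 'FileHistory.json' and max_backup_count = 3, backups are written as
    #
    #   FileHistory_20170301.json
    #   FileHistory_20170228.json
    #   FileHistory_20170227.json
    #
    # at most one per day, and anything older than the newest three is removed.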
def delete_all_history(self):
self.history = {}
self.__save_history()
def get_history(self, current_project_only=True):
"""Return a copy of the requested history (global or project-specific): closed files followed by opened files"""
# Make sure the history is loaded
# TODO: If we have loaded history previously we should cache it and not access the file system again
if len(self.history) == 0:
self.__load_history()
# Load the requested history (global or project-specific)
if current_project_only:
self.project_name = self.get_current_project_key()
else:
self.project_name = 'global'
# Return the list of closed and opened files
if self.project_name in self.history:
# Return a copy of the contained lists in history (the only actually mutated objects)
history = self.history[self.project_name]
return dict(opened=history['opened'][:], closed=history['closed'][:])
else:
self.debug('WARN: Project %s could not be found in the file history list - returning an empty history list' % (self.project_name))
return dict(opened=[], closed=[])
def __ensure_project(self, project_name):
"""Make sure the project nodes exist (including 'opened' and 'closed')"""
if project_name not in self.history:
self.history[project_name] = {}
self.history[project_name]['opened'] = []
self.history[project_name]['closed'] = []
def is_suppressed(self, view, filename):
override_settings = view.settings().get("file_history", dict())
exclude_patterns = self.PATH_EXCLUDE_PATTERNS + override_settings.get("path_exclude_patterns", [])
reinclude_patterns = self.PATH_REINCLUDE_PATTERNS + override_settings.get("path_reinclude_patterns", [])
# Force forward slashes in the filename
filename = os.path.normpath(filename).replace("\\", "/")
# Search the filename for the pattern and suppress it if it matches
for exclude in exclude_patterns:
if re.search(exclude, filename):
self.debug('[X] Exclusion pattern "%s" blocks history tracking for filename "%s"'
% (exclude, filename))
                # See if none of our reinclude patterns nullifies the exclude
for reinclude in reinclude_patterns:
if re.search(reinclude, filename):
self.debug('[O] Inclusion pattern "%s" re-includes history tracking for filename "%s"'
% (reinclude, filename))
return False
return True
return False
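    # Illustrative sketch (not part of the plugin): with
    #   path_exclude_patterns   = ['/temp/']
    #   path_reinclude_patterns = ['\\.py$']
    # the file '/project/temp/scratch.txt' is suppressed (the exclude matches
    # and no reinclude matches), while '/project/temp/scratch.py' is still
    # tracked because the reinclude pattern overrides the exclude.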
def add_view(self, window, view, history_type):
# No point adding a transient view to the history
if self.is_transient_view(window, view):
return
filename = view.file_name()
# Only keep track of files that have a filename
if filename is not None:
if self.REAL_PATH:
realname = os.path.realpath(view.file_name())
if realname != filename:
self.debug("Resolved '%s' to '%s'" % (filename, realname))
filename = realname
project_name = self.get_current_project_key()
if self.is_suppressed(view, filename):
# If filename matches 'path_exclude_patterns' then abort the history tracking
# and remove any references to this file from the history
self.__remove(project_name, filename)
self.__remove('global', filename)
elif os.path.exists(filename):
# Add to both the project-specific and global histories
(group, index) = sublime.active_window().get_view_index(view)
self.__add_to_history(project_name, history_type, filename, group, index)
self.__add_to_history('global', history_type, filename, group, index)
else:
# If the file doesn't exist then remove it from the lists
self.__remove(project_name, filename)
self.__remove('global', filename)
self.__save_history()
def __add_to_history(self, project_name, history_type, filename, group, index):
self.debug('Adding %s file to project "%s" with group %s and index %s: %s' % (history_type, project_name, group, index, filename))
# Make sure the project nodes exist
self.__ensure_project(project_name)
# Remove the file from the project list then
# add it to the top (of the opened/closed list)
self.__remove(project_name, filename)
entry = {'filename': filename, 'group': group, 'index': index, 'timestamp': int(time.time())}
self.history[project_name][history_type].insert(0, entry)
# Make sure we limit the number of history entries
max_num_entries = self.GLOBAL_MAX_ENTRIES if project_name == 'global' else self.PROJECT_MAX_ENTRIES
self.history[project_name][history_type] = self.history[project_name][history_type][0:max_num_entries]
def __remove(self, project_name, filename):
# Only continue if this project exists
if project_name not in self.history:
return
# Remove any references to this file from the project
for history_type in ('opened', 'closed'):
            # Iterate over a copy so that removing an entry does not skip the next one
            for node in self.history[project_name][history_type][:]:
if node['filename'] == filename:
self.history[project_name][history_type].remove(node)
def clean_history(self, current_project_only):
if current_project_only:
self.__clean_history(self.get_current_project_key())
else:
# Clean-up the all histories and remove orphaned projects
orphan_list = []
open_projects = [self.get_project_key(window) for window in sublime.windows()]
for project_key in self.history:
# clean the project or remove it (if it no longer exists)
if (
project_key == 'global'
or os.path.exists(project_key)
or project_key in open_projects
):
# clean the project
self.__clean_history(project_key)
else:
# queue the orphaned project for deletion
orphan_list.append(project_key)
# remove any orphaned projects and save the history
for project_key in orphan_list:
self.debug('Removing orphaned project "%s" from the history' % project_key)
del self.history[project_key]
# Save history
self.__save_history()
def __clean_history(self, project_name):
self.debug('Cleaning the "%s" history' % (project_name))
# Only continue if this project exists
if project_name not in self.history:
sublime.status_message("This project does not have any history")
return
# Remove any non-existent files from the project
for history_type in ('opened', 'closed'):
for node in reversed(self.history[project_name][history_type]):
if not os.path.exists(node['filename']):
self.debug('Removing non-existent file from project "%s": %s' % (project_name, node['filename']))
self.history[project_name][history_type].remove(node)
sublime.status_message("File history cleaned")
def __clear_context(self):
"""Reset the calling view variables"""
self.calling_view = None
self.calling_view_index = []
self.calling_view_is_empty = False
self.current_view = None
self.current_history_entry = None
def __track_calling_view(self, window):
"""Remember the view that the command was run from (including the group and index positions),
so we can return to the "calling" view if the user cancels the preview
(or so we can show something if a file in the history no longer exists)"""
if not self.calling_view:
self.calling_view = window.active_view()
if self.calling_view:
self.calling_view_index = window.get_view_index(self.calling_view)
self.calling_view_is_empty = len(window.views()) == 0
else:
self.calling_view_index = [0, 0]
self.calling_view_is_empty = True
def __calculate_view_index(self, window, history_entry):
# Get the group of the new view (the currently active group is the default)
group = history_entry['group']
if group < 0 or group >= window.num_groups():
group = self.calling_view_index[0]
        # Get the alternative tab index (in case the saved index is no longer valid):
# The file could be opened in the saved tab position or as the first tab, the last tab or after the current tab...
max_index = len(window.views_in_group(group))
saved_index = history_entry['index']
if self.USE_SAVED_POSITION and saved_index >= 0 and saved_index <= max_index:
index = saved_index
elif self.NEW_TAB_POSITION == 'first':
index = 0
elif self.NEW_TAB_POSITION == 'last':
index = max_index
elif self.calling_view_index:
# DEFAULT: Open in the next tab
index = self.calling_view_index[1] + 1
else:
index = 0
return (group, index)
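    # Illustrative sketch (not part of the plugin): if an entry was saved with
    # group 2 but the window now has only two groups (0 and 1), the group of
    # the calling view is used instead; if the saved tab index is out of range,
    # the 'new_tab_position' setting ('first', 'last' or the default 'next')
    # decides where the reopened tab is placed.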
def preview_history(self, window, history_entry):
"""Preview the file if it exists, otherwise show the previous view (aka the "calling_view")"""
self.current_history_entry = history_entry
# track the view even if we won't be previewing it (to support quick-open and remove from history quick keys)
self.__track_calling_view(window)
# Only preview the view if the user wants to see it
if not self.SHOW_FILE_PREVIEW:
return
filepath = history_entry['filename']
if os.path.exists(filepath):
# asynchronously open the preview (improves perceived performance)
sublime.set_timeout_async(lambda: self.__open_preview(window, filepath), 0)
else:
# Close the last preview and remove the non-existent file from the history
self.__close_preview(window)
self.__remove(self.get_current_project_key(), filepath)
self.__save_history()
def __open_preview(self, window, filepath):
self.debug("Opening preview for '%s'" % filepath)
self.current_view = window.open_file(filepath, sublime.TRANSIENT | getattr(sublime, 'FORCE_GROUP', 0))
def quick_open_preview(self, window):
"""Open the file that is currently being previewed
Returns true if 'refresh' state should be cleared later."""
if not self.current_history_entry:
return
view = self.current_view
other_view = self.get_view_from_another_group(window, view.file_name())
# Only try to open and position the file if it is transient
if self.is_transient_view(window, view):
if not self.REOPEN_IN_CURRENT_GROUP and other_view:
# Focus the other view instead of opening a clone
self.debug("Focussing existing view in group %d" % window.get_view_index(other_view)[0])
self.__close_preview(window)
window.focus_view(other_view)
# Changing focus to another group requires reopening the panel, unfortunately
return True
else:
(group, index) = self.__calculate_view_index(window, self.current_history_entry)
view = window.open_file(self.current_history_entry['filename'])
window.set_view_index(view, group, index)
# Refocus on the newly opened file rather than the original one
self.__clear_context()
self.__track_calling_view(window)
def delete_current_entry(self):
"""Delete the history entry for the file that is currently being previewed"""
if not self.current_history_entry:
return
filename = self.current_history_entry['filename']
self.debug('Removing history entry for "%s" from project "%s"' % (filename, self.project_name))
self.__remove(self.project_name, filename)
self.__save_history()
def open_history(self, window, history_entry):
"""Open the file represented by the history_entry in the provided window"""
self.__track_calling_view(window)
(group, index) = self.__calculate_view_index(window, history_entry)
if not self.REOPEN_IN_CURRENT_GROUP or not hasattr(sublime, 'FORCE_GROUP'):
# Open the file and position the view correctly
self.__close_preview(window)
new_view = window.open_file(history_entry['filename'])
window.set_view_index(new_view, group, index)
self.debug('Opened file in group %s, index %s (based on saved group %s, index %s): %s'
% (group, index, history_entry['group'], history_entry['index'], history_entry['filename']))
else:
window.open_file(history_entry['filename'], sublime.FORCE_GROUP)
self.debug('Opened clone of file in current group: %s' % history_entry['filename'])
self.__clear_context()
def __close_preview(self, window):
if not self.SHOW_FILE_PREVIEW:
return
active_view = window.active_view()
if not self.current_view:
return
elif self.current_view.id() != active_view.id():
self.debug("ID mismatch!")
return
elif not self.is_transient_view(window, self.current_view):
self.debug("Last 'opened' view not transient")
return
self.debug("Closing file: %s" % self.current_view.file_name())
window.run_command("close_file")
self.current_view = None
def reset(self, window):
"""The user cancelled the action - give the focus back to the "calling" view and clear the context"""
self.__close_preview(window)
self.__clear_context()
def is_transient_view(self, window, view):
if not view:
# Sometimes, the view is just `None`. We can't use it in this
# state so just mark as transient.
return True
elif window.get_view_index(view)[1] == -1:
# If the view index is -1, then this can't be a real view.
# window.transient_view_in_group is not returning the correct
# value when we quickly cycle through the quick panel previews.
self.debug("Detected possibly transient view with index = -1: '%s'"
% view.file_name())
return True
else:
return view == window.transient_view_in_group(window.active_group())
def get_view_from_another_group(self, window, filename):
if self.calling_view_index:
# Not always defined at this point
calling_group = self.calling_view_index[0]
else:
calling_group = window.get_view_index(window.active_view())[0]
# Scan for view with same file_name in other groups
for group in range(window.num_groups()):
if group == calling_group:
continue
for view in window.views_in_group(group):
if view.file_name() == filename:
return view
#######################################
class OpenRecentlyClosedFileEvent(sublime_plugin.EventListener):
# We need pre close to detect if the view was transient,
# otherwise it always has (-1, -1) group and index.
def on_pre_close(self, view):
FileHistory().add_view(sublime.active_window(), view, 'closed')
def on_load(self, view):
FileHistory().add_view(sublime.active_window(), view, 'opened')
class CleanupFileHistoryCommand(sublime_plugin.WindowCommand):
def run(self, current_project_only=True):
FileHistory().clean_history(current_project_only)
class ResetFileHistoryCommand(sublime_plugin.WindowCommand):
def run(self):
FileHistory().delete_all_history()
class OpenRecentlyClosedFileCommand(sublime_plugin.WindowCommand):
"""class to either open the last closed file or show a quick panel with the recent file history (closed files first)"""
__is_active = False
def approximate_age(self, from_stamp, to_stamp=None, precision=2):
"""Calculate the relative time from given timestamp to another given (epoch) or now."""
if to_stamp is None:
to_stamp = time.time()
rem = to_stamp - from_stamp
def divide(rem, mod):
return rem % mod, int(rem // mod)
def subtract(rem, div):
n = int(rem // div)
return n, rem - n * div
seconds, rem = divide(rem, 60)
minutes, rem = divide(rem, 60)
hours, days = divide(rem, 24)
years, days = subtract(days, 365)
months, days = subtract(days, 30)
weeks, days = subtract(days, 7)
magnitudes = []
first = None
values = locals()
for i, magnitude in enumerate(("years", "months", "weeks", "days", "hours", "minutes", "seconds")):
v = int(values[magnitude])
if v == 0:
continue
s = "%s %s" % (v, magnitude)
if v == 1: # strip plural s
s = s[:-1]
# Handle precision limit
if first is None:
first = i
elif first + precision <= i:
break
magnitudes.append(s)
return ", ".join(magnitudes)
def set_refresh_in_progress(self):
self.refresh_in_progress = True
def clear_refresh_in_progress(self):
del self.refresh_in_progress
def is_refresh_in_progress(self):
return hasattr(self, "refresh_in_progress")
def delete_current_entry(self):
if not self.current_selected_index or self.current_selected_index < 0:
return
closed_len = len(self.history_list['closed'])
if self.current_selected_index < closed_len:
key = 'closed'
else:
self.current_selected_index -= closed_len
key = 'opened'
        if self.current_selected_index < len(self.history_list[key]):
del self.history_list[key][self.current_selected_index]
def get_history_by_index(self, index):
if index < 0:
return
closed_len = len(self.history_list['closed'])
if index < closed_len:
key = 'closed'
else:
index -= closed_len
key = 'opened'
        if index < len(self.history_list[key]):
return self.history_list[key][index]
def run(self, current_project_only=True, action="show_history"):
if action == "show_history":
self.current_project_only = current_project_only
if not self.is_refresh_in_progress():
self.history_list = FileHistory().get_history(current_project_only)
self.current_selected_index = None
self.group_index = self.window.active_group()
selected_index = 0
else:
FileHistory().debug("Reopening from refresh")
self.window.focus_group(self.group_index)
self.clear_refresh_in_progress()
selected_index = self.current_selected_index
# TODO recover filter text?
# Prepare the display list with the file name and path separated
display_list = []
for key in ('closed', 'opened'):
for entry in self.history_list[key]:
filepath = entry['filename']
info = [os.path.basename(filepath), os.path.dirname(filepath)]
# Only include the timestamp if it is there and if the user wants to see it
if FileHistory().TIMESTAMP_SHOW:
if not os.path.exists(filepath):
stamp = 'file no longer exists'
else:
(action, timestamp) = FileHistory().get_history_timestamp(entry, key)
if not timestamp:
stamp = ''
elif bool(FileHistory().TIMESTAMP_RELATIVE):
stamp = '%s %s ago' % (action, self.approximate_age(timestamp))
else:
stamp_str = time.strftime(FileHistory().TIMESTAMP_FORMAT, time.gmtime(timestamp))
stamp = '%s at %s' % (action, stamp_str)
info.append((' ' * 6) + stamp)
display_list.append(info)
if not display_list:
return
font_flag = sublime.MONOSPACE_FONT if FileHistory().USE_MONOSPACE else 0
self.__class__.__is_active = True
self.window.show_quick_panel(display_list, self.open_file, font_flag,
on_highlight=self.show_preview,
selected_index=selected_index)
sublime.status_message("[File History] You can quick-open or remove the currently "
"selected entry with `right` and `ctrl/cmd+del` respectively.")
elif action == "open_latest_closed":
self.history_list = FileHistory().get_history(current_project_only)
self.open_file(0)
elif action == "delete_current_entry":
FileHistory().delete_current_entry()
if not self.current_selected_index:
return
self.delete_current_entry()
# Deleting an entry from the quick panel should reopen it with the entry removed
self.set_refresh_in_progress()
sublime.active_window().run_command('hide_overlay')
elif action == "quick_open_current_entry":
# Will require reopening the panel if view in another group is focussed
self.set_refresh_in_progress()
if not FileHistory().quick_open_preview(sublime.active_window()):
self.clear_refresh_in_progress()
@classmethod
def is_active(cls):
'''
Returns whether the history overlay is open in a window. Note that
only the currently focused window can have an open overlay.
'''
return cls.__is_active
def show_preview(self, selected_index):
self.current_selected_index = selected_index
selected_entry = self.get_history_by_index(selected_index)
if selected_entry:
# A bug in SublimeText will cause the quick-panel to unexpectedly close trying to show the preview
# for a file that is already open in a different group, so simply don't display the preview for these files.
# In later releases, a 'FORCE_GROUP' flag has been introduced.
if hasattr(sublime, 'FORCE_GROUP') or not FileHistory().get_view_from_another_group(self.window, selected_entry['filename']):
FileHistory().preview_history(self.window, selected_entry)
def open_file(self, selected_index):
self.__class__.__is_active = False
selected_entry = self.get_history_by_index(selected_index)
if selected_entry:
# If the file is open in another group then simply give focus to that view, otherwise open the file
open_view = FileHistory().get_view_from_another_group(self.window, selected_entry['filename'])
if open_view and not FileHistory().REOPEN_IN_CURRENT_GROUP:
FileHistory().debug("Focussing existing view in group %d" % self.window.get_view_index(open_view)[0])
FileHistory().reset(self.window) # clear preview
self.window.focus_view(open_view)
else:
FileHistory().open_history(self.window, selected_entry)
else:
# The user cancelled the action
FileHistory().reset(self.window)
FileHistory().debug("User closed panel")
if self.is_refresh_in_progress():
self.window.run_command('open_recently_closed_file', {'current_project_only': self.current_project_only})
return
self.history_list = {}
class OpenRecentlyCloseFileCommandContextHandler(sublime_plugin.EventListener):
def on_query_context(self, view, key, operator, operand, match_all):
if key != 'file_history_overlay_visible':
return None
v1, v2 = OpenRecentlyClosedFileCommand.is_active(), bool(operand)
if operator == sublime.OP_EQUAL:
return v1 == v2
elif operator == sublime.OP_NOT_EQUAL:
return v1 != v2
else:
return None
def plugin_loaded():
# Force the FileHistory singleton to be instantiated so the startup tasks will be executed
# Depending on the "cleanup_on_startup" setting, the history may be cleaned at startup
FileHistory()
def plugin_unloaded():
# Unregister our on_change callback
FileHistory().app_settings.clear_on_change(FileHistory.SETTINGS_CALLBACK_KEY)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_notebooks_by_workspace_request(
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/notebooks')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
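# Illustrative sketch (not part of the generated client): the builder above
# produces a request equivalent to
#
#   GET /notebooks?api-version=2020-12-01
#   Accept: application/json
#
# with the workspace endpoint prepended later by the operations class via
# self._client.format_url().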
def build_get_notebook_summary_by_work_space_request(
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/notebooksSummary')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_notebook_request_initial(
notebook_name: str,
*,
json: JSONType = None,
content: Any = None,
if_match: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/notebooks/{notebookName}')
path_format_arguments = {
"notebookName": _SERIALIZER.url("notebook_name", notebook_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str')
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_notebook_request(
notebook_name: str,
*,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/notebooks/{notebookName}')
path_format_arguments = {
"notebookName": _SERIALIZER.url("notebook_name", notebook_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if if_none_match is not None:
header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_notebook_request_initial(
notebook_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/notebooks/{notebookName}')
path_format_arguments = {
"notebookName": _SERIALIZER.url("notebook_name", notebook_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_rename_notebook_request_initial(
notebook_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/notebooks/{notebookName}/rename')
path_format_arguments = {
"notebookName": _SERIALIZER.url("notebook_name", notebook_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class NotebookOperations(object):
"""NotebookOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.artifacts.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_notebooks_by_workspace(
self,
**kwargs: Any
) -> Iterable["_models.NotebookListResponse"]:
"""Lists Notebooks.
:keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NotebookListResponse or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.synapse.artifacts.models.NotebookListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.NotebookListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_notebooks_by_workspace_request(
api_version=api_version,
template_url=self.get_notebooks_by_workspace.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
else:
request = build_get_notebooks_by_workspace_request(
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("NotebookListResponse", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_notebooks_by_workspace.metadata = {'url': '/notebooks'} # type: ignore
@distributed_trace
def get_notebook_summary_by_work_space(
self,
**kwargs: Any
) -> Iterable["_models.NotebookListResponse"]:
"""Lists a summary of Notebooks.
:keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NotebookListResponse or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.synapse.artifacts.models.NotebookListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.NotebookListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_notebook_summary_by_work_space_request(
api_version=api_version,
template_url=self.get_notebook_summary_by_work_space.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
else:
request = build_get_notebook_summary_by_work_space_request(
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("NotebookListResponse", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_notebook_summary_by_work_space.metadata = {'url': '/notebooksSummary'} # type: ignore
def _create_or_update_notebook_initial(
self,
notebook_name: str,
notebook: "_models.NotebookResource",
if_match: Optional[str] = None,
**kwargs: Any
) -> Optional["_models.NotebookResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NotebookResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(notebook, 'NotebookResource')
request = build_create_or_update_notebook_request_initial(
notebook_name=notebook_name,
api_version=api_version,
content_type=content_type,
json=_json,
if_match=if_match,
template_url=self._create_or_update_notebook_initial.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NotebookResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_notebook_initial.metadata = {'url': '/notebooks/{notebookName}'} # type: ignore
@distributed_trace
def begin_create_or_update_notebook(
self,
notebook_name: str,
notebook: "_models.NotebookResource",
if_match: Optional[str] = None,
**kwargs: Any
) -> LROPoller["_models.NotebookResource"]:
"""Creates or updates a Note Book.
:param notebook_name: The notebook name.
:type notebook_name: str
        :param notebook: Notebook resource definition.
:type notebook: ~azure.synapse.artifacts.models.NotebookResource
        :param if_match: ETag of the Notebook entity. Should only be specified for update, for which
it should match existing entity or can be * for unconditional update.
:type if_match: str
:keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either NotebookResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.NotebookResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NotebookResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_notebook_initial(
notebook_name=notebook_name,
notebook=notebook,
if_match=if_match,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('NotebookResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_notebook.metadata = {'url': '/notebooks/{notebookName}'} # type: ignore
@distributed_trace
def get_notebook(
self,
notebook_name: str,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> Optional["_models.NotebookResource"]:
"""Gets a Note Book.
:param notebook_name: The notebook name.
:type notebook_name: str
:param if_none_match: ETag of the Notebook entity. Should only be specified for get. If the
ETag matches the existing entity tag, or if * was provided, then no content will be returned.
:type if_none_match: str
:keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NotebookResource, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.NotebookResource or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NotebookResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
request = build_get_notebook_request(
notebook_name=notebook_name,
api_version=api_version,
if_none_match=if_none_match,
template_url=self.get_notebook.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 304]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NotebookResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_notebook.metadata = {'url': '/notebooks/{notebookName}'} # type: ignore
def _delete_notebook_initial(
self,
notebook_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
request = build_delete_notebook_request_initial(
notebook_name=notebook_name,
api_version=api_version,
template_url=self._delete_notebook_initial.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_delete_notebook_initial.metadata = {'url': '/notebooks/{notebookName}'} # type: ignore
@distributed_trace
def begin_delete_notebook(
self,
notebook_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a Note book.
:param notebook_name: The notebook name.
:type notebook_name: str
:keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_notebook_initial(
notebook_name=notebook_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_notebook.metadata = {'url': '/notebooks/{notebookName}'} # type: ignore
def _rename_notebook_initial(
self,
notebook_name: str,
new_name: Optional[str] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_request = _models.ArtifactRenameRequest(new_name=new_name)
_json = self._serialize.body(_request, 'ArtifactRenameRequest')
request = build_rename_notebook_request_initial(
notebook_name=notebook_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._rename_notebook_initial.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_rename_notebook_initial.metadata = {'url': '/notebooks/{notebookName}/rename'} # type: ignore
@distributed_trace
def begin_rename_notebook(
self,
notebook_name: str,
new_name: Optional[str] = None,
**kwargs: Any
) -> LROPoller[None]:
"""Renames a notebook.
:param notebook_name: The notebook name.
:type notebook_name: str
:param new_name: New name of the artifact.
:type new_name: str
:keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._rename_notebook_initial(
notebook_name=notebook_name,
new_name=new_name,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rename_notebook.metadata = {'url': '/notebooks/{notebookName}/rename'} # type: ignore
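    # Illustrative sketch of resuming a rename from a saved state (hypothetical
    # caller code; `client` is whatever object exposes these operations):
    #
    #     poller = client.begin_rename_notebook("old-name", new_name="new-name")
    #     token = poller.continuation_token()   # persist this string somewhere
    #     ...
    #     resumed = client.begin_rename_notebook(
    #         "old-name", new_name="new-name", continuation_token=token)
    #     resumed.result()
    #
    # When continuation_token is supplied, the code above skips the initial request
    # and rebuilds the poller via LROPoller.from_continuation_token().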
|
|
import re
import time
import uuid as py_uuid
import pytest
from cattle import ApiError
from common_fixtures import * # NOQA
from test_volume import VOLUME_CLEANUP_LABEL
TEST_IMAGE = 'ibuildthecloud/helloworld'
TEST_IMAGE_LATEST = TEST_IMAGE + ':latest'
TEST_IMAGE_UUID = 'docker:' + TEST_IMAGE
if_docker = pytest.mark.skipif("os.environ.get('DOCKER_TEST') == 'false'",
                               reason='DOCKER_TEST is set to false')
os_environ = "os.environ.get('DOCKER_VERSION') != '1.12.1'"
if_docker_1_12 = pytest.mark.skipif(os_environ,
reason='Docker version is not 1.12.1')
sched_environ = "os.environ.get('CATTLE_TEST_RESOURCE_SCHEDULER') != 'true'"
if_resource_scheduler = pytest.mark.skipif(
    sched_environ, reason='CATTLE_TEST_RESOURCE_SCHEDULER is not true')
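# A hedged note on the markers above: @if_docker skips only when DOCKER_TEST is the
# literal string 'false'; @if_docker_1_12 skips unless DOCKER_VERSION == '1.12.1';
# @if_resource_scheduler skips unless CATTLE_TEST_RESOURCE_SCHEDULER == 'true'.
# An example invocation (the module name and runner flags are assumptions, not taken
# from this file) might look like:
#
#     DOCKER_VERSION=1.12.1 CATTLE_TEST_RESOURCE_SCHEDULER=true pytest <this module>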
@pytest.fixture(scope='session')
def docker_client(super_client):
for host in super_client.list_host(state='active', remove_null=True,
kind='docker'):
key = super_client.create_api_key(accountId=host.accountId)
super_client.wait_success(key)
wait_for(lambda: host.agent().state == 'active')
wait_for(lambda: len(host.storagePools()) > 0 and
host.storagePools()[0].state == 'active')
return api_client(key.publicValue, key.secretValue)
raise Exception('Failed to find docker host, please register one')
@if_docker
def test_docker_create_only(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
image = super_client.reload(container).image()
assert image.instanceKind == 'container'
image_mapping = filter(
lambda m: m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 0
assert not image.isPublic
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_create_only_from_sha(docker_client, super_client):
image_name = 'tianon/true@sha256:662fc60808e6d5628a090e39' \
'b4bcae694add28a626031cc889109c2cf2af5d73'
uuid = 'docker:' + image_name
container = docker_client.create_container(name='test-sha256',
imageUuid=uuid,
networkMode='bridge',
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
image = super_client.reload(container).image()
assert image.instanceKind == 'container'
image_mapping = filter(
lambda m: m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 0
assert not image.isPublic
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_create_with_start(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == TEST_IMAGE
assert len(container.volumes()) == 1
image = container.volumes()[0].image()
image = super_client.reload(image)
image_mapping = filter(
lambda m: not m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 1
assert image_mapping[0].imageId == image.id
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_build(docker_client, super_client):
uuid = 'image-' + random_str()
url = 'https://github.com/rancherio/tiny-build/raw/master/build.tar'
container = docker_client.create_container(imageUuid='docker:' + uuid,
networkMode='bridge',
build={
'context': url,
})
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
# This builds tianon/true which just dies
assert container.state == 'running' or container.state == 'stopped'
assert container.transitioning == 'no'
assert container.data.dockerContainer.Image == uuid
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_create_with_start_using_docker_io(docker_client, super_client):
image = 'docker.io/' + TEST_IMAGE
uuid = 'docker:' + image
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == image
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_command(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
command=['sleep', '42'])
try:
container = super_client.wait_success(container)
assert container.data.dockerInspect.Config.Cmd == ['sleep', '42']
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_command_args(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
command=['sleep', '1', '2',
'3'])
try:
container = super_client.wait_success(container)
assert container.data.dockerInspect.Config.Cmd == ['sleep', '1', '2',
'3']
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_short_lived_container(docker_client, super_client):
container = docker_client.create_container(imageUuid="docker:tianon/true",
networkMode='bridge')
container = wait_for_condition(
docker_client, container,
lambda x: x.state == 'stopped',
lambda x: 'State is: ' + x.state)
assert container.state == 'stopped'
assert container.transitioning == 'no'
@if_docker
def test_docker_stop(docker_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
assert container.state == 'creating'
container = docker_client.wait_success(container)
assert container.state == 'running'
start = time.time()
container = container.stop(timeout=0)
assert container.state == 'stopping'
container = docker_client.wait_success(container)
delta = time.time() - start
assert container.state == 'stopped'
assert delta < 10
@if_docker
def test_docker_purge(docker_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
assert container.state == 'creating'
container = docker_client.wait_success(container)
assert container.state == 'running'
container = container.stop(timeout=0)
assert container.state == 'stopping'
container = docker_client.wait_success(container)
assert container.state == 'stopped'
docker_client.delete(container)
container = docker_client.wait_success(container)
assert container.removed is not None
safe_purge(container, docker_client)
volumes = container.volumes()
assert len(volumes) == 0
def safe_purge(c, docker_client):
try:
c.purge()
except (ApiError, AttributeError):
# It's possible for the container to already have been purged
pass
c = docker_client.wait_success(c)
assert c.state == 'purged'
return c
@if_docker
def test_docker_image_format(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
try:
container = docker_client.wait_success(container)
container = super_client.reload(container)
assert container.image().format == 'docker'
assert container.volumes()[0].image().format == 'docker'
assert container.volumes()[0].format == 'docker'
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_ports_from_container_publish_all(docker_client):
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(networkMode='bridge',
publishAllPorts=True,
imageUuid=uuid)
c = docker_client.wait_success(c)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
port = ports[0]
assert port.publicPort is not None
assert port.privatePort == 8080
assert port.publicIpAddressId is not None
assert port.kind == 'imagePort'
docker_client.delete(c)
@if_docker
def test_docker_ports_from_container_no_publish(docker_client):
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
c = docker_client.wait_success(c)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
port = ports[0]
assert port.publicPort is None
assert port.privatePort == 8080
assert port.publicIpAddressId is not None
assert port.kind == 'imagePort'
docker_client.delete(c)
@if_docker
def test_docker_ports_from_container(docker_client, super_client):
def reload(x):
return super_client.reload(x)
_ = reload
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(networkMode='bridge',
startOnCreate=False,
publishAllPorts=True,
imageUuid=uuid,
ports=[
'8081',
'8082/tcp',
'8083/udp'])
c = docker_client.wait_success(c)
assert c.state == 'stopped'
count = 0
for port in c.ports_link():
count += 1
assert port.kind == 'userPort'
assert port.publicPort is None
assert port.privateIpAddressId is None
assert port.publicIpAddressId is None
if port.privatePort == 8081:
assert port.protocol == 'tcp'
elif port.privatePort == 8082:
assert port.protocol == 'tcp'
elif port.privatePort == 8083:
assert port.protocol == 'udp'
else:
assert False
assert count == 3
c = docker_client.wait_success(c.start())
assert c.state == 'running'
count = 0
ip = None
privateIp = None
for port in c.ports_link():
count += 1
assert port.privateIpAddressId is not None
privateIp = port.privateIpAddress()
assert privateIp.kind == 'docker'
assert _(privateIp).subnetId is None
assert port.publicPort is not None
assert port.publicIpAddressId is not None
if ip is None:
ip = port.publicIpAddressId
assert port.publicIpAddressId == ip
if port.privatePort == 8081:
assert port.kind == 'userPort'
assert port.protocol == 'tcp'
elif port.privatePort == 8082:
assert port.kind == 'userPort'
assert port.protocol == 'tcp'
elif port.privatePort == 8083:
assert port.kind == 'userPort'
assert port.protocol == 'udp'
elif port.privatePort == 8080:
assert port.kind == 'imagePort'
else:
assert False
assert count == 4
assert c.primaryIpAddress == privateIp.address
c = docker_client.wait_success(c.stop(timeout=0))
assert c.state == 'stopped'
count = 0
for nic in _(c).nics():
for ip in nic.ipAddresses():
count += 1
assert ip.kind == 'docker'
assert ip.state == 'inactive'
assert ip.address is None
assert count == 1
c = docker_client.wait_success(c.start())
if c.state != 'running':
super_c = super_client.reload(c)
        print('DEBUG Container not running: %s' % super_c)
assert c.state == 'running'
count = 0
for nic in _(c).nics():
for ip in nic.ipAddresses():
count += 1
assert ip.kind == 'docker'
assert ip.state == 'active'
assert ip.address is not None
assert count == 1
docker_client.delete(c)
@if_docker
def test_no_port_override(docker_client, super_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
ports=['8083:8080'])
try:
c = super_client.wait_success(c, timeout=240)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
assert ports[0].kind == 'userPort'
assert ports[0].publicPort == 8083
assert ports[0].privatePort == 8080
finally:
if c is not None:
super_client.delete(c)
@if_docker
def test_docker_volumes(docker_client, super_client):
def reload(x):
return super_client.reload(x)
_ = reload
uuid = TEST_IMAGE_UUID
bind_mount_uuid = py_uuid.uuid4().hex
bar_host_path = '/tmp/bar%s' % bind_mount_uuid
bar_bind_mount = '%s:/bar' % bar_host_path
c = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
startOnCreate=False,
dataVolumes=['/foo',
bar_bind_mount])
c = docker_client.wait_success(c)
assert len(c.dataVolumes) == 2
assert set(c.dataVolumes) == set(['/foo', bar_bind_mount])
c = super_client.wait_success(c.start())
volumes = c.volumes()
assert len(volumes) == 1
mounts = c.mounts_link()
assert len(mounts) == 2
foo_mount, bar_mount = None, None
foo_vol, bar_vol = None, None
for mount in mounts:
assert mount.instance().id == c.id
if mount.path == '/foo':
foo_mount = mount
foo_vol = mount.volume()
elif mount.path == '/bar':
bar_mount = mount
bar_vol = mount.volume()
foo_vol = wait_for_condition(
docker_client, foo_vol, lambda x: x.state == 'active')
assert foo_mount is not None
assert foo_mount.permissions == 'rw'
assert foo_vol is not None
assert not foo_vol.isHostPath
assert _(foo_vol).attachedState == 'inactive'
bar_vol = wait_for_condition(
docker_client, bar_vol, lambda x: x.state == 'active')
assert bar_mount is not None
assert bar_mount.permissions == 'rw'
assert bar_vol is not None
assert _(bar_vol).attachedState == 'inactive'
assert bar_vol.isHostPath
# We use 'in' instead of '==' because Docker uses the fully qualified
# non-linked path and it might look something like: /mnt/sda1/<path>
assert bar_host_path in bar_vol.uri
c2 = docker_client.create_container(name="volumes_from_test",
networkMode='bridge',
imageUuid=uuid,
startOnCreate=False,
dataVolumesFrom=[c.id])
c2 = docker_client.wait_success(c2)
assert len(c2.dataVolumesFrom) == 1
assert set(c2.dataVolumesFrom) == set([c.id])
c2 = super_client.wait_success(c2.start())
c2_mounts = c2.mounts_link()
assert len(c2_mounts) == 2
for mount in c2_mounts:
assert mount.instance().id == c2.id
if mount.path == '/foo':
assert mount.volumeId == foo_vol.id
elif mount.path == '/bar':
assert mount.volumeId == bar_vol.id
c = docker_client.wait_success(c.stop(remove=True, timeout=0))
c2 = docker_client.wait_success(c2.stop(remove=True, timeout=0))
_check_path(foo_vol, True, docker_client, super_client)
_check_path(bar_vol, True, docker_client, super_client)
@if_docker
def test_volumes_from_more_than_one_container(docker_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
dataVolumes=['/foo'])
docker_client.wait_success(c)
c2 = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
dataVolumes=['/bar'])
docker_client.wait_success(c2)
c3 = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
dataVolumesFrom=[c.id, c2.id])
c3 = docker_client.wait_success(c3)
mounts = c3.mounts_link()
assert len(mounts) == 2
paths = ['/foo', '/bar']
for m in mounts:
assert m.path in paths
@if_docker
def test_container_fields(docker_client, super_client):
caps = ["SYS_MODULE", "SYS_RAWIO", "SYS_PACCT", "SYS_ADMIN",
"SYS_NICE", "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG",
"MKNOD", "AUDIT_WRITE", "AUDIT_CONTROL", "MAC_OVERRIDE",
"MAC_ADMIN", "NET_ADMIN", "SYSLOG", "CHOWN", "NET_RAW",
"DAC_OVERRIDE", "FOWNER", "DAC_READ_SEARCH", "FSETID",
"KILL", "SETGID", "SETUID", "LINUX_IMMUTABLE",
"NET_BIND_SERVICE", "NET_BROADCAST", "IPC_LOCK",
"IPC_OWNER", "SYS_CHROOT", "SYS_PTRACE", "SYS_BOOT",
"LEASE", "SETFCAP", "WAKE_ALARM", "BLOCK_SUSPEND", "ALL"]
test_name = 'container_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
restart_policy = {"maximumRetryCount": 2, "name": "on-failure"}
c = docker_client.create_container(name=test_name + random_str(),
networkMode='bridge',
imageUuid=image_uuid,
capAdd=caps,
capDrop=caps,
dnsSearch=['8.8.8.8', '1.2.3.4'],
dns=['8.8.8.8', '1.2.3.4'],
privileged=True,
domainName="rancher.io",
memory=12000000,
memorySwap=16000000,
memoryReservation=4194304,
cpuSet="0,1",
stdinOpen=True,
tty=True,
command=["true"],
entryPoint=["/bin/sh", "-c"],
cpuShares=400,
restartPolicy=restart_policy,
devices="/dev/null:/dev/xnull:rw")
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
assert set(c.data['dockerInspect']['HostConfig']['CapAdd']) == set(caps)
assert set(c.data['dockerInspect']['HostConfig']['CapDrop']) == set(caps)
actual_dns = c.data['dockerInspect']['HostConfig']['Dns']
# TODO: when networking is back
# assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4', '169.254.169.250'])
assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4'])
actual_dns = c.data['dockerInspect']['HostConfig']['DnsSearch']
# TODO: when networking is back
# assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4', 'rancher.internal'])
assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4'])
assert c.data['dockerInspect']['HostConfig']['Privileged']
assert c.data['dockerInspect']['Config']['Domainname'] == "rancher.io"
assert c.data['dockerInspect']['HostConfig']['Memory'] == 12000000
assert c.data['dockerInspect']['HostConfig'][
'MemoryReservation'] == 4194304
# assert c.data['dockerInspect']['Config']['MemorySwap'] == 16000000
assert c.data['dockerInspect']['HostConfig']['CpusetCpus'] == "0,1"
assert c.data['dockerInspect']['Config']['Tty']
assert c.data['dockerInspect']['Config']['OpenStdin']
actual_entry_point = set(c.data['dockerInspect']['Config']['Entrypoint'])
assert actual_entry_point == set(["/bin/sh", "-c"])
assert c.data['dockerInspect']['HostConfig']['CpuShares'] == 400
act_restart_pol = c.data['dockerInspect']['HostConfig']['RestartPolicy']
assert act_restart_pol['MaximumRetryCount'] == 2
assert act_restart_pol['Name'] == "on-failure"
actual_devices = c.data['dockerInspect']['HostConfig']['Devices']
assert len(actual_devices) == 1
assert actual_devices[0]['CgroupPermissions'] == "rw"
assert actual_devices[0]['PathOnHost'] == "/dev/null"
assert actual_devices[0]['PathInContainer'] == "/dev/xnull"
@if_docker
def test_docker_newfields(docker_client, super_client):
test_name = 'container_field_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
privileged = True
blkioWeight = 100
cpuPeriod = 100000
cpuQuota = 50000
cpuSetMems = "0"
kernelMemory = 10000000
memory = 10000000
groupAdd = ['root']
memorySwappiness = 50
oomScoreAdj = 500
shmSize = 67108864
tmpfs = {"/run": "rw,noexec,nosuid,size=65536k"}
uts = "host"
ipcMode = "host"
stopSignal = "SIGTERM"
ulimits = [{"name": "cpu", "hard": 100000, "soft": 100000}]
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
privileged=privileged,
blkioWeight=blkioWeight,
cpuPeriod=cpuPeriod,
cpuQuota=cpuQuota,
cpuSetMems=cpuSetMems,
kernelMemory=kernelMemory,
groupAdd=groupAdd,
memory=memory,
memorySwappiness=memorySwappiness,
oomScoreAdj=oomScoreAdj,
shmSize=shmSize,
tmpfs=tmpfs,
uts=uts,
ipcMode=ipcMode,
stopSignal=stopSignal,
networkMode='bridge',
ulimits=ulimits)
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
assert c.data['dockerInspect']['HostConfig']['BlkioWeight'] == 100
assert c.data['dockerInspect']['HostConfig']['CpuPeriod'] == 100000
assert c.data['dockerInspect']['HostConfig']['CpuQuota'] == 50000
assert c.data['dockerInspect']['HostConfig']['CpusetMems'] == "0"
assert c.data['dockerInspect']['HostConfig']['KernelMemory'] == 10000000
assert c.data['dockerInspect']['HostConfig']['Memory'] == 10000000
assert c.data['dockerInspect']['HostConfig']['MemorySwappiness'] == 50
assert c.data['dockerInspect']['HostConfig']['GroupAdd'] == ['root']
assert not c.data['dockerInspect']['HostConfig']['OomKillDisable']
assert c.data['dockerInspect']['HostConfig']['OomScoreAdj'] == 500
assert c.data['dockerInspect']['HostConfig']['ShmSize'] == 67108864
run_args = "rw,noexec,nosuid,size=65536k"
assert c.data['dockerInspect']['HostConfig']['Tmpfs'] == {"/run": run_args}
assert c.data['dockerInspect']['HostConfig']['UTSMode'] == 'host'
assert c.data['dockerInspect']['HostConfig']['IpcMode'] == 'host'
host_limits = {"Name": "cpu", "Hard": 100000, "Soft": 100000}
assert c.data['dockerInspect']['HostConfig']['Ulimits'] == [host_limits]
assert c.data['dockerInspect']['Config']['StopSignal'] == 'SIGTERM'
@if_docker_1_12
def test_docker_extra_newfields(docker_client, super_client):
test_name = 'container_field_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
sysctls = {"net.ipv4.ip_forward": "1"}
healthCmd = ["ls"]
healthInterval = 5
healthRetries = 3
healthTimeout = 60
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
sysctls=sysctls,
healthCmd=healthCmd,
healthTimeout=healthTimeout,
healthRetries=healthRetries,
healthInterval=healthInterval)
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
host_sysctls = {"net.ipv4.ip_forward": "1"}
assert c.data['dockerInspect']['HostConfig']['Sysctls'] == host_sysctls
assert c.data['dockerInspect']['Config']['Healthcheck']['Test'] == ['ls']
h_interval = c.data['dockerInspect']['Config']['Healthcheck']['Interval']
assert h_interval == 5000000000
h_timeout = c.data['dockerInspect']['Config']['Healthcheck']['Timeout']
assert h_timeout == 60000000000
assert c.data['dockerInspect']['Config']['Healthcheck']['Retries'] == 3
@if_docker
def test_container_milli_cpu_reservation(docker_client, super_client):
test_name = 'container_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
stdinOpen=True,
tty=True,
command=["true"],
entryPoint=["/bin/sh", "-c"],
networkMode='bridge',
milliCpuReservation=2000,
cpuShares=400)
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
# milliCpuReservation will take precedence over cpuShares and be converted
# to a value that is (milliCpuShares / 1000) * 1024
assert c.data['dockerInspect']['HostConfig']['CpuShares'] == 2048
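    # Worked example of the conversion asserted above: milliCpuReservation=2000 means
    # 2000 / 1000 = 2 CPUs' worth of reservation, and 2 * 1024 = 2048 Docker CPU
    # shares, which is why CpuShares is expected to be 2048 rather than the
    # explicitly requested cpuShares=400.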
def get_mounts(resource):
return [x for x in resource.mounts_link() if x.state != 'inactive']
def check_mounts(client, resource, count):
def wait_for_mount_count(res):
m = get_mounts(res)
return len(m) == count
wait_for_condition(client, resource, wait_for_mount_count)
mounts = get_mounts(resource)
return mounts
def volume_cleanup_setup(docker_client, uuid, strategy=None):
labels = {}
if strategy:
labels[VOLUME_CLEANUP_LABEL] = strategy
vol_name = random_str()
c = docker_client.create_container(name="volume_cleanup_test",
imageUuid=uuid,
networkMode='bridge',
dataVolumes=['/tmp/foo',
'%s:/foo' % vol_name],
labels=labels)
c = docker_client.wait_success(c)
if strategy:
assert c.labels[VOLUME_CLEANUP_LABEL] == strategy
mounts = check_mounts(docker_client, c, 2)
v1 = mounts[0].volume()
v2 = mounts[1].volume()
wait_for_condition(docker_client, v1, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v2, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
named_vol = v1 if v1.name == vol_name else v2
unnamed_vol = v1 if v1.name != vol_name else v2
c = docker_client.wait_success(c.stop(remove=True, timeout=0))
safe_purge(c, docker_client)
check_mounts(docker_client, c, 0)
return c, named_vol, unnamed_vol
@if_docker
def test_cleanup_volume_strategy(docker_client):
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID)
assert docker_client.wait_success(named_vol).state == 'detached'
assert docker_client.wait_success(unnamed_vol).removed is not None
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID,
strategy='unnamed')
assert docker_client.wait_success(named_vol).state == 'detached'
assert docker_client.wait_success(unnamed_vol).removed is not None
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID,
strategy='none')
assert docker_client.wait_success(named_vol).state == 'detached'
assert docker_client.wait_success(unnamed_vol).state == 'detached'
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID,
strategy='all')
assert docker_client.wait_success(named_vol).removed is not None
assert docker_client.wait_success(unnamed_vol).removed is not None
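    # Summary of the cleanup strategies exercised above (restating the assertions):
    #   no label / 'unnamed' -> named volume detached, unnamed volume removed
    #   'none'               -> both volumes detached (nothing removed)
    #   'all'                -> both volumes removed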
@if_docker
def test_docker_volume_long(docker_client):
a = 'a' * 200
v = '/tmp/{}:/tmp/{}'.format(a, a)
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
dataVolumes=[v],
command=['sleep', '42'])
c = docker_client.wait_success(c)
assert c.state == 'running'
vol = c.mounts_link()[0].volume()
vol = docker_client.wait_success(vol)
assert vol.state == 'active'
@if_docker
def test_docker_mount_life_cycle(docker_client):
# Using nginx because it has a baked in volume, which is a good test case
uuid = 'docker:nginx:1.9.0'
bind_mount_uuid = py_uuid.uuid4().hex
bar_host_path = '/tmp/bar%s' % bind_mount_uuid
bar_bind_mount = '%s:/bar' % bar_host_path
c = docker_client.create_container(imageUuid=uuid,
startOnCreate=False,
networkMode='bridge',
dataVolumes=['%s:/foo' % random_str(),
bar_bind_mount])
c = docker_client.wait_success(c)
c = docker_client.wait_success(c.start())
mounts = check_mounts(docker_client, c, 3)
v1 = mounts[0].volume()
v2 = mounts[1].volume()
v3 = mounts[2].volume()
wait_for_condition(docker_client, v1, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v2, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v3, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
c = docker_client.wait_success(c.stop(timeout=0))
assert c.state == 'stopped'
wait_for_condition(docker_client, v1, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v2, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v3, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
c = docker_client.wait_success(c.remove())
check_mounts(docker_client, c, 0)
# State can be either detached or removed depending on whether c got purged
assert docker_client.wait_success(v1).state != 'active'
assert docker_client.wait_success(v2).state != 'active'
assert docker_client.wait_success(v3).state != 'active'
@if_docker
def test_docker_labels(docker_client, super_client):
    # Docker 1.8 broke the behavior where labels came from the image;
    # one day maybe they will bring it back.
# image_uuid = 'docker:ranchertest/labelled:v0.1.0'
image_uuid = TEST_IMAGE_UUID
c = docker_client.create_container(name="labels_test",
imageUuid=image_uuid,
networkMode='bridge',
labels={'io.rancher.testlabel.'
'fromapi': 'yes'})
c = docker_client.wait_success(c)
def labels_callback():
labels = c.instanceLabels()
if len(labels) >= 3:
return labels
return None
labels = wait_for(labels_callback)
actual_labels = {}
for l in labels:
actual_labels[l.key] = l.value
sc = super_client.reload(c)
mac_address = sc.nics()[0].macAddress
expected_labels = {
# 'io.rancher.testlabel': 'value1',
# 'io.rancher.testlabel.space': 'value 1',
'io.rancher.testlabel.fromapi': 'yes',
'io.rancher.container.uuid': c.uuid,
'io.rancher.container.name': c.name,
'io.rancher.container.mac_address': mac_address,
}
assert actual_labels == expected_labels
docker_client.delete(c)
@if_docker
def test_container_odd_fields(super_client, docker_client):
c = docker_client.create_container(pidMode=None,
imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
logConfig={
'driver': None,
'config': None,
})
c = docker_client.wait_success(c)
assert c.state == 'running'
assert c.pidMode is None
assert c.logConfig == {'type': 'logConfig', 'driver': None, 'config': None}
c = super_client.reload(c)
assert c.data.dockerInspect.HostConfig.LogConfig['Type'] == 'json-file'
assert not c.data.dockerInspect.HostConfig.LogConfig['Config']
@if_docker
def test_container_bad_build(super_client, docker_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
build={
'context': None,
'remote': None
})
c = docker_client.wait_success(c)
assert c.state == 'running'
assert c.pidMode is None
assert c.build == {'context': None, 'remote': None, 'type': 'dockerBuild'}
c = super_client.reload(c)
assert c.data.dockerInspect.Config.Image == TEST_IMAGE
@if_docker
def test_service_link_emu_docker_link(super_client, docker_client):
env_name = random_str()
env = docker_client.create_stack(name=env_name)
env = docker_client.wait_success(env)
assert env.state == "active"
server = docker_client.create_service(name='server', launchConfig={
'networkMode': 'bridge',
'imageUuid': TEST_IMAGE_UUID
}, stackId=env.id)
service = docker_client.create_service(name='client', launchConfig={
'networkMode': 'bridge',
'imageUuid': TEST_IMAGE_UUID
}, stackId=env.id)
service_link = {"serviceId": server.id, "name": "other"}
service.setservicelinks(serviceLinks=[service_link])
server = docker_client.wait_success(server)
service = docker_client.wait_success(service)
server = docker_client.wait_success(server.activate())
assert server.state == 'active'
service = docker_client.wait_success(service.activate())
assert service.state == 'active'
instance = find_one(service.instances)
instance = super_client.reload(instance)
link = find_one(instance.instanceLinks)
target_instance = find_one(server.instances)
assert link.targetInstanceId == target_instance.id
assert link.instanceNames == ['{}-server-1'.format(env_name)]
docker_client.delete(env)
@if_docker
def test_service_links_with_no_ports(docker_client):
env = docker_client.create_stack(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
server = docker_client.create_service(name='server', launchConfig={
'imageUuid': TEST_IMAGE_UUID,
'networkMode': 'bridge',
'stdinOpen': True,
'tty': True,
}, stackId=env.id)
server = docker_client.wait_success(server)
assert server.state == 'inactive'
service = docker_client.create_service(name='client', launchConfig={
'imageUuid': TEST_IMAGE_UUID,
'networkMode': 'bridge',
'stdinOpen': True,
'tty': True,
}, stackId=env.id)
service = docker_client.wait_success(service)
assert service.state == 'inactive'
service_link = {"serviceId": server.id, "name": "bb"}
service.setservicelinks(serviceLinks=[service_link])
server = docker_client.wait_success(server.activate())
assert server.state == 'active'
service = docker_client.wait_success(service.activate())
assert service.state == 'active'
@if_docker
def test_blkio_device_options(super_client, docker_client):
dev_opts = {
'/dev/sda': {
'readIops': 1000,
'writeIops': 2000,
},
'/dev/null': {
'readBps': 3000,
}
}
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode=None,
blkioDeviceOptions=dev_opts)
c = docker_client.wait_success(c)
assert c.state == 'running'
super_c = super_client.reload(c)
hc = super_c.data.dockerInspect['HostConfig']
assert hc['BlkioDeviceReadIOps'] == [{'Path': '/dev/sda', 'Rate': 1000}]
assert hc['BlkioDeviceWriteIOps'] == [{'Path': '/dev/sda', 'Rate': 2000}]
assert hc['BlkioDeviceReadBps'] == [{'Path': '/dev/null', 'Rate': 3000}]
@if_resource_scheduler
def test_port_constraint(docker_client):
    # Tests with the above label can only be run when the external scheduler
    # is enabled. It isn't enabled in CI, so we need to disable these tests by
    # default. They can (and should) be run locally if working on the scheduler.
containers = []
try:
c = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9998:81/tcp']))
containers.append(c)
# try to deploy another container with same public port + protocol
c2 = docker_client.wait_transitioning(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9998:81/tcp']))
assert c2.transitioning == 'error'
assert '9998:81/tcp' in c2.transitioningMessage
assert c2.state == 'error'
containers.append(c2)
# try different public port
c3 = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9999:81/tcp']))
containers.append(c3)
# try different protocol
c4 = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9999:81/udp']))
containers.append(c4)
# UDP is now taken
c5 = docker_client.wait_transitioning(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9999:81/udp']))
assert c5.transitioning == 'error'
assert '9999:81/udp' in c5.transitioningMessage
assert c5.state == 'error'
containers.append(c5)
# try different bind IP
c6 = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.1:9997:81/tcp']))
containers.append(c6)
# Bind IP is now taken
c7 = docker_client.wait_transitioning(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.1:9997:81/tcp']))
assert c7.transitioning == 'error'
assert '127.2.2.1:9997:81/tcp' in c7.transitioningMessage
assert c7.state == 'error'
containers.append(c7)
finally:
for c in containers:
if c is not None:
c = docker_client.wait_success(docker_client.delete(c))
c.purge()
@if_resource_scheduler
def test_conflicting_ports_in_deployment_unit(docker_client):
env = docker_client.create_stack(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": TEST_IMAGE_UUID, "ports": ['7777:6666']}
secondary_lc = {"imageUuid": TEST_IMAGE_UUID,
"name": "secondary", "ports": ['7777:6666']}
svc = docker_client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
svc = docker_client.wait_success(svc)
assert svc.state == "inactive"
svc = svc.activate()
c = _wait_for_compose_instance_error(docker_client, svc, env)
assert '7777:6666/tcp' in c.transitioningMessage
env.remove()
@if_resource_scheduler
def test_simultaneous_port_allocation(docker_client):
    # This test ensures that if two containers are allocated simultaneously, only
    # one will get the port and the other will fail to allocate.
    # By nature, this test exercises a race condition, so it isn't perfect.
env = docker_client.create_stack(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"ports": ['5555:6666']}
svc = docker_client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=2)
svc = docker_client.wait_success(svc)
assert svc.state == "inactive"
svc = svc.activate()
c = _wait_for_compose_instance_error(docker_client, svc, env)
assert '5555:6666/tcp' in c.transitioningMessage
@if_resource_scheduler
def test_docker_bind_address(docker_client, super_client):
c = docker_client.create_container(name='bindAddrTest',
networkMode='bridge',
imageUuid=TEST_IMAGE_UUID,
ports=['127.0.0.1:89:8999'])
c = docker_client.wait_success(c)
assert c.state == 'running'
c = super_client.reload(c)
bindings = c.data['dockerInspect']['HostConfig']['PortBindings']
assert bindings['8999/tcp'] == [{'HostIp': '127.0.0.1', 'HostPort': '89'}]
c = docker_client.create_container(name='bindAddrTest2',
networkMode='bridge',
imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.2:89:8999'])
c = docker_client.wait_success(c)
assert c.state == 'running'
c = super_client.reload(c)
bindings = c.data['dockerInspect']['HostConfig']['PortBindings']
assert bindings['8999/tcp'] == [{'HostIp': '127.2.2.2', 'HostPort': '89'}]
c = docker_client.create_container(name='bindAddrTest3',
networkMode='bridge',
imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.2:89:8999'])
c = docker_client.wait_transitioning(c)
assert c.transitioning == 'error'
assert '127.2.2.2:89:8999' in c.transitioningMessage
assert c.state == 'error'
def _wait_for_compose_instance_error(client, service, env):
name = env.name + "-" + service.name + "%"
def check():
containers = client.list_container(name_like=name, state='error')
if len(containers) > 0:
return containers[0]
container = wait_for(check)
return container
def _check_path(volume, should_exist, client, super_client):
path = _path_to_volume(volume)
    print('Checking path [%s] for volume [%s].' % (path, volume))
c = client. \
create_container(name="volume_check" + random_str(),
imageUuid="docker:ranchertest/volume-test:v0.1.0",
networkMode=None,
environment={'TEST_PATH': path},
command='/opt/tools/check_path_exists.sh',
dataVolumes=[
'/var/lib/docker:/host/var/lib/docker',
'/tmp:/host/tmp'])
c = super_client.wait_success(c)
assert c.state == 'running'
c = super_client.wait_success(c.stop())
assert c.state == 'stopped'
code = c.data.dockerInspect.State.ExitCode
if should_exist:
# The exit code of the container should be a 10 if the path existed
assert code == 10
else:
# And 11 if the path did not exist
assert code == 11
c.remove()
def _path_to_volume(volume):
path = volume.uri.replace('file://', '')
mounted_path = re.sub('^.*?/var/lib/docker', '/host/var/lib/docker',
path)
if not mounted_path.startswith('/host/var/lib/docker'):
mounted_path = re.sub('^.*?/tmp', '/host/tmp',
path)
return mounted_path
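    # Hedged worked example of the remapping above (the URI is made up):
    #   volume.uri = 'file:///var/lib/docker/volumes/abc123/_data'
    #   -> strip 'file://'        : '/var/lib/docker/volumes/abc123/_data'
    #   -> remap for the checker  : '/host/var/lib/docker/volumes/abc123/_data'
    # URIs that do not contain /var/lib/docker fall back to the /tmp -> /host/tmp
    # remap, matching the bind mounts given to the check container in _check_path().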
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import shutil
from hashlib import sha1
from pants.build_graph.build_graph import sort_targets
from pants.build_graph.target import Target
from pants.invalidation.build_invalidator import CacheKey
from pants.util.dirutil import relative_symlink, safe_delete, safe_mkdir, safe_rmtree
from pants.util.memo import memoized_method
class VersionedTargetSet:
"""Represents a list of targets, a corresponding CacheKey, and a flag determining whether the
list of targets is currently valid.
When invalidating a single target, this can be used to represent that target as a singleton.
When checking the artifact cache, this can also be used to represent a list of targets that are
built together into a single artifact.
"""
class IllegalResultsDir(Exception):
"""Indicate a problem interacting with a versioned target results directory."""
@staticmethod
def from_versioned_targets(versioned_targets):
"""
:API: public
"""
first_target = versioned_targets[0]
cache_manager = first_target._cache_manager
# Quick sanity check; all the versioned targets should have the same cache manager.
# TODO(ryan): the way VersionedTargets store their own links to a single CacheManager instance
# feels hacky; see if there's a cleaner way for callers to handle awareness of the CacheManager.
for versioned_target in versioned_targets:
if versioned_target._cache_manager != cache_manager:
raise ValueError(
"Attempting to combine versioned targets {} and {} with different"
" CacheManager instances: {} and {}".format(
first_target,
versioned_target,
cache_manager,
versioned_target._cache_manager,
)
)
return VersionedTargetSet(cache_manager, versioned_targets)
def __init__(self, cache_manager, versioned_targets):
self._cache_manager = cache_manager
self.versioned_targets = versioned_targets
self.targets = [vt.target for vt in versioned_targets]
# The following line is a no-op if cache_key was set in the VersionedTarget __init__ method.
self.cache_key = CacheKey.combine_cache_keys([vt.cache_key for vt in versioned_targets])
# NB: previous_cache_key may be None on the first build of a target.
self.previous_cache_key = cache_manager.previous_key(self.cache_key)
self.valid = self.previous_cache_key == self.cache_key
if cache_manager.invalidation_report:
cache_manager.invalidation_report.add_vts(
cache_manager.task_name, self.targets, self.cache_key, self.valid, phase="init"
)
self._results_dir = None
self._current_results_dir = None
self._previous_results_dir = None
# True if the results_dir for this VT was created incrementally via clone of the
# previous results_dir.
self.is_incremental = False
@property
def cacheable(self):
"""Indicates whether artifacts associated with this target set should be cached.
:return: `True` if this target set's associated artifacts can be cached.
:rtype: bool
"""
return self._cache_manager.cacheable(self.cache_key)
def update(self):
self._cache_manager.update(self)
def force_invalidate(self):
        # Note: This method isn't exposed as Public because the API is not yet
        # finalized; however, it is currently used by Square for plugins. There is
# an open OSS issue to finalize this API. Please take care when changing
# until https://github.com/pantsbuild/pants/issues/2532 is resolved.
self._cache_manager.force_invalidate(self)
@property
def has_results_dir(self):
return self._results_dir is not None
@property
def has_previous_results_dir(self):
return self._previous_results_dir is not None and os.path.isdir(self._previous_results_dir)
@property
def results_dir(self):
"""The directory that stores results for these targets.
The results_dir is represented by a stable symlink to the current_results_dir: consumers
should generally prefer to access the stable directory.
"""
if self._results_dir is None:
raise ValueError("No results_dir was created for {}".format(self))
return self._results_dir
@property
def current_results_dir(self):
"""A unique directory that stores results for this version of these targets."""
if self._current_results_dir is None:
raise ValueError("No results_dir was created for {}".format(self))
return self._current_results_dir
@property
def previous_results_dir(self):
"""The directory that stores results for the previous version of these targets.
Only valid if is_incremental is true.
TODO: Exposing old results is a bit of an abstraction leak, because ill-behaved Tasks could
mutate them.
"""
if not self.has_previous_results_dir:
raise ValueError("There is no previous_results_dir for: {}".format(self))
return self._previous_results_dir
def ensure_legal(self):
"""Return True as long as the state does not break any internal contracts."""
        # Do our best to provide complete feedback; it's easy to imagine the frustration of flipping between error states.
if self._results_dir:
errors = ""
if not os.path.islink(self._results_dir):
errors += "\nThe results_dir is no longer a symlink:\n\t* {}".format(
self._results_dir
)
if not os.path.isdir(self._current_results_dir):
errors += "\nThe current_results_dir directory was not found\n\t* {}".format(
self._current_results_dir
)
if errors:
raise self.IllegalResultsDir(
"\nThe results_dirs state should not be manually cleaned or recreated by tasks.\n{}".format(
errors
)
)
return True
def live_dirs(self):
"""Yields directories that must exist for this VersionedTarget to function."""
# The only caller of this function is the workdir cleaning pipeline. It is not clear that the previous_results_dir
# should be returned for that purpose. And, by the time this is called, the contents have already been copied.
if self.has_results_dir:
yield self.results_dir
yield self.current_results_dir
if self.has_previous_results_dir:
yield self.previous_results_dir
@memoized_method
def _target_to_vt(self):
return {vt.target: vt for vt in self.versioned_targets}
def __repr__(self):
return "VTS({}, {})".format(
",".join(target.address.spec for target in self.targets),
"valid" if self.valid else "invalid",
)
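    # Illustrative sketch (hypothetical variable names, not part of this module) of
    # combining per-target VersionedTargets into one set that is built as a unit:
    #
    #     vts = VersionedTargetSet.from_versioned_targets([vt_a, vt_b])
    #     if not vts.valid:
    #         ...build the combined artifact...
    #         vts.update()   # marks every member, and the set itself, as valid
    #
    # from_versioned_targets() raises ValueError if the members were created by
    # different CacheManager instances, as checked above.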
class VersionedTarget(VersionedTargetSet):
"""This class represents a singleton VersionedTargetSet.
:API: public
"""
def __init__(self, cache_manager, target, cache_key):
"""
:API: public
"""
if not isinstance(target, Target):
raise ValueError(
"The target {} must be an instance of Target but is not.".format(target.id)
)
self.target = target
self.cache_key = cache_key
# Must come after the assignments above, as they are used in the parent's __init__.
super().__init__(cache_manager, [self])
self.id = target.id
@property
def cacheable(self):
"""Indicates whether artifacts associated with this target should be cached.
:return: `True` if this target's associated artifacts can be cached.
:rtype: bool
"""
return super().cacheable and not self.target.no_cache
def create_results_dir(self):
"""Ensure that the empty results directory and a stable symlink exist for these versioned
targets."""
self._current_results_dir = self._cache_manager._results_dir_path(
self.cache_key, stable=False
)
self._results_dir = self._cache_manager._results_dir_path(self.cache_key, stable=True)
if not self.valid:
# Clean the workspace for invalid vts.
safe_mkdir(self._current_results_dir, clean=True)
relative_symlink(self._current_results_dir, self._results_dir)
self.ensure_legal()
def copy_previous_results(self):
"""Use the latest valid results_dir as the starting contents of the current results_dir.
Should be called after the cache is checked, since previous_results are not useful if there
is a cached artifact.
"""
# TODO(mateo): This should probably be managed by the task, which manages the rest of the
# incremental support.
if not self.previous_cache_key:
return None
previous_path = self._cache_manager._results_dir_path(self.previous_cache_key, stable=False)
if os.path.isdir(previous_path):
self.is_incremental = True
safe_rmtree(self._current_results_dir)
shutil.copytree(previous_path, self._current_results_dir)
safe_mkdir(self._current_results_dir)
relative_symlink(self._current_results_dir, self.results_dir)
# Set the self._previous last, so that it is only True after the copy completed.
self._previous_results_dir = previous_path
def __repr__(self):
return "VT({}, {})".format(self.target.id, "valid" if self.valid else "invalid")
class InvalidationCheck:
"""The result of calling check() on a CacheManager.
Each member is a list of VersionedTargetSet objects. Sorting of the targets depends
on how you order the InvalidationCheck from the InvalidationCacheManager.
Tasks may need to perform no, some or all operations on either of these, depending on how they
are implemented.
"""
def __init__(self, all_vts, invalid_vts):
"""
:API: public
"""
# All the targets, valid and invalid.
self.all_vts = all_vts
# Just the invalid targets.
self.invalid_vts = invalid_vts
class InvalidationCacheManager:
"""Manages cache checks, updates and invalidation keeping track of basic change and invalidation
statistics.
Note that this is distinct from the ArtifactCache concept, and should probably be renamed.
"""
class CacheValidationError(Exception):
"""Indicates a problem accessing the cache."""
_STABLE_DIR_NAME = "current"
def __init__(
self,
results_dir_root,
cache_key_generator,
build_invalidator,
invalidate_dependents,
fingerprint_strategy=None,
invalidation_report=None,
task_name=None,
task_version_slug=None,
artifact_write_callback=lambda _: None,
):
"""
:API: public
"""
self._cache_key_generator = cache_key_generator
self._task_name = task_name or "UNKNOWN"
self._invalidate_dependents = invalidate_dependents
self._invalidator = build_invalidator
self._fingerprint_strategy = fingerprint_strategy
self._artifact_write_callback = artifact_write_callback
self.invalidation_report = invalidation_report
# Create the task-versioned prefix of the results dir, and a stable symlink to it
# (useful when debugging).
self._results_dir_prefix = os.path.join(results_dir_root, task_version_slug)
safe_mkdir(self._results_dir_prefix)
stable_prefix = os.path.join(results_dir_root, self._STABLE_DIR_NAME)
safe_delete(stable_prefix)
relative_symlink(self._results_dir_prefix, stable_prefix)
def update(self, vts):
"""Mark a changed or invalidated VersionedTargetSet as successfully processed."""
for vt in vts.versioned_targets:
vt.ensure_legal()
if not vt.valid:
self._invalidator.update(vt.cache_key)
vt.valid = True
self._artifact_write_callback(vt)
if not vts.valid:
vts.ensure_legal()
self._invalidator.update(vts.cache_key)
vts.valid = True
self._artifact_write_callback(vts)
def force_invalidate(self, vts):
"""Force invalidation of a VersionedTargetSet."""
for vt in vts.versioned_targets:
self._invalidator.force_invalidate(vt.cache_key)
vt.valid = False
self._invalidator.force_invalidate(vts.cache_key)
vts.valid = False
def check(self, targets, topological_order=False):
"""Checks whether each of the targets has changed and invalidates it if so.
Returns a list of VersionedTargetSet objects (either valid or invalid). The returned sets
'cover' the input targets, with one caveat: if the FingerprintStrategy
opted out of fingerprinting a target because it doesn't contribute to invalidation, then that
target will be excluded from all_vts and invalid_vts.
Callers can inspect these vts and rebuild the invalid ones, for example.
"""
all_vts = self.wrap_targets(targets, topological_order=topological_order)
invalid_vts = [vt for vt in all_vts if not vt.valid]
return InvalidationCheck(all_vts, invalid_vts)
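        # Illustrative sketch of how a caller might consume check() (the names
        # `targets` and `build_one` are hypothetical; real call sites live in Task
        # subclasses outside this module):
        #
        #     invalidation_check = cache_manager.check(targets)
        #     for vt in invalidation_check.invalid_vts:
        #         vt.create_results_dir()
        #         build_one(vt.target, vt.current_results_dir)
        #         cache_manager.update(vt)   # records the new key and flips vt.valid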
@property
def task_name(self):
return self._task_name
def _results_dir_path(self, key, stable):
"""Return a results directory path for the given key.
:param key: A CacheKey to generate an id for.
:param stable: True to use a stable subdirectory, false to use a portion of the cache key to
generate a path unique to the key.
"""
# TODO: Shorten cache_key hashes in general?
return os.path.join(
self._results_dir_prefix,
key.id,
self._STABLE_DIR_NAME if stable else sha1(key.hash.encode()).hexdigest()[:12],
)
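        # Hedged illustration of the resulting layout under _results_dir_prefix
        # (the key id and hash fragment below are made up):
        #
        #     <results_dir_root>/<task_version_slug>/<key.id>/3f2c1a9b8d7e  (stable=False)
        #     <results_dir_root>/<task_version_slug>/<key.id>/current       (stable=True;
        #         create_results_dir() points this symlink at the hashed directory)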
def wrap_targets(self, targets, topological_order=False):
"""Wrap targets and their computed cache keys in VersionedTargets.
If the FingerprintStrategy opted out of providing a fingerprint for a target, that target will not
have an associated VersionedTarget returned.
Returns a list of VersionedTargets, each representing one input target.
"""
def vt_iter():
if topological_order:
target_set = set(targets)
sorted_targets = [t for t in reversed(sort_targets(targets)) if t in target_set]
else:
sorted_targets = sorted(targets)
for target in sorted_targets:
target_key = self._key_for(target)
if target_key is not None:
yield VersionedTarget(self, target, target_key)
return list(vt_iter())
def cacheable(self, cache_key):
"""Indicates whether artifacts associated with the given `cache_key` should be cached.
:return: `True` if the `cache_key` represents a cacheable set of target artifacts.
:rtype: bool
"""
return self._invalidator.cacheable(cache_key)
def previous_key(self, cache_key):
return self._invalidator.previous_key(cache_key)
def _key_for(self, target):
try:
return self._cache_key_generator.key_for_target(
target,
transitive=self._invalidate_dependents,
fingerprint_strategy=self._fingerprint_strategy,
)
except Exception as e:
# This is a catch-all for problems we haven't caught up with and given a better diagnostic.
# TODO(Eric Ayers): If you see this exception, add a fix to catch the problem earlier.
new_exception = self.CacheValidationError(
"Problem validating target {} in {}: {}".format(
target.id, target.address.spec_path, e
)
)
            raise new_exception from e
|
|
import sys
from django.conf import settings
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.utils import DatabaseError
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
TEST_DATABASE_PREFIX = 'test_'
class DatabaseCreation(BaseDatabaseCreation):
@cached_property
def _maindb_connection(self):
"""
This is analogous to other backends' `_nodb_connection` property,
which allows access to an "administrative" connection which can
be used to manage the test databases.
For Oracle, the only connection that can be used for that purpose
is the main (non-test) connection.
"""
settings_dict = settings.DATABASES[self.connection.alias]
user = settings_dict.get('SAVED_USER') or settings_dict['USER']
password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD']
settings_dict = settings_dict.copy()
settings_dict.update(USER=user, PASSWORD=password)
DatabaseWrapper = type(self.connection)
return DatabaseWrapper(settings_dict, alias=self.connection.alias)
def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False):
parameters = self._get_test_db_params()
cursor = self._maindb_connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity, keepdb)
except Exception as e:
if 'ORA-01543' not in str(e):
# All errors except "tablespace already exists" cancel tests
sys.stderr.write("Got an error creating the test database: %s\n" % e)
sys.exit(2)
if not autoclobber:
confirm = input(
"It appears the test database, %s, already exists. "
"Type 'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
if verbosity >= 1:
print("Destroying old test database for alias '%s'..." % self.connection.alias)
try:
self._execute_test_db_destruction(cursor, parameters, verbosity)
except DatabaseError as e:
if 'ORA-29857' in str(e):
self._handle_objects_preventing_db_destruction(cursor, parameters,
verbosity, autoclobber)
else:
# Ran into a database error that isn't about leftover objects in the tablespace
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
except Exception as e:
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
try:
self._execute_test_db_creation(cursor, parameters, verbosity, keepdb)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity, keepdb)
except Exception as e:
if 'ORA-01920' not in str(e):
# All errors except "user already exists" cancel tests
sys.stderr.write("Got an error creating the test user: %s\n" % e)
sys.exit(2)
if not autoclobber:
confirm = input(
"It appears the test user, %s, already exists. Type "
"'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity, keepdb)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
self._maindb_connection.close() # done with main user -- test user and tablespaces created
self._switch_to_test_user(parameters)
return self.connection.settings_dict['NAME']
def _switch_to_test_user(self, parameters):
"""
Oracle doesn't have the concept of separate databases under the same user.
Thus, we use a separate user (see _create_test_db). This method is used
to switch to that user. We will need the main user again for clean-up when
we end testing, so we keep its credentials in SAVED_USER/SAVED_PASSWORD
entries in the settings dict.
"""
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \
self.connection.settings_dict['USER']
real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \
self.connection.settings_dict['PASSWORD']
real_test_settings = real_settings['TEST']
test_settings = self.connection.settings_dict['TEST']
real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \
self.connection.settings_dict['USER'] = parameters['user']
real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password']
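        # Hedged example of the credential shuffle above (values are hypothetical):
        #   before: USER='app', PASSWORD='app_pw'
        #   after:  USER=parameters['user'], PASSWORD=parameters['password'],
        #           SAVED_USER='app', SAVED_PASSWORD='app_pw'
        # _destroy_test_db() later restores USER/PASSWORD from the SAVED_* entries so
        # the main connection can drop the test user and tablespaces.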
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary database
whose settings are given
"""
self.connection.settings_dict['USER'] = primary_settings_dict['USER']
self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD']
def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber):
# There are objects in the test tablespace which prevent dropping it
# The easy fix is to drop the test user -- but are we allowed to do so?
print("There are objects in the old test database which prevent its destruction.")
print("If they belong to the test user, deleting the user will allow the test "
"database to be recreated.")
print("Otherwise, you will need to find and remove each of these objects, "
"or use a different tablespace.\n")
if self._test_user_create():
if not autoclobber:
confirm = input("Type 'yes' to delete user %s: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test user: %s\n" % e)
sys.exit(2)
try:
if verbosity >= 1:
print("Destroying old test database for alias '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
else:
print("Django is configured to use pre-existing test user '%s',"
" and will not attempt to delete it.\n" % parameters['user'])
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
self.connection.close()
parameters = self._get_test_db_params()
cursor = self._maindb_connection.cursor()
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._maindb_connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity, keepdb=False):
if verbosity >= 2:
print("_create_test_db(): dbname = %s" % parameters['user'])
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(datafile)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize)s
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(datafile_tmp)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize_tmp)s
""",
]
# Ignore "tablespace already exists" error when keepdb is on.
acceptable_ora_err = 'ORA-01543' if keepdb else None
self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err)
def _create_test_user(self, cursor, parameters, verbosity, keepdb=False):
if verbosity >= 2:
print("_create_test_user(): username = %s" % parameters['user'])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY "%(password)s"
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CREATE SESSION,
CREATE TABLE,
CREATE SEQUENCE,
CREATE PROCEDURE,
CREATE TRIGGER
TO %(user)s""",
]
# Ignore "user already exists" error when keepdb is on
acceptable_ora_err = 'ORA-01920' if keepdb else None
success = self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err)
# If the password was randomly generated, change the user accordingly.
if not success and self._test_settings_get('PASSWORD') is None:
set_password = 'ALTER USER %(user)s IDENTIFIED BY "%(password)s"'
self._execute_statements(cursor, [set_password], parameters, verbosity)
# Most test-suites can be run without the create-view privilege. But some need it.
extra = "GRANT CREATE VIEW TO %(user)s"
success = self._execute_allow_fail_statements(cursor, [extra], parameters, verbosity, 'ORA-01031')
if not success and verbosity >= 2:
print("Failed to grant CREATE VIEW permission to test user. This may be ok.")
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_execute_test_db_destruction(): dbname=%s" % parameters['user'])
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_destroy_test_user(): user=%s" % parameters['user'])
print("Be patient. This can take some time...")
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity, allow_quiet_fail=False):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
if (not allow_quiet_fail) or verbosity >= 2:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _execute_allow_fail_statements(self, cursor, statements, parameters, verbosity, acceptable_ora_err):
"""
Execute statements which are allowed to fail silently if the Oracle
error code given by `acceptable_ora_err` is raised. Return True if the
statements execute without an exception, or False otherwise.
"""
try:
# Statement can fail when acceptable_ora_err is not None
allow_quiet_fail = acceptable_ora_err is not None and len(acceptable_ora_err) > 0
self._execute_statements(cursor, statements, parameters, verbosity, allow_quiet_fail=allow_quiet_fail)
return True
except DatabaseError as err:
description = str(err)
if acceptable_ora_err is None or acceptable_ora_err not in description:
raise
return False
def _get_test_db_params(self):
return {
'dbname': self._test_database_name(),
'user': self._test_database_user(),
'password': self._test_database_passwd(),
'tblspace': self._test_database_tblspace(),
'tblspace_temp': self._test_database_tblspace_tmp(),
'datafile': self._test_database_tblspace_datafile(),
'datafile_tmp': self._test_database_tblspace_tmp_datafile(),
'maxsize': self._test_database_tblspace_size(),
'maxsize_tmp': self._test_database_tblspace_tmp_size(),
}
def _test_settings_get(self, key, default=None, prefixed=None):
"""
Return a value from the test settings dict,
or a given default,
or a prefixed entry from the main settings dict
"""
settings_dict = self.connection.settings_dict
val = settings_dict['TEST'].get(key, default)
if val is None and prefixed:
val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
return val
def _test_database_name(self):
return self._test_settings_get('NAME', prefixed='NAME')
def _test_database_create(self):
return self._test_settings_get('CREATE_DB', default=True)
def _test_user_create(self):
return self._test_settings_get('CREATE_USER', default=True)
def _test_database_user(self):
return self._test_settings_get('USER', prefixed='USER')
def _test_database_passwd(self):
password = self._test_settings_get('PASSWORD')
if password is None and self._test_user_create():
# Oracle passwords are limited to 30 chars and can't contain symbols.
password = get_random_string(length=30)
return password
def _test_database_tblspace(self):
return self._test_settings_get('TBLSPACE', prefixed='USER')
def _test_database_tblspace_tmp(self):
settings_dict = self.connection.settings_dict
return settings_dict['TEST'].get('TBLSPACE_TMP',
TEST_DATABASE_PREFIX + settings_dict['USER'] + '_temp')
def _test_database_tblspace_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace()
return self._test_settings_get('DATAFILE', default=tblspace)
def _test_database_tblspace_tmp_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace_tmp()
return self._test_settings_get('DATAFILE_TMP', default=tblspace)
def _test_database_tblspace_size(self):
return self._test_settings_get('DATAFILE_MAXSIZE', default='500M')
def _test_database_tblspace_tmp_size(self):
return self._test_settings_get('DATAFILE_TMP_MAXSIZE', default='500M')
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
machinery to work. This isn't a great deal in this case because DB
names as handled by Django haven't real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
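# Illustrative sketch (hypothetical values): the _test_* helpers above read
# optional keys from the connection's TEST settings dict. A DATABASES entry
# that exercises them might look like:
#
#   DATABASES = {
#       'default': {
#           'ENGINE': 'django.db.backends.oracle',
#           'NAME': 'xe',
#           'USER': 'main_user',
#           'PASSWORD': 'secret',
#           'TEST': {
#               'USER': 'test_user',            # read by _test_database_user()
#               'TBLSPACE': 'test_tblspace',    # read by _test_database_tblspace()
#               'CREATE_DB': True,              # read by _test_database_create()
#               'CREATE_USER': True,            # read by _test_user_create()
#           },
#       },
#   }
#
# Omitted keys fall back to built-in defaults or to TEST_DATABASE_PREFIX plus the
# corresponding main setting (see _test_settings_get above).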
|
|
import os
import yaml
import traceback
import logging
import socket
from coder import encode
from functools import wraps
from importlib import import_module
from time import sleep
RUN_NAME = 'light_on'
START_NAME = 'start'
STOP_NAME = 'stop'
logger = logging.getLogger(__name__)
def iter_func(attr):
"""
Create a wrapper that executes the wrapped function for every element
in getattr(self, attr).
"""
def wrapper(func):
@wraps(func)
def inner(self, *args, **kwargs):
for mod in getattr(self, attr):
func(self, mod, *args, **kwargs)
return inner
return wrapper
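# Minimal usage sketch (hypothetical names) of iter_func: a method decorated with
# iter_func('modules') is called once per element of self.modules, receiving that
# element as its first argument after self.
#
#   class Demo(object):
#       def __init__(self):
#           self.modules = ['a', 'b']
#
#       @iter_func('modules')
#       def show(self, mod, prefix):
#           print(prefix, mod)
#
#   Demo().show('module:')   # prints "module: a" then "module: b"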
class Client(object):
def __init__(self, config):
self.config = config
self.clear_modules()
for name in self.config['modules']:
self.register_module(name)
def clear_modules(self):
"""Clear all registered modules"""
self.modules_with_start = []
self.modules_with_stop = []
self.modules = []
def register_module(self, module_name):
"""
Register a module.
A module must define a `light_on` function.
A module may define a `stop` function. This will be called when
shutting down, after `light_on` is called for the last time.
A module may define a `start` function only when it also defines a
`stop` function. This will be called when initialising, before
`light_on` is called for the first time.
"""
try:
module = import_module(module_name)
except ImportError:
logger.error(traceback.format_exc())
else:
# Check whether the module is usable
if not hasattr(module, RUN_NAME):
raise ImportError(
"{} must have a {} function".format(
module.__name__,
RUN_NAME
)
)
self.modules.append(module)
# Check whether the module has a `stop` function
if hasattr(module, STOP_NAME):
self.modules_with_stop.append(module)
# Check whether the module has a `start` function
# and verify it can be stopped.
if hasattr(module, START_NAME):
                if not hasattr(module, STOP_NAME):
raise ImportError(
"A module with a {} function must also have a {}"
" function ({})".format(
START_NAME,
STOP_NAME,
module.__name__
)
)
self.modules_with_start.append(module)
def run(self):
"""
Keep checking the value of the lights until KeyboardInterrupt
"""
try:
self.start_modules()
while True:
self.lights_on()
sleep(self.config['sleep_seconds'])
except KeyboardInterrupt:
logger.info("Bye!")
        except Exception:
            logger.error("Unhandled exception in the main loop")
logger.error(traceback.format_exc())
finally:
self.stop_modules()
self.send_msg((0, 0), (32, 16), (0, 0, 0))
@iter_func('modules_with_start')
def start_modules(self, mod):
"""
Run the `start` functions of the modules that have it
"""
logger.debug("Starting: " + mod.__name__)
getattr(mod, START_NAME)(self.config)
@iter_func('modules_with_stop')
def stop_modules(self, mod):
"""
Run the `stop` functions of the modules that have it
"""
# Accept errors at stopping, because an erroneous module should not
# stop other modules from stopping
logger.info("Stopping: " + mod.__name__)
try:
getattr(mod, STOP_NAME)(self.config)
logger.info("Stopped {}".format(mod.__name__))
        except Exception:
logger.error(traceback.format_exc())
logger.warning("{} wasn't stopped!".format(mod.__name__))
@iter_func('modules')
def lights_on(self, mod):
lights = mod.light_on(self.config)
logger.debug('{}: {}'.format(mod.__name__, lights))
self.send_lights(mod.__name__, lights)
def send_lights(self, module_name, light_values):
"""
Send the value of some lights
"""
        # Look up this module's display configuration (position and size)
module_config = self.config['modules'][module_name]
if len(light_values) == 3:
pos = module_config['pos']
size = module_config['size']
self.send_msg(pos, size, light_values)
        # Multiple lights: the values come in groups of three (r, g, b)
elif len(light_values) % 3 == 0:
for i in range(0, len(light_values), 3):
pos = module_config[i // 3]['pos']
size = module_config[i // 3]['size']
one_light = light_values[i:(i+3)]
logger.debug(
"Sending: pos: {}, size: {}, values: {}".format(
pos,
size,
one_light
))
self.send_msg(pos, size, one_light)
else:
logger.error(
"The tuple returned by {} does not have a length of a multiple"
" of 3".format(module_name)
)
def send_msg(self, pos, size, light_values, timeout=10):
signal = encode(*pos, *size, *light_values)
if self.config['testing']:
logger.debug("Sending: {:0>32b}".format(signal))
return
ip = self.config['server_ip']
port = self.config['server_port']
# AF_INET: IPv4
# SOCK_STREAM: TCP
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
s.connect((ip, port))
message = signal.to_bytes(4, byteorder='big')
s.send(message)
        # Wait for the server's acknowledgement and discard it
s.recv(1000)
s.close()
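    # Illustrative sketch (hypothetical values) of the configuration shape this
    # Client reads: 'modules' maps importable module names to their display
    # position/size, and 'testing' makes send_msg log the signal instead of
    # opening a socket.
    #
    #   config = {
    #       'sleep_seconds': 1.0,
    #       'testing': True,
    #       'server_ip': '192.168.0.10',
    #       'server_port': 8080,
    #       'modules': {
    #           'modules.clock': {'pos': (0, 0), 'size': (8, 8)},
    #       },
    #   }
    #   Client(config).run()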
if __name__ == '__main__':
from argparse import ArgumentParser, FileType
parser = ArgumentParser()
parser.add_argument(
'-c',
'--config_file',
type=FileType('r'),
help="Provide a configuration file that will be used to load in "
"settings for the lights. If omitted, it will use face detection"
)
parser.add_argument(
'-l',
'--log_level',
default='INFO'
)
parser.add_argument(
'--log_file',
default='log.txt',
)
args = parser.parse_args()
# logging.basicConfig(level=args.log_level)
# get the root logger and set its level to DEBUG
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(args.log_level)
# add formatter to ch
ch.setFormatter(logging.Formatter(
'%(levelname)s: %(name)s: %(message)s'
))
# Create file handler
fh = logging.FileHandler(args.log_file)
fh.setFormatter(logging.Formatter(
'%(asctime)s: %(levelname)s: %(name)s: %(message)s'
))
    # add both handlers to the root logger
root.addHandler(fh)
root.addHandler(ch)
if args.config_file is None:
# Use OpenFace to choose a configuration file
from modules.face_recognizer import FaceDetector as fr
argument_dict = {
'captureDevice': 0, 'height': 240, 'cuda': False, 'width': 320,
'threshold': 0.5, 'imgDim': 96,
'classifierModel': 'features/classifier.pkl',
'networkModel': '/home/m0re/projects/openface/models/openface/nn4.small2.v1.t7',
'verbose': False, 'dlibFacePredictor': '/home/m0re/projects/openface/models/dlib/shape_predictor_68_face_landmarks.dat'}
detec = fr.FaceDetector(argument_dict)
person = detec.recognize_person(
0, 320, 240, 'modules/face_recognizer/features/classifier.pkl',
0.7)
config_name = "{}.yml".format(person)
if person != "" and os.path.exists(config_name):
logger.info("{} logged in!".format(person))
with open(config_name) as f:
config = yaml.load(f)
else:
logger.info("Failed to log in, falling back on default config:"
" \'config.yml\'")
with open("config.yml") as f:
config = yaml.load(f)
else:
config = yaml.load(args.config_file)
args.config_file.close()
client = Client(config)
client.run()
logging.shutdown()
|
|
"""
Title: DCGAN to generate face images
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/04/29
Last modified: 2021/01/01
Description: A simple DCGAN trained using `fit()` by overriding `train_step` on CelebA images.
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
import os
import gdown
from zipfile import ZipFile
"""
## Prepare CelebA data
We'll use face images from the CelebA dataset, resized to 64x64.
"""
os.makedirs("celeba_gan")
url = "https://drive.google.com/uc?id=1O7m1010EJjLE5QxLZiM9Fpjs7Oj6e684"
output = "celeba_gan/data.zip"
gdown.download(url, output, quiet=True)
with ZipFile("celeba_gan/data.zip", "r") as zipobj:
zipobj.extractall("celeba_gan")
"""
Create a dataset from our folder, and rescale the images to the [0-1] range:
"""
dataset = keras.preprocessing.image_dataset_from_directory(
"celeba_gan", label_mode=None, image_size=(64, 64), batch_size=32
)
dataset = dataset.map(lambda x: x / 255.0)
"""
Let's display a sample image:
"""
for x in dataset:
plt.axis("off")
plt.imshow((x.numpy() * 255).astype("int32")[0])
break
"""
## Create the discriminator
It maps a 64x64 image to a binary classification score.
"""
discriminator = keras.Sequential(
[
keras.Input(shape=(64, 64, 3)),
layers.Conv2D(64, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Flatten(),
layers.Dropout(0.2),
layers.Dense(1, activation="sigmoid"),
],
name="discriminator",
)
discriminator.summary()
"""
## Create the generator
It mirrors the discriminator, replacing `Conv2D` layers with `Conv2DTranspose` layers.
"""
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
layers.Dense(8 * 8 * 128),
layers.Reshape((8, 8, 128)),
layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(256, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(512, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(3, kernel_size=5, padding="same", activation="sigmoid"),
],
name="generator",
)
generator.summary()
"""
## Override `train_step`
"""
class GAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super(GAN, self).__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
def compile(self, d_optimizer, g_optimizer, loss_fn):
super(GAN, self).compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
self.d_loss_metric = keras.metrics.Mean(name="d_loss")
self.g_loss_metric = keras.metrics.Mean(name="g_loss")
@property
def metrics(self):
return [self.d_loss_metric, self.g_loss_metric]
def train_step(self, real_images):
# Sample random points in the latent space
batch_size = tf.shape(real_images)[0]
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
# Decode them to fake images
generated_images = self.generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(tf.shape(labels))
# Train the discriminator
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = self.discriminator(self.generator(random_latent_vectors))
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
# Update metrics
self.d_loss_metric.update_state(d_loss)
self.g_loss_metric.update_state(g_loss)
return {
"d_loss": self.d_loss_metric.result(),
"g_loss": self.g_loss_metric.result(),
}
"""
## Create a callback that periodically saves generated images
"""
class GANMonitor(keras.callbacks.Callback):
def __init__(self, num_img=3, latent_dim=128):
self.num_img = num_img
self.latent_dim = latent_dim
def on_epoch_end(self, epoch, logs=None):
random_latent_vectors = tf.random.normal(shape=(self.num_img, self.latent_dim))
generated_images = self.model.generator(random_latent_vectors)
generated_images *= 255
generated_images.numpy()
for i in range(self.num_img):
img = keras.preprocessing.image.array_to_img(generated_images[i])
img.save("generated_img_%03d_%d.png" % (epoch, i))
"""
## Train the end-to-end model
"""
epochs = 1 # In practice, use ~100 epochs
gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
loss_fn=keras.losses.BinaryCrossentropy(),
)
gan.fit(
dataset, epochs=epochs, callbacks=[GANMonitor(num_img=10, latent_dim=latent_dim)]
)
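"""
As a quick sanity check we can sample directly from the trained generator
(a minimal sketch that only uses the `generator` and `latent_dim` defined above):
"""
noise = tf.random.normal(shape=(16, latent_dim))
fake_images = generator(noise, training=False)  # values in [0, 1], shape (16, 64, 64, 3)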
"""
Some of the last generated images around epoch 30
(results keep improving after that):

"""
|
|
"""
Additional strategies from Axelrod's two tournaments.
"""
import random
from axelrod import Player, Actions
from .memoryone import MemoryOnePlayer
C, D = Actions.C, Actions.D
flip_dict = {C: D, D: C}
## First Tournament
class Davis(Player):
"""A player starts by cooperating for 10 rounds then plays Grudger,
defecting if at any point the opponent has defected."""
name = 'Davis'
classifier = {
'memory_depth': float('inf'), # Long memory
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def __init__(self, rounds_to_cooperate=10):
"""
Parameters
----------
rounds_to_cooperate: int, 10
The number of rounds to cooperate initially
"""
Player.__init__(self)
self._rounds_to_cooperate = rounds_to_cooperate
self.init_args = (self._rounds_to_cooperate,)
def strategy(self, opponent):
"""Begins by playing C, then plays D for the remaining rounds if the
opponent ever plays D."""
if len(self.history) < self._rounds_to_cooperate:
return C
if opponent.defections:
return D
return C
class Feld(Player):
"""
Defects when opponent defects. Cooperates with a probability that decreases
to 0.5 at round 200.
"""
name = "Feld"
classifier = {
'memory_depth': 200, # Varies actually, eventually becomes depth 1
'stochastic': True,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def __init__(self, start_coop_prob=1.0, end_coop_prob=0.5,
rounds_of_decay=200):
"""
Parameters
----------
start_coop_prob, float
The initial probability to cooperate
end_coop_prob, float
The final probability to cooperate
rounds_of_decay, int
The number of rounds to linearly decrease from start_coop_prob
to end_coop_prob
"""
Player.__init__(self)
self._start_coop_prob = start_coop_prob
self._end_coop_prob = end_coop_prob
self._rounds_of_decay = rounds_of_decay
self.init_args = (start_coop_prob,
end_coop_prob,
rounds_of_decay)
def _cooperation_probability(self):
"""It's not clear what the interpolating function is, so we'll do
something simple that decreases monotonically from 1.0 to 0.5 over
200 rounds."""
diff = (self._end_coop_prob - self._start_coop_prob)
slope = diff / float(self._rounds_of_decay)
rounds = len(self.history)
return max(self._start_coop_prob + slope * rounds,
self._end_coop_prob)
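    # Worked example (sketch) with the defaults start=1.0, end=0.5, decay=200:
    # slope = (0.5 - 1.0) / 200 = -0.0025, so after 100 rounds the cooperation
    # probability is 1.0 - 0.0025 * 100 = 0.75, and from round 200 on it stays at 0.5.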
def strategy(self, opponent):
if not self.history:
return C
if opponent.history[-1] == D:
return D
p = self._cooperation_probability()
r = random.random()
if r < p:
return C
return D
class Grofman(MemoryOnePlayer):
"""
Cooperates with probability 2/7.
"""
name = "Grofman"
def __init__(self):
p = float(2) / 7
four_vector = (p, p, p, p)
super(self.__class__, self).__init__(four_vector)
self.init_args = ()
class Joss(MemoryOnePlayer):
"""
Cooperates with probability 0.9 when the opponent cooperates, otherwise
emulates Tit-For-Tat.
"""
name = "Joss"
def __init__(self, p=0.9):
"""
Parameters
----------
p, float
The probability of cooperating when the previous round was (C, C)
or (D, C), i.e. the opponent cooperated.
"""
four_vector = (p, 0, p, 0)
self.p = p
super(self.__class__, self).__init__(four_vector)
self.init_args = (p,)
def __repr__(self):
return "%s: %s" % (self.name, round(self.p, 2))
class Shubik(Player):
"""
Plays like Tit-For-Tat with the following modification. After
each retaliation, the number of rounds that Shubik retaliates
increases by 1.
"""
name = 'Shubik'
classifier = {
'memory_depth': float('inf'),
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def __init__(self):
Player.__init__(self)
self.is_retaliating = False
self.retaliation_length = 0
self.retaliation_remaining = 0
def _decrease_retaliation_counter(self):
"""Lower the remaining owed retaliation count and flip to non-retaliate
if the count drops to zero."""
if self.is_retaliating:
self.retaliation_remaining -= 1
if self.retaliation_remaining == 0:
self.is_retaliating = False
def strategy(self, opponent):
if not opponent.history:
return C
if opponent.history[-1] == D:
# Retaliate against defections
if self.history[-1] == C: # it's on now!
# Lengthen the retaliation period
self.is_retaliating = True
self.retaliation_length += 1
self.retaliation_remaining = self.retaliation_length
self._decrease_retaliation_counter()
return D
else:
# Just retaliate
if self.is_retaliating:
self._decrease_retaliation_counter()
return D
if self.is_retaliating:
# Are we retaliating still?
self._decrease_retaliation_counter()
return D
return C
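    # Illustrative play-out (sketch): Shubik's first retaliation lasts 1 round of D,
    # the next freshly-triggered retaliation lasts 2 rounds, then 3, and so on,
    # because retaliation_length grows by one each time a new retaliation starts.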
def reset(self):
Player.reset(self)
self.is_retaliating = False
self.retaliation_length = 0
self.retaliation_remaining = 0
class Tullock(Player):
"""
Cooperates for the first 11 rounds then randomly cooperates 10% less often
than the opponent has in previous rounds."""
name = "Tullock"
classifier = {
'memory_depth': 11, # long memory, modified by init
'stochastic': True,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def __init__(self, rounds_to_cooperate=11):
"""
Parameters
----------
        rounds_to_cooperate: int, 11
The number of rounds to cooperate initially
"""
Player.__init__(self)
self._rounds_to_cooperate = rounds_to_cooperate
self.__class__.memory_depth = rounds_to_cooperate
self.init_args = (rounds_to_cooperate,)
def strategy(self, opponent):
rounds = self._rounds_to_cooperate
if len(self.history) < rounds:
return C
cooperate_count = opponent.history[-rounds:].count(C)
prop_cooperate = cooperate_count / float(rounds)
prob_cooperate = max(0, prop_cooperate - 0.10)
r = random.random()
if r < prob_cooperate:
return C
return D
## Second Tournament
class Champion(Player):
"""
Strategy submitted to Axelrod's second tournament by Danny Champion.
"""
name = "Champion"
classifier = {
'memory_depth': float('inf'),
'stochastic': True,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def strategy(self, opponent):
current_round = len(self.history)
expected_length = self.tournament_attributes['length']
# Cooperate for the first 1/20-th of the game
if current_round == 0:
return C
if current_round < expected_length / 20.:
return C
# Mirror partner for the next phase
if current_round < expected_length * 5 / 40.:
return opponent.history[-1]
        # Otherwise defect only when the opponent just defected and their overall
        # defection rate exceeds both 0.4 and a fresh random draw; cooperate if not
defection_prop = float(opponent.defections) / len(opponent.history)
if opponent.history[-1] == D:
r = random.random()
if defection_prop > max(0.4, r):
return D
return C
class Eatherley(Player):
"""
Strategy submitted to Axelrod's second tournament by Graham Eatherley.
"""
name = "Eatherley"
classifier = {
'memory_depth': float('inf'),
'stochastic': True,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def strategy(self, opponent):
# Cooperate on the first move
if not len(self.history):
return C
# Reciprocate cooperation
if opponent.history[-1] == C:
return C
# Respond to defections with probability equal to opponent's total
# proportion of defections
defection_prop = float(opponent.defections) / len(opponent.history)
r = random.random()
if r < defection_prop:
return D
return C
class Tester(Player):
"""
Submitted to Axelrod's second tournament by David Gladstein.
    Defects on the first move, then plays TFT if the opponent ever defects (after
    one apology cooperation round). Otherwise alternates cooperation and defection.
"""
name = "Tester"
classifier = {
'memory_depth': float('inf'),
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def __init__(self):
Player.__init__(self)
self.is_TFT = False
def strategy(self, opponent):
# Defect on the first move
if not opponent.history:
return D
# Am I TFT?
if self.is_TFT:
return D if opponent.history[-1:] == [D] else C
else:
# Did opponent defect?
if opponent.history[-1] == D:
self.is_TFT = True
return C
if len(self.history) in [1, 2]:
return C
# Alternate C and D
return flip_dict[self.history[-1]]
def reset(self):
Player.reset(self)
self.is_TFT = False
|
|
#!python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""The API catalogs-zone of the codeintel database.
See the database/database.py module docstring for an overview.
"""
import sys
import os
from os.path import (join, dirname, exists, expanduser, splitext, basename,
split, abspath, isabs, isdir, isfile, normpath,
normcase)
import pickle
import threading
import time
from hashlib import md5
import bisect
import fnmatch
from glob import glob
from pprint import pprint, pformat
import logging
from io import StringIO
import codecs
import copy
import weakref
import queue
import ciElementTree as ET
from codeintel2.common import *
from codeintel2.buffer import Buffer
from codeintel2.util import dedent, safe_lang_from_lang, banner, hotshotit
from codeintel2.tree import tree_from_cix_path
from codeintel2.database.util import filter_blobnames_for_prefix
from codeintel2.database.resource import AreaResource
#---- globals
log = logging.getLogger("codeintel.db")
# log.setLevel(logging.DEBUG)
#---- Database zone and lib implementations
class CatalogsZone(object):
"""Singleton zone managing the db/catalogs/... area.
TODO: Locking: .cull_mem() and .save() will be called periodically
on indexer thread. Anything they access needs to be guarded.
"""
_res_index = None
_blob_index = None
_toplevelname_index = None
_toplevelprefix_index = None
_have_updated_at_least_once = False
def __init__(self, mgr, catalog_dirs=None):
self.mgr = mgr
self.db = mgr.db
if catalog_dirs is None:
catalog_dirs = []
assert isinstance(catalog_dirs, list)
self.catalog_dirs = catalog_dirs
self.base_dir = join(self.db.base_dir, "db", "catalogs")
self._lib_cache = {} # (lang, selection_res_ids) -> CatalogLib
self._lock = threading.RLock()
self._blob_and_atime_from_blobname_from_lang_cache = {}
self._dbsubpaths_and_lpaths_to_save = []
def __repr__(self):
return "<catalog zone>"
def _selection_from_selector(self, selections):
"""Given a sequence of catalog selection strings (each is a
catalog name or full path to a catalog .cix file) return a dict
mapping:
<normalized-selector> -> <selection-string>
If "selections" is None, this returns None.
"""
if selections is None:
return None
selection_from_selector = {}
for selection in selections:
if isabs(selection):
selector = normpath(normcase(selection))
else:
selector = selection.lower()
selection_from_selector[selector] = selection
return selection_from_selector
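    # Illustrative sketch (hypothetical inputs): given
    #   selections = ["PyWin32", "/opt/catalogs/Django.cix"]
    # this returns
    #   {"pywin32": "PyWin32",
    #    normpath(normcase("/opt/catalogs/Django.cix")): "/opt/catalogs/Django.cix"}
    # i.e. bare names are lower-cased and absolute paths are normalized.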
_res_ids_from_selector_cache = None
def _res_ids_from_selections(self, selections):
"""Returns a tuple of the database resource ids for the given
selections and a list of selections that didn't match any loaded
resources.
"""
if self._res_ids_from_selector_cache is None:
cache = self._res_ids_from_selector_cache = {}
for cix_area_path, res_data in list(self.res_index.items()):
cix_path = AreaResource(cix_area_path).path
res_id = res_data[0]
cache[normpath(normcase(cix_path))] = [res_id]
name = splitext(basename(cix_path))[0].lower()
if name not in cache:
cache[name] = []
cache[name].append(res_id)
log.debug("_res_ids_from_selector_cache: %r", cache)
res_ids = []
missing_selections = []
for selector, selection \
in list(self._selection_from_selector(selections).items()):
try:
res_ids += self._res_ids_from_selector_cache[selector]
except KeyError as ex:
missing_selections.append(selection)
log.debug("_res_ids_from_selections: res_ids=%r", res_ids)
return tuple(res_ids), missing_selections
@LazyClassAttribute
def _std_catalog_dir(cls):
return join(dirname(dirname(abspath(__file__))), "catalogs")
_catalog_dirs = None
@property
def catalog_dirs(self):
return self._catalog_dirs
@catalog_dirs.setter
def catalog_dirs(self, value):
assert not isinstance(value, str), \
"catalog_dirs must be an iterable, not a string"
catalog_dirs = list(value)
if self._std_catalog_dir not in catalog_dirs:
catalog_dirs.append(self._std_catalog_dir)
self._catalog_dirs = catalog_dirs
# cause a rescan next time we try to get a catalog lib
self._have_updated_at_least_once = False
def get_lib(self, lang, selections=None):
"""Return a CatalogLib for the given lang and selections."""
assert not isinstance(selections, str),\
"catalog lib 'selections' must be None or a sequence, not %r: %r"\
% (type(selections), selections)
if not self._have_updated_at_least_once:
self.update(selections)
if selections is not None:
selection_res_ids, missing_selections \
= self._res_ids_from_selections(selections)
if missing_selections:
self.update(missing_selections)
selection_res_ids, missing_selections \
= self._res_ids_from_selections(selections)
if missing_selections:
log.warn("the following catalog selections didn't match "
"any loaded API catalog: '%s'",
"', '".join(missing_selections))
else:
selection_res_ids = None
key = (lang, selection_res_ids)
if key not in self._lib_cache:
self._lib_cache[key] = CatalogLib(self, lang,
selections, selection_res_ids)
return self._lib_cache[key]
@property
def res_index(self):
"""Load and return the resource index (res_index)."""
if self._res_index is None:
idxpath = join(self.base_dir, "res_index")
self._res_index = self.db.load_pickle(idxpath, {})
return self._res_index
@property
def blob_index(self):
"""Load and return the blob index (blob_index)."""
if self._blob_index is None:
idxpath = join(self.base_dir, "blob_index")
self._blob_index = self.db.load_pickle(idxpath, {})
return self._blob_index
@property
def toplevelname_index(self):
"""Load and return the top-level name index (toplevelname_index)."""
if self._toplevelname_index is None:
idxpath = join(self.base_dir, "toplevelname_index")
self._toplevelname_index = self.db.load_pickle(idxpath, {})
return self._toplevelname_index
@property
def toplevelprefix_index(self):
"""Load and return the top-level prefix index (toplevelprefix_index)."""
if self._toplevelprefix_index is None:
idxpath = join(self.base_dir, "toplevelprefix_index")
self._toplevelprefix_index = self.db.load_pickle(idxpath, {})
return self._toplevelprefix_index
def save(self):
self._lock.acquire()
try:
for dbsubpath, lpaths in self._dbsubpaths_and_lpaths_to_save:
self.db.save_pickle(join(self.base_dir, dbsubpath), lpaths)
self._dbsubpaths_and_lpaths_to_save = []
finally:
self._lock.release()
def cull_mem(self):
"""Drop blobs from cache that have not been accessed in over 5
minutes.
To attempt to keep memory consumption under control we want to
ensure we don't keep everything cached from the db in memory
until process completion.
"""
# TOTEST: Does Python/Komodo actually release this memory or
# are we kidding ourselves?
self._lock.acquire()
try:
N = 10
if len(self._blob_and_atime_from_blobname_from_lang_cache) < N:
# Too few blobs in memory to bother culling.
return
log.info("catalog: culling memory")
now = time.time()
for lang, blob_and_atime_from_blobname \
in list(self._blob_and_atime_from_blobname_from_lang_cache.items()):
for blobname, (blob, atime) in list(blob_and_atime_from_blobname.items()):
if now - atime > 300.0: # >5 minutes since last access
del blob_and_atime_from_blobname[blobname]
finally:
self._lock.release()
def reportMemory(self):
"""
Report on memory usage from this CatalogsZone.
@returns {dict} memory usage; keys are the paths, values are a dict of
"amount" -> number
"units" -> "bytes" | "count"
"desc" -> str description
"""
log.debug("CatalogsZone: reporting memory")
import memutils
total_mem_usage = 0
result = {}
for lang, blob_and_atime_from_blobname in list(self._blob_and_atime_from_blobname_from_lang_cache.items()):
for blobname, [blob, atime] in list(blob_and_atime_from_blobname.items()):
result["explicit/python/codeintel/%s/catalog/%s" % (lang, blobname)] = {
"amount": memutils.memusage(blob),
"units": "bytes",
"desc": "The number of bytes of %s codeintel %s catalog blobs." % (lang, blobname),
}
return result
def avail_catalogs(self, selections=None):
"""Generate a list of available catalogs.
"selections" (optional) is a list of string of the same form
as to `.get_lib()'. It is used to determine the boolean
        value of <selected> in the yielded dicts.
        Yields dicts of the following form:
{"name": <catalog-name>, # 'name' attr of <codeintel> tag
# or file basename
"lang": <lang>, # 'lang' attribute of first <file> tag
"description": <desc>, # 'description' attr of <codeintel>
"cix_path": <cix-path>,
"selected": <selected>,
"selection": <selection>,
}
where <selected> is boolean indicating if this catalog is
selected according to "selections" and <selection> is the string
in "selections" that resulted in this.
"""
selection_from_selector = self._selection_from_selector(selections)
for cix_path in (cix_path for d in self.catalog_dirs if exists(d)
for cix_path in glob(join(d, "*.cix"))):
name = lang = description = None
try:
for event, elem in ET.iterparse(cix_path, events=("start",)):
if elem.tag == "codeintel":
name = elem.get("name")
description = elem.get("description")
elif elem.tag == "file":
lang = elem.get("lang")
break
except ET.XMLParserError as ex:
log.warn("%s: error reading catalog, skipping it (%s)",
cix_path, ex)
continue
if lang is None:
log.warn("%s: no 'lang' attribute on catalog <file> tag, "
"skipping it", cix_path)
continue
if name is None:
name = splitext(basename(cix_path))[0]
norm_name = name.lower()
norm_cix_path = normpath(normcase(cix_path))
if selection_from_selector is None:
selected = True
selection = None
else:
selection = (selection_from_selector.get(norm_name)
or selection_from_selector.get(norm_cix_path))
selected = selection is not None
yield {"name": name,
"lang": lang,
"description": description,
"cix_path": cix_path,
"selected": selected,
"selection": selection}
def update(self, selections=None, progress_cb=None):
"""Update the catalog as necessary.
"selections" (optional) is a list of string of the same form
as to `.get_lib()' -- used here to filter the catalogs
that we consider for updating.
"progress_cb" (optional) is a callable that is called as
follows to show the progress of the update:
progress_cb(<desc>, <value>)
where <desc> is a short string describing the current step
and <value> is an integer between 0 and 100 indicating the
level of completeness.
"""
self._lock.acquire()
try:
self._have_updated_at_least_once = True
# Figure out what updates need to be done...
if progress_cb:
try:
progress_cb("Determining necessary catalog updates...", 5)
except:
log.exception("error in progress_cb (ignoring)")
res_name_from_res_path = dict( # this is our checklist
(p, v[2]) for p, v in list(self.res_index.items()))
todos = []
log.info("updating %s: %d catalog dir(s)", self,
len(self.catalog_dirs))
for catalog_info in self.avail_catalogs(selections):
cix_path = catalog_info["cix_path"]
res = AreaResource(cix_path)
# check that the update-time is the mtime (i.e. up-to-date)
try:
res_id, last_updated, name, res_data \
= self.res_index[res.area_path]
except KeyError:
# add this new CIX file
todos.append(("add", res, catalog_info["name"]))
else:
mtime = os.stat(cix_path).st_mtime
if last_updated != mtime: # epsilon? '>=' instead of '!='?
# update with newer version
todos.append(("update", res, catalog_info["name"]))
# else:
# log.debug("not updating '%s' catalog: mtime is unchanged",
# catalog_info["name"])
del res_name_from_res_path[res.area_path] # tick it off
for res_area_path, res_name in list(res_name_from_res_path.items()):
# remove this obsolete CIX file
try:
todos.append(("remove", AreaResource(
res_area_path), res_name))
except ValueError as ex:
# Skip resources in unknown areas. This is primarily to
# allow debugging/testing (when the set of registered
# path_areas may not include the set when running in
# Komodo.)
pass
# Filter todos on selections, if any.
if selections is not None:
selection_from_selector = self._selection_from_selector(
selections)
before = todos[:]
todos = [todo for todo in todos
if todo[2].lower() in selection_from_selector
or normpath(normcase(todo[1].path)) in selection_from_selector
]
# ... and then do them.
if not todos:
return
for i, (action, res, name) in enumerate(todos):
log.debug("%s `%s' catalog (%s)", action, name, res)
try:
if action == "add":
desc = "Adding '%s' API catalog" % basename(
res.subpath)
if progress_cb:
try:
progress_cb(desc, (5 + 95/len(todos)*i))
except:
log.exception(
"error in progress_cb (ignoring)")
else:
self.db.report_event(desc)
self._add_res(res)
elif action == "remove":
desc = "Removing '%s' API catalog" % basename(
res.subpath)
if progress_cb:
try:
progress_cb(desc, (5 + 95/len(todos)*i))
except:
log.exception(
"error in progress_cb (ignoring)")
else:
self.db.report_event(desc)
self._remove_res(res)
elif action == "update":
desc = "Updating '%s' API catalog" % basename(
res.subpath)
if progress_cb:
try:
progress_cb(desc, (5 + 95/len(todos)*i))
except:
log.exception(
"error in progress_cb (ignoring)")
else:
self.db.report_event(desc)
# XXX Bad for filesystem. Change this to do it
# more intelligently if possible.
self._remove_res(res)
self._add_res(res)
except DatabaseError as ex:
log.warn("%s (skipping)" % ex)
if progress_cb:
try:
progress_cb("Saving catalog indices...", 95)
except:
log.exception("error in progress_cb (ignoring)")
self._res_ids_from_selector_cache = None # invalidate this cache
if self._res_index is not None:
self.db.save_pickle(
join(self.base_dir, "res_index"),
self._res_index)
if self._blob_index is not None:
self.db.save_pickle(
join(self.base_dir, "blob_index"),
self._blob_index)
if self._toplevelname_index is not None:
self.db.save_pickle(
join(self.base_dir, "toplevelname_index"),
self._toplevelname_index)
if self._toplevelprefix_index is not None:
self.db.save_pickle(
join(self.base_dir, "toplevelprefix_index"),
self._toplevelprefix_index)
finally:
self._lock.release()
_existing_res_ids_cache = None
_new_res_id_counter = 0
def _new_res_id(self):
if self._existing_res_ids_cache is None:
self._existing_res_ids_cache \
= dict((d[0], True) for d in list(self.res_index.values()))
while True:
if self._new_res_id_counter not in self._existing_res_ids_cache:
new_res_id = self._new_res_id_counter
self._new_res_id_counter += 1
self._existing_res_ids_cache[new_res_id] = True
return new_res_id
self._new_res_id_counter += 1
def _remove_res(self, res):
LEN_PREFIX = self.db.LEN_PREFIX
res_id, last_updated, name, res_data = self.res_index[res.area_path]
# res_data: {lang -> blobname -> ilk -> toplevelnames}
for lang, tfifb in list(res_data.items()):
dbfile_and_res_id_from_blobname = self.blob_index[lang]
for blobname, toplevelnames_from_ilk in list(tfifb.items()):
# Update 'blob_index' for $lang.
dbfile, res_id = dbfile_and_res_id_from_blobname[blobname]
del dbfile_and_res_id_from_blobname[blobname]
# Remove ".blob" file (and associated caches).
pattern = join(self.base_dir, safe_lang_from_lang(lang),
dbfile+".*")
try:
for path in glob(pattern):
log.debug("fs-write: remove catalog %s blob file '%s'",
lang, basename(path))
os.remove(path)
except EnvironmentError as ex:
# XXX If get lots of these, then try harder. Perhaps
# creating a zombies area, or creating a list of
# them: self.db.add_zombie(dbpath).
                    # XXX This isn't a correct analysis: the dbfile may just
                    # not have been there.
                    log.warn("could not remove dbfile '%s' (%s '%s'): "
                             "leaving zombie", dbfile, lang, blobname)
# Update 'toplevel*_index' for $lang.
# toplevelname_index: {lang -> ilk -> toplevelname -> res_id -> blobnames}
# toplevelprefix_index: {lang -> ilk -> prefix -> res_id ->
# toplevelnames}
for ilk, toplevelnames in toplevelnames_from_ilk.items():
try:
bfrft = self.toplevelname_index[lang][ilk]
for toplevelname in toplevelnames:
del bfrft[toplevelname][res_id]
if not bfrft[toplevelname]:
del bfrft[toplevelname]
except KeyError as ex:
self.db.corruption("CatalogsZone._remove_res",
"error removing top-level names of ilk '%s' for "
"'%s' resource from toplevelname_index: %s"
% (ilk, basename(res.path), ex),
"ignore")
try:
tfrfp = self.toplevelprefix_index[lang][ilk]
for toplevelname in toplevelnames:
prefix = toplevelname[:LEN_PREFIX]
del tfrfp[prefix][res_id]
if not tfrfp[prefix]:
del tfrfp[prefix]
except KeyError as ex:
self.db.corruption("CatalogsZone._remove_res",
"error removing top-level name of ilk '%s' for "
"'%s' resource from toplevelprefix_index: %s"
% (ilk, basename(res.path), ex),
"ignore")
del self.res_index[res.area_path]
def _add_res(self, res):
cix_path = res.path
try:
tree = tree_from_cix_path(cix_path)
except ET.XMLParserError as ex:
log.warn("could not load `%s' into catalog (skipping): %s",
cix_path, ex)
return
LEN_PREFIX = self.db.LEN_PREFIX
res_id = self._new_res_id()
res_data = {} # {lang -> blobname -> ilk -> toplevelnames}
name = tree.get("name") or splitext(basename(cix_path))[0]
for blob in tree.findall("file/scope"):
lang, blobname = blob.get("lang"), blob.get("name")
if not lang:
raise DatabaseError("add `%s': no 'lang' attr on %r"
% (res, blob))
# Create 'res_data'.
tfifb = res_data.setdefault(lang, {})
toplevelnames_from_ilk = tfifb.setdefault(blobname, {})
if lang in self.db.import_everything_langs:
for toplevelname, elem in blob.names.items():
ilk = elem.get("ilk") or elem.tag
if ilk not in toplevelnames_from_ilk:
toplevelnames_from_ilk[ilk] = set([toplevelname])
else:
toplevelnames_from_ilk[ilk].add(toplevelname)
# Update 'toplevel*_index'.
# toplevelname_index: {lang -> ilk -> toplevelname -> res_id -> blobnames}
# toplevelprefix_index: {lang -> ilk -> prefix -> res_id ->
# toplevelnames}
bfrftfi = self.toplevelname_index.setdefault(lang, {})
tfrfpfi = self.toplevelprefix_index.setdefault(lang, {})
for ilk, toplevelnames in toplevelnames_from_ilk.items():
bfrft = bfrftfi.setdefault(ilk, {})
tfrfp = tfrfpfi.setdefault(ilk, {})
for toplevelname in toplevelnames:
bfr = bfrft.setdefault(toplevelname, {})
if res_id not in bfr:
bfr[res_id] = set([blobname])
else:
bfr[res_id].add(blobname)
prefix = toplevelname[:LEN_PREFIX]
tfr = tfrfp.setdefault(prefix, {})
if res_id not in tfr:
tfr[res_id] = set([toplevelname])
else:
tfr[res_id].add(toplevelname)
# Update 'blob_index'.
dbfile_and_res_id_from_blobname \
= self.blob_index.setdefault(lang, {})
assert blobname not in dbfile_and_res_id_from_blobname, \
("codeintel: %s %r blob in `%s' collides "
"with existing %s %r blob (from res_id %r) in catalog: "
"(XXX haven't decided how to deal with that yet)"
% (lang, blobname, cix_path, lang, blobname,
dbfile_and_res_id_from_blobname[blobname][1]))
dbfile = self.db.bhash_from_blob_info(cix_path, lang, blobname)
dbfile_and_res_id_from_blobname[blobname] = (dbfile, res_id)
# Write out '.blob' file.
dbdir = join(self.base_dir, safe_lang_from_lang(lang))
if not exists(dbdir):
log.debug("fs-write: mkdir '%s'", dbdir)
os.makedirs(dbdir)
log.debug("fs-write: catalog %s blob '%s'", lang, dbfile)
ET.ElementTree(blob).write(join(dbdir, dbfile+".blob"))
# Update 'res_index'.
last_updated = os.stat(cix_path).st_mtime
self.res_index[res.area_path] \
= (res_id, last_updated, name, res_data)
def res_id_from_lang_and_blobname(self, lang, blobname):
try:
dbfile, res_id = self.blob_index[lang][blobname]
except KeyError:
return None
else:
return res_id
def get_blob(self, lang, blobname, look_in_cache_only=False):
try:
dbfile, res_id = self.blob_index[lang][blobname]
except KeyError:
return None
        # If the blob is already in the cache: return it and update its atime.
now = time.time()
blob_and_atime_from_blobname \
= self._blob_and_atime_from_blobname_from_lang_cache.setdefault(lang, {})
if blobname in blob_and_atime_from_blobname:
log.debug("cache-read: load %s blob `%s'", lang, blobname)
blob, atime = blob_and_atime_from_blobname[blobname]
blob_and_atime_from_blobname[blobname] = (blob, now)
return blob
# Need to load and cache it.
if look_in_cache_only:
return None
dbsubpath = join(self.base_dir, safe_lang_from_lang(lang), dbfile)
blob = self.db.load_blob(dbsubpath)
blob_and_atime_from_blobname[blobname] = (blob, now)
return blob
def lpaths_from_lang_and_blobname(self, lang, blobname):
"""Get lpaths for the named blob.
We get it from the blob's "lpaths" cache key (calculating that
if necessary).
"""
blob = self.get_blob(lang, blobname, look_in_cache_only=True)
if blob is not None:
if "lpaths" in blob.cache:
return blob.cache["lpaths"]
else:
blob = self.get_blob(lang, blobname)
if blob is None:
raise NotFoundInDatabase("%s '%s' blob not found in catalogs"
% (lang, blobname))
if "lpaths" in blob.cache:
return blob.cache["lpaths"]
# Need to calculate lpaths from 'blob'.
log.debug("calc symbol info for %s '%s' catalog blob", lang, blobname)
langintel = self.mgr.langintel_from_lang(lang)
lpaths = langintel.lpaths_from_blob(blob)
# Update cache and queue this up to be saved to disk (by .save()).
blob.cache["lpaths"] = lpaths
dbfile, res_id = self.blob_index[lang][blobname]
self._lock.acquire()
try:
self._dbsubpaths_and_lpaths_to_save.append(
(join(safe_lang_from_lang(lang), dbfile+".lpaths"), lpaths)
)
finally:
self._lock.release()
return lpaths
class CatalogLib(object):
"""A light lang-specific and selection-filtered view on the whole
CatalogsZone.
"""
name = "cataloglib"
def __init__(self, catalogs_zone, lang,
selections=None, selection_res_ids=None):
self.catalogs_zone = catalogs_zone
self.lang = lang
self.selections = selections
if selection_res_ids is None:
self.selection_res_id_set = None
else:
self.selection_res_id_set = set(selection_res_ids)
self._import_handler = None
self._blob_imports_from_prefix_cache = {}
_repr_cache = None
def __repr__(self):
if self._repr_cache is None:
# Include the base names of the selected resources in the name.
if self.selection_res_id_set is None:
selection_names = ['(all)']
else:
selection_names = []
for s in self.selections:
if isabs(s):
selection_names.append(splitext(basename(s))[0])
else:
selection_names.append(s)
self._repr_cache = "<%s cataloglib: %s>"\
% (self.lang, ', '.join(selection_names))
return self._repr_cache
@property
def import_handler(self):
if self._import_handler is None:
self._import_handler \
= self.catalogs_zone.mgr.citadel.import_handler_from_lang(self.lang)
return self._import_handler
def has_blob(self, blobname):
res_id = self.catalogs_zone.res_id_from_lang_and_blobname(self.lang,
blobname)
if res_id is None:
return False
if self.selection_res_id_set is None:
return True
return res_id in self.selection_res_id_set
def get_blob(self, blobname):
if not self.has_blob(blobname): # knows how to filter on selections
return None
return self.catalogs_zone.get_blob(self.lang, blobname)
def get_blob_imports(self, prefix):
"""Return the set of imports under the given prefix.
"prefix" is a tuple of import name parts. E.g. ("xml", "sax")
for "import xml.sax." in Python. Or ("XML", "Parser") for
"use XML::Parser::" in Perl.
See description in database.py docstring for details.
"""
# This code works fine if prefix is the empty tuple.
if prefix not in self._blob_imports_from_prefix_cache:
try:
dbfile_and_res_id_from_blobname \
= self.catalogs_zone.blob_index[self.lang]
except KeyError:
return set()
if self.selection_res_id_set is None:
matches = filter_blobnames_for_prefix(
dbfile_and_res_id_from_blobname,
prefix,
self.import_handler.sep)
else:
matches = filter_blobnames_for_prefix(
(bn
for bn, (f, res_id) in list(dbfile_and_res_id_from_blobname.items())
if res_id in self.selection_res_id_set),
prefix,
self.import_handler.sep)
self._blob_imports_from_prefix_cache[prefix] = matches
return self._blob_imports_from_prefix_cache[prefix]
def _blobnames_from_toplevelname(self, toplevelname, ilk=None):
"""Yield all blobnames in the currently selected catalogs
with the given toplevelname.
If "ilk" is given then only symbols of that ilk will be considered.
"""
# toplevelname_index: {lang -> ilk -> toplevelname -> res_id ->
# blobnames}
if self.lang in self.catalogs_zone.toplevelname_index:
for i, potential_bfrft \
in self.catalogs_zone.toplevelname_index[self.lang].items():
if ilk is not None and i != ilk:
continue
if toplevelname not in potential_bfrft:
continue
potential_bfr = potential_bfrft[toplevelname]
if self.selection_res_id_set is None:
for blobnames in potential_bfr.values():
for blobname in blobnames:
yield blobname
else:
for res_id, blobnames in potential_bfr.items():
if res_id not in self.selection_res_id_set:
continue
for blobname in blobnames:
yield blobname
def hits_from_lpath(self, lpath, ctlr=None, curr_buf=None):
assert isinstance(lpath, tuple) # common mistake to pass in a string
hits = []
for blobname in self._blobnames_from_toplevelname(lpath[0]):
lpaths = self.catalogs_zone.lpaths_from_lang_and_blobname(
self.lang, blobname)
if lpath not in lpaths:
continue
blob = self.catalogs_zone.get_blob(self.lang, blobname)
# TODO: Convert lpath's in tree-evalrs to tuples instead of lists.
elem = _elem_from_scoperef((blob, list(lpath)))
hits.append((elem, (blob, list(lpath[:-1]))))
return hits
def toplevel_cplns(self, prefix=None, ilk=None, ctlr=None):
"""Return completion info for all top-level names matching the
given prefix and ilk in all selected blobs in this lib.
"prefix" is a 3-character prefix with which to filter top-level
names. If None (or not specified), results are not filtered
based on the prefix.
"ilk" is a symbol type (e.g. "class", "variable", "function")
with which to filter results. If None (or not specified),
results of any ilk are returned.
"ctlr" (optional) is an EvalController instance. If
specified it should be used in the normal way (logging,
checking .is_aborted()).
Returns a list of 2-tuples: (<ilk>, <name>).
Note: the list is not sorted, because often some special sorting
is required for the different completion evaluators that might use
this API.
"""
cplns = []
if prefix is None:
# Use 'toplevelname_index':
# {lang -> ilk -> toplevelname -> res_id -> blobnames}
toplevelname_index = self.catalogs_zone.toplevelname_index
if self.lang in toplevelname_index:
if ilk is not None:
try:
bfrft = toplevelname_index[self.lang][ilk]
except KeyError:
pass
else:
if self.selection_res_id_set is None:
cplns += [(ilk, t) for t in bfrft]
else:
cplns += [(ilk, t) for t, bfr in bfrft.items()
if self.selection_res_id_set.intersection(bfr)]
elif self.selection_res_id_set is None:
for i, bfrft in toplevelname_index[self.lang].items():
cplns += [(i, t) for t in bfrft]
else: # ilk=None, have a selection set
for i, bfrft in toplevelname_index[self.lang].items():
cplns += [(i, t) for t, bfr in bfrft.items()
if self.selection_res_id_set.intersection(bfr)]
else:
# Use 'toplevelprefix_index':
# {lang -> ilk -> prefix -> res_id -> toplevelnames}
toplevelprefix_index = self.catalogs_zone.toplevelprefix_index
if self.lang in toplevelprefix_index:
if ilk is not None:
try:
tfr = toplevelprefix_index[self.lang][ilk][prefix]
except KeyError:
pass
else:
if self.selection_res_id_set is None:
cplns += [(ilk, t)
for toplevelnames in tfr.values()
for t in toplevelnames]
else:
cplns += [(ilk, t)
for r in self.selection_res_id_set.intersection(tfr)
for t in tfr[r]]
elif self.selection_res_id_set is None:
for i, tfrfp in toplevelprefix_index[self.lang].items():
if prefix not in tfrfp:
continue
cplns += [(i, t)
for toplevelnames in tfrfp[prefix].values()
for t in toplevelnames]
else: # ilk=None, have a selection set
for i, tfrfp in toplevelprefix_index[self.lang].items():
if prefix not in tfrfp:
continue
tfr = tfrfp[prefix]
cplns += [(i, t)
for r in self.selection_res_id_set.intersection(tfr)
for t in tfr[r]]
return cplns
#---- internal support routines
def _elem_from_scoperef(scoperef):
"""A scoperef is (<blob>, <lpath>). Return the actual elem in
the <blob> ciElementTree being referred to. Returns None if not found.
"""
elem = scoperef[0]
for lname in scoperef[1]:
try:
elem = elem.names[lname]
except KeyError:
return None
return elem
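# Illustrative sketch, not part of the original module: how a scoperef lookup
# behaves. `FakeElem` is a hypothetical stand-in for a ciElementTree element
# exposing a `.names` dict; the function below is defined only as an example
# and is never called by the library code above.
def _scoperef_example():
    class FakeElem(object):
        def __init__(self, names=None):
            self.names = names or {}
    # blob -> class "Foo" -> method "bar"
    bar = FakeElem()
    foo = FakeElem({"bar": bar})
    blob = FakeElem({"Foo": foo})
    # Walking the lpath resolves to the nested element...
    assert _elem_from_scoperef((blob, ["Foo", "bar"])) is bar
    # ...and an unknown name anywhere along the path yields None.
    assert _elem_from_scoperef((blob, ["Foo", "missing"])) is None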
|
|
import os
import shutil
import zipfile
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.generic.list_detail import object_list
from guardian.shortcuts import assign
from builds.forms import AliasForm
from builds.filters import VersionFilter
from builds.models import Version
from projects.forms import (ImportProjectForm, build_versions_form,
build_upload_html_form, SubprojectForm,
UserForm)
from projects.models import Project
from projects.tasks import unzip_files
from projects import constants
@login_required
def project_dashboard(request):
"""
A dashboard! If you aint know what that means you aint need to.
Essentially we show you an overview of your content.
"""
qs = Version.objects.active(user=request.user).filter(project__users__in=[request.user])
filter = VersionFilter(constants.IMPORTANT_VERSION_FILTERS, queryset=qs)
return object_list(
request,
queryset=request.user.projects.live(),
page=int(request.GET.get('page', 1)),
template_object_name='project',
template_name='projects/project_dashboard.html',
extra_context={
'filter': filter,
}
)
@login_required
def project_manage(request, project_slug):
"""
The management view for a project, where you will have links to edit
the project's configuration, edit the files associated with that
project, etc.
Now redirects to the normal /projects/<slug> view.
"""
return HttpResponseRedirect(reverse('projects_detail', args=[project_slug]))
@login_required
def project_edit(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
form_class = ImportProjectForm
form = form_class(instance=project, data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_edit.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_versions(request, project_slug):
"""
Shows the available versions and lets the user choose which ones he would
like to have built.
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
if not project.is_imported:
raise Http404
form_class = build_versions_form(project)
form = form_class(data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_versions.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_delete(request, project_slug):
"""
Mark a project as deleted on POST, otherwise show a form asking for
confirmation of delete.
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
if request.method == 'POST':
# Remove the repository checkout
shutil.rmtree(project.doc_path, ignore_errors=True)
# Delete the project and everything related to it
project.delete()
project_dashboard = reverse('projects_dashboard')
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_delete.html',
{'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_import(request):
"""
Import docs from a repo
"""
form = ImportProjectForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
project = form.save()
form.instance.users.add(request.user)
assign('view_project', request.user, project)
project_manage = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_manage + '?docs_not_built=True')
return render_to_response(
'projects/project_import.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def export(request, project_slug):
"""
Export a project's docs as a .zip file, including the .rst source
"""
project = Project.objects.live().get(users=request.user, slug=project_slug)
os.chdir(project.doc_path)
dir_path = os.path.join(settings.MEDIA_ROOT, 'export', project_slug)
zip_filename = '%s.zip' % project.slug
file_path = os.path.join(dir_path, zip_filename)
try:
os.makedirs(dir_path)
except OSError:
# Directory already exists
pass
# Create a <slug>.zip file containing all files in file_path
archive = zipfile.ZipFile(zip_filename, 'w')
for root, subfolders, files in os.walk(file_path):
for file in files:
archive.write(os.path.join(root, file))
archive.close()
return HttpResponseRedirect(os.path.join(settings.MEDIA_URL, 'export', project_slug, zip_filename))
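# Illustrative sketch, not part of the original views: zipping every file under
# a source directory with the stdlib, shown for comparison with the walk in
# export() above. `src_dir` and `out_path` are hypothetical arguments; nothing
# in this module calls this helper.
def _zip_directory_sketch(src_dir, out_path):
    archive = zipfile.ZipFile(out_path, 'w')
    for root, _subfolders, files in os.walk(src_dir):
        for name in files:
            # Store each file under its path relative to src_dir.
            full_path = os.path.join(root, name)
            archive.write(full_path, os.path.relpath(full_path, src_dir))
    archive.close()
    return out_path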
def upload_html(request, project_slug):
proj = get_object_or_404(Project.objects.all(), slug=project_slug)
FormClass = build_upload_html_form(proj)
if request.method == 'POST':
form = FormClass(request.POST, request.FILES, request=request)
if form.is_valid():
file = request.FILES['content']
version_slug = form.cleaned_data['version']
version = proj.versions.get(slug=version_slug)
# Copy file
dest_dir = os.path.join(settings.UPLOAD_ROOT, proj.slug)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
dest_file = os.path.join(dest_dir, file.name)
destination = open(dest_file, 'wb+')
for chunk in file.chunks():
destination.write(chunk)
destination.close()
# Mark version active.
version.active = True
version.uploaded = True
version.built = False
version.save()
# Extract file into the correct place.
html_path = proj.rtd_build_path(version.slug)
unzip_files(dest_file, html_path)
return HttpResponseRedirect(proj.get_absolute_url())
else:
form = FormClass(request=request)
return render_to_response(
'projects/upload_html.html',
{'form': form, 'project': proj},
context_instance=RequestContext(request)
)
@login_required
def edit_alias(request, project_slug, id=None):
proj = get_object_or_404(Project.objects.all(), slug=project_slug)
if id:
alias = proj.aliases.get(pk=id)
form = AliasForm(instance=alias, data=request.POST or None)
else:
form = AliasForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
alias = form.save()
return HttpResponseRedirect(alias.project.get_absolute_url())
return render_to_response(
'projects/alias_edit.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def list_alias(request, project_slug):
proj = get_object_or_404(Project.objects.all(), slug=project_slug)
return object_list(
request,
queryset=proj.aliases.all(),
template_object_name='alias',
template_name='projects/alias_list.html',
)
@login_required
def project_subprojects(request, project_slug):
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
form = SubprojectForm(data=request.POST or None, parent=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
subprojects = project.subprojects.all()
return render_to_response(
'projects/project_subprojects.html',
{'form': form, 'project': project, 'subprojects': subprojects},
context_instance=RequestContext(request)
)
@login_required
def project_subprojects_delete(request, project_slug, child_slug):
parent = get_object_or_404(request.user.projects.live(), slug=project_slug)
child = get_object_or_404(Project.objects.all(), slug=child_slug)
parent.remove_subproject(child)
project_dashboard = reverse('projects_detail', args=[parent.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_users(request, project_slug):
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
form = UserForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
users = project.users.all()
return render_to_response(
'projects/project_users.html',
{'form': form, 'project': project, 'users': users},
context_instance=RequestContext(request)
)
@login_required
def project_users_delete(request, project_slug):
if request.method != 'POST':
raise Http404
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
user = get_object_or_404(User.objects.all(), username=request.POST.get('username'))
if user == request.user:
raise Http404
project.users.remove(user)
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
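# Illustrative sketch, not part of the original views: the handlers above share
# a "bind the form to request.POST or None, save on a valid POST, redirect to
# the project detail page" shape. The helper below is hypothetical (no URL maps
# to it); `form_class` and `template` are assumed arguments.
def _project_form_view_sketch(request, project, form_class, template):
    form = form_class(instance=project, data=request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return HttpResponseRedirect(
            reverse('projects_detail', args=[project.slug]))
    return render_to_response(
        template,
        {'form': form, 'project': project},
        context_instance=RequestContext(request),
    )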
|
|
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from neutron.common import constants as common_const
from neutron.i18n import _LI
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.controller import ofp_event
from ryu.lib.mac import haddr_to_bin
from ryu.ofproto import ofproto_v1_3
from dragonflow.controller.common import constants as const
from dragonflow.controller.df_base_app import DFlowApp
LOG = log.getLogger(__name__)
# TODO(gsagie) currently the number set in Ryu for this
# (OFPP_IN_PORT) is not working, use this until resolved
OF_IN_PORT = 0xfff8
class L2App(DFlowApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
BASE_RPC_API_VERSION = '1.0'
def __init__(self, *args, **kwargs):
super(L2App, self).__init__(*args, **kwargs)
self.dp = None
self.local_networks = {}
self.db_store = kwargs['db_store']
def start(self):
super(L2App, self).start()
return 1
def is_ready(self):
return self.dp is not None
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
self.dp = ev.msg.datapath
self.add_flow_go_to_table(self.dp,
const.SERVICES_CLASSIFICATION_TABLE,
const.PRIORITY_DEFAULT,
const.L2_LOOKUP_TABLE)
self.add_flow_go_to_table(self.dp, const.ARP_TABLE,
const.PRIORITY_DEFAULT,
const.L2_LOOKUP_TABLE)
# ARP traffic => send to ARP table
match = self.dp.ofproto_parser.OFPMatch(eth_type=0x0806)
self.add_flow_go_to_table(self.dp,
const.SERVICES_CLASSIFICATION_TABLE,
const.PRIORITY_MEDIUM,
const.ARP_TABLE, match=match)
self._install_flows_on_switch_up()
self.send_port_desc_stats_request(self.dp)
def send_port_desc_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
msg = ev.msg
reason = msg.reason
port_no = msg.desc.port_no
datapath = ev.msg.datapath
ofproto = msg.datapath.ofproto
if reason == ofproto.OFPPR_ADD:
LOG.info(_LI("port added %s"), port_no)
elif reason == ofproto.OFPPR_DELETE:
LOG.info(_LI("port deleted %s"), port_no)
elif reason == ofproto.OFPPR_MODIFY:
LOG.info(_LI("port modified %s"), port_no)
else:
LOG.info(_LI("Illeagal port state %(port_no)s %(reason)s")
% {'port_no': port_no, 'reason': reason})
LOG.info(_LI(" Updating flow table on agents got port update "))
if self.dp:
self.send_port_desc_stats_request(datapath)
if reason == ofproto.OFPPR_DELETE:
pass
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
pass
def remove_local_port(self, lport):
lport_id = lport.get_id()
mac = lport.get_mac()
network_id = lport.get_external_value('local_network_id')
ofport = lport.get_external_value('ofport')
tunnel_key = lport.get_tunnel_key()
parser = self.dp.ofproto_parser
ofproto = self.dp.ofproto
# Remove ingress classifier for port
match = parser.OFPMatch()
match.set_in_port(ofport)
msg = parser.OFPFlowMod(
datapath=self.dp,
cookie=0,
cookie_mask=0,
table_id=const.INGRESS_CLASSIFICATION_DISPATCH_TABLE,
command=ofproto.OFPFC_DELETE,
priority=const.PRIORITY_MEDIUM,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match)
self.dp.send_msg(msg)
# Remove dispatch to local port according to unique tunnel_id
match = parser.OFPMatch(tunnel_id_nxm=tunnel_key)
msg = parser.OFPFlowMod(
datapath=self.dp,
cookie=0,
cookie_mask=0,
table_id=const.INGRESS_CLASSIFICATION_DISPATCH_TABLE,
command=ofproto.OFPFC_DELETE,
priority=const.PRIORITY_MEDIUM,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match)
self.dp.send_msg(msg)
# Remove destination classifier for port
match = parser.OFPMatch()
match.set_metadata(network_id)
match.set_dl_dst(haddr_to_bin(mac))
msg = parser.OFPFlowMod(datapath=self.dp,
cookie=0,
cookie_mask=0,
table_id=const.L2_LOOKUP_TABLE,
command=ofproto.OFPFC_DELETE,
priority=const.PRIORITY_MEDIUM,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match)
self.dp.send_msg(msg)
# Remove egress classifier for port
match = parser.OFPMatch(reg7=tunnel_key)
msg = parser.OFPFlowMod(datapath=self.dp,
cookie=0,
cookie_mask=0,
table_id=const.EGRESS_TABLE,
command=ofproto.OFPFC_DELETE,
priority=const.PRIORITY_MEDIUM,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match)
self.dp.send_msg(msg)
self._del_multicast_broadcast_handling_for_port(network_id, lport_id)
def remove_remote_port(self, lport):
lport_id = lport.get_id()
mac = lport.get_mac()
network_id = lport.get_external_value('local_network_id')
tunnel_key = lport.get_tunnel_key()
parser = self.dp.ofproto_parser
ofproto = self.dp.ofproto
# Remove destination classifier for port
match = parser.OFPMatch()
match.set_metadata(network_id)
match.set_dl_dst(haddr_to_bin(mac))
msg = parser.OFPFlowMod(datapath=self.dp,
cookie=0,
cookie_mask=0,
table_id=const.L2_LOOKUP_TABLE,
command=ofproto.OFPFC_DELETE,
priority=const.PRIORITY_MEDIUM,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match)
self.dp.send_msg(msg)
# Remove egress classifier for port
match = parser.OFPMatch(reg7=tunnel_key)
msg = parser.OFPFlowMod(datapath=self.dp,
cookie=0,
cookie_mask=0,
table_id=const.EGRESS_TABLE,
command=ofproto.OFPFC_DELETE,
priority=const.PRIORITY_MEDIUM,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
match=match)
self.dp.send_msg(msg)
self._del_multicast_broadcast_handling_for_port(network_id, lport_id)
def add_local_port(self, lport):
if self.dp is None:
return
lport_id = lport.get_id()
mac = lport.get_mac()
network_id = lport.get_external_value('local_network_id')
ofport = lport.get_external_value('ofport')
tunnel_key = lport.get_tunnel_key()
parser = self.dp.ofproto_parser
ofproto = self.dp.ofproto
# Ingress classifier for port
match = parser.OFPMatch()
match.set_in_port(ofport)
actions = []
actions.append(parser.OFPActionSetField(reg6=tunnel_key))
actions.append(parser.OFPActionSetField(metadata=network_id))
action_inst = self.dp.ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
goto_inst = parser.OFPInstructionGotoTable(
const.SERVICES_CLASSIFICATION_TABLE)
inst = [action_inst, goto_inst]
self.mod_flow(
self.dp,
inst=inst,
table_id=const.INGRESS_CLASSIFICATION_DISPATCH_TABLE,
priority=const.PRIORITY_MEDIUM,
match=match)
# Dispatch to local port according to unique tunnel_id
match = parser.OFPMatch(tunnel_id_nxm=tunnel_key)
actions = []
actions.append(parser.OFPActionOutput(ofport,
ofproto.OFPCML_NO_BUFFER))
action_inst = self.dp.ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
inst = [action_inst]
self.mod_flow(
self.dp,
inst=inst,
table_id=const.INGRESS_CLASSIFICATION_DISPATCH_TABLE,
priority=const.PRIORITY_MEDIUM,
match=match)
# Destination classifier for port
priority = const.PRIORITY_MEDIUM
goto_table = const.EGRESS_TABLE
# Router MACs go to the L3 table and have higher priority
if lport.get_device_owner() == common_const.DEVICE_OWNER_ROUTER_INTF:
priority = const.PRIORITY_HIGH
goto_table = const.L3_LOOKUP_TABLE
match = parser.OFPMatch()
match.set_metadata(network_id)
match.set_dl_dst(haddr_to_bin(mac))
actions = []
actions.append(parser.OFPActionSetField(reg7=tunnel_key))
action_inst = self.dp.ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
goto_inst = parser.OFPInstructionGotoTable(goto_table)
inst = [action_inst, goto_inst]
self.mod_flow(
self.dp,
inst=inst,
table_id=const.L2_LOOKUP_TABLE,
priority=priority,
match=match)
# Egress classifier for port
match = parser.OFPMatch(reg7=tunnel_key)
actions = [parser.OFPActionOutput(port=ofport)]
action_inst = self.dp.ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
inst = [action_inst]
self.mod_flow(
self.dp,
inst=inst,
table_id=const.EGRESS_TABLE,
priority=const.PRIORITY_MEDIUM,
match=match)
self._add_multicast_broadcast_handling_for_port(network_id, lport_id,
tunnel_key)
def _del_multicast_broadcast_handling_for_port(self, network_id,
lport_id):
parser = self.dp.ofproto_parser
ofproto = self.dp.ofproto
command = self.dp.ofproto.OFPFC_MODIFY
network = self.local_networks.get(network_id)
if network is None:
# TODO(gsagie) add error here
return
# TODO(gsagie) check if lport in network structure?
del network[lport_id]
self.local_networks[network_id] = network
match = parser.OFPMatch(eth_dst='01:00:00:00:00:00')
addint = haddr_to_bin('01:00:00:00:00:00')
match.set_dl_dst_masked(addint, addint)
match.set_metadata(network_id)
actions = []
for tunnel_id in network.values():
actions.append(parser.OFPActionSetField(reg7=tunnel_id))
actions.append(parser.NXActionResubmitTable(OF_IN_PORT,
const.EGRESS_TABLE))
inst = [self.dp.ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)]
self.mod_flow(
self.dp,
inst=inst,
table_id=const.L2_LOOKUP_TABLE,
command=command,
priority=const.PRIORITY_HIGH,
match=match)
def _add_multicast_broadcast_handling_for_port(self, network_id,
lport_id, tunnel_key):
parser = self.dp.ofproto_parser
ofproto = self.dp.ofproto
command = self.dp.ofproto.OFPFC_MODIFY
network = self.local_networks.get(network_id)
if network is None:
network = {}
self.local_networks[network_id] = network
command = self.dp.ofproto.OFPFC_ADD
network[lport_id] = tunnel_key
match = parser.OFPMatch(eth_dst='01:00:00:00:00:00')
addint = haddr_to_bin('01:00:00:00:00:00')
match.set_dl_dst_masked(addint, addint)
match.set_metadata(network_id)
actions = []
for tunnel_id in network.values():
actions.append(parser.OFPActionSetField(reg7=tunnel_id))
actions.append(parser.NXActionResubmitTable(OF_IN_PORT,
const.EGRESS_TABLE))
inst = [self.dp.ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)]
self.mod_flow(
self.dp,
inst=inst,
table_id=const.L2_LOOKUP_TABLE,
command=command,
priority=const.PRIORITY_HIGH,
match=match)
def add_remote_port(self, lport):
if self.dp is None:
return
lport_id = lport.get_id()
mac = lport.get_mac()
network_id = lport.get_external_value('local_network_id')
ofport = lport.get_external_value('ofport')
tunnel_key = lport.get_tunnel_key()
parser = self.dp.ofproto_parser
ofproto = self.dp.ofproto
# Destination classifier for port
match = parser.OFPMatch()
match.set_metadata(network_id)
match.set_dl_dst(haddr_to_bin(mac))
actions = []
actions.append(parser.OFPActionSetField(reg7=tunnel_key))
action_inst = self.dp.ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
goto_inst = parser.OFPInstructionGotoTable(const.EGRESS_TABLE)
inst = [action_inst, goto_inst]
self.mod_flow(
self.dp,
inst=inst,
table_id=const.L2_LOOKUP_TABLE,
priority=const.PRIORITY_MEDIUM,
match=match)
# Egress classifier for port
match = parser.OFPMatch(reg7=tunnel_key)
actions = []
actions.append(parser.OFPActionSetField(tunnel_id_nxm=tunnel_key))
actions.append(parser.OFPActionOutput(port=ofport))
action_inst = self.dp.ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
inst = [action_inst]
self.mod_flow(
self.dp,
inst=inst,
table_id=const.EGRESS_TABLE,
priority=const.PRIORITY_MEDIUM,
match=match)
self._add_multicast_broadcast_handling_for_port(network_id, lport_id,
tunnel_key)
def _install_flows_on_switch_up(self):
# Clear local networks cache so the multicast/broadcast flows
# are installed correctly
self.local_networks.clear()
for port in self.db_store.get_ports():
if port.get_external_value('is_local'):
self.add_local_port(port)
else:
self.add_remote_port(port)
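# Illustrative data, not part of the original app: the shape of the
# `local_networks` cache maintained by the handlers above. For each local
# network id the app keeps a {lport_id: tunnel_key} dict and rebuilds the
# multicast/broadcast flow from its values. All keys and values here are
# hypothetical.
_EXAMPLE_LOCAL_NETWORKS = {
    7: {                      # local_network_id
        'port-uuid-1': 21,    # lport_id -> tunnel_key
        'port-uuid-2': 22,
    },
}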
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.plugins.openstack import scenario
from rally.task import atomic
from rally.task import utils as bench_utils
class CeilometerScenario(scenario.OpenStackScenario):
"""Base class for Ceilometer scenarios with basic atomic actions."""
RESOURCE_NAME_PREFIX = "rally_ceilometer_"
def _get_alarm_dict(self, **kwargs):
"""Prepare and return an alarm dict for creating an alarm.
:param kwargs: optional parameters to create alarm
:returns: alarm dictionary used to create an alarm
"""
alarm_id = self._generate_random_name()
alarm = {"alarm_id": alarm_id,
"name": alarm_id,
"description": "Test Alarm"}
alarm.update(kwargs)
return alarm
@atomic.action_timer("ceilometer.list_alarms")
def _list_alarms(self, alarm_id=None):
"""List alarms.
List the alarm matching alarm_id. It fetches all alarms
if alarm_id is None.
:param alarm_id: specifies id of the alarm
:returns: list of alarms
"""
if alarm_id:
return self.clients("ceilometer").alarms.get(alarm_id)
else:
return self.clients("ceilometer").alarms.list()
@atomic.action_timer("ceilometer.create_alarm")
def _create_alarm(self, meter_name, threshold, kwargs):
"""Create an alarm.
:param meter_name: specifies meter name of the alarm
:param threshold: specifies alarm threshold
:param kwargs: contains optional features of alarm to be created
:returns: alarm
"""
alarm_dict = self._get_alarm_dict(**kwargs)
alarm_dict.update({"meter_name": meter_name,
"threshold": threshold})
alarm = self.clients("ceilometer").alarms.create(**alarm_dict)
return alarm
@atomic.action_timer("ceilometer.delete_alarm")
def _delete_alarm(self, alarm_id):
"""Delete an alarm.
:param alarm_id: specifies id of the alarm
"""
self.clients("ceilometer").alarms.delete(alarm_id)
@atomic.action_timer("ceilometer.update_alarm")
def _update_alarm(self, alarm_id, alarm_dict_delta):
"""Update an alarm.
:param alarm_id: specifies id of the alarm
:param alarm_dict_delta: features of alarm to be updated
"""
self.clients("ceilometer").alarms.update(alarm_id, **alarm_dict_delta)
@atomic.action_timer("ceilometer.get_alarm_history")
def _get_alarm_history(self, alarm_id):
"""Assemble the alarm history requested.
:param alarm_id: specifies id of the alarm
:returns: list of alarm changes
"""
return self.clients("ceilometer").alarms.get_history(alarm_id)
@atomic.action_timer("ceilometer.get_alarm_state")
def _get_alarm_state(self, alarm_id):
"""Get the state of the alarm.
:param alarm_id: specifies id of the alarm
:returns: state of the alarm
"""
return self.clients("ceilometer").alarms.get_state(alarm_id)
@atomic.action_timer("ceilometer.set_alarm_state")
def _set_alarm_state(self, alarm, state, timeout):
"""Set the state of the alarm.
:param alarm: alarm instance
:param state: an alarm state to be set
:param timeout: The number of seconds for which to attempt a
successful check of the alarm state.
:returns: alarm in the set state
"""
self.clients("ceilometer").alarms.set_state(alarm.alarm_id, state)
return bench_utils.wait_for(alarm,
is_ready=bench_utils.resource_is(state),
update_resource=bench_utils
.get_from_manager(),
timeout=timeout, check_interval=1)
@atomic.action_timer("ceilometer.list_events")
def _list_events(self):
"""Get list of user's events.
It fetches all events.
:returns: list of events
"""
return self.admin_clients("ceilometer").events.list()
@atomic.action_timer("ceilometer.get_event")
def _get_event(self, event_id):
"""Get event with specific id.
Get event matching event_id.
:param event_id: specifies id of the event
:returns: event
"""
return self.admin_clients("ceilometer").events.get(event_id)
@atomic.action_timer("ceilometer.list_event_types")
def _list_event_types(self):
"""Get list of all event types.
:returns: list of event types
"""
return self.admin_clients("ceilometer").event_types.list()
@atomic.action_timer("ceilometer.list_event_traits")
def _list_event_traits(self, event_type, trait_name):
"""Get list of event traits.
:param event_type: specifies the type of event
:param trait_name: specifies trait name
:returns: list of event traits
"""
return self.admin_clients("ceilometer").traits.list(event_type,
trait_name)
@atomic.action_timer("ceilometer.list_event_trait_descriptions")
def _list_event_trait_descriptions(self, event_type):
"""Get list of event trait descriptions.
:param event_type: specifies the type of event
:returns: list of event trait descriptions
"""
return self.admin_clients("ceilometer").trait_descriptions.list(
event_type)
@atomic.action_timer("ceilometer.list_meters")
def _list_meters(self):
"""Get list of user's meters."""
return self.clients("ceilometer").meters.list()
@atomic.action_timer("ceilometer.list_resources")
def _list_resources(self):
"""List all resources.
:returns: list of all resources
"""
return self.clients("ceilometer").resources.list()
@atomic.action_timer("ceilometer.list_samples")
def _list_samples(self):
"""List all Samples.
:returns: list of all samples
"""
return self.clients("ceilometer").samples.list()
@atomic.action_timer("ceilometer.get_resource")
def _get_resource(self, resource_id):
"""Retrieve details about one resource."""
return self.clients("ceilometer").resources.get(resource_id)
@atomic.action_timer("ceilometer.get_stats")
def _get_stats(self, meter_name):
"""Get stats for a specific meter.
:param meter_name: Name of ceilometer meter
"""
return self.clients("ceilometer").statistics.list(meter_name)
@atomic.action_timer("ceilometer.create_meter")
def _create_meter(self, **kwargs):
"""Create a new meter.
:param name_length: Length of meter name to be generated
:param kwargs: Contains the optional attributes for meter creation
:returns: Newly created meter
"""
name = self._generate_random_name()
samples = self.clients("ceilometer").samples.create(
counter_name=name, **kwargs)
return samples[0]
@atomic.action_timer("ceilometer.query_alarms")
def _query_alarms(self, filter, orderby, limit):
"""Query alarms with specific parameters.
If no input params are provided, it returns all the results
in the database.
:param limit: optional param for maximum number of results returned
:param orderby: optional param for specifying ordering of results
:param filter: optional filter query
:returns: queried alarms
"""
return self.clients("ceilometer").query_alarms.query(
filter, orderby, limit)
@atomic.action_timer("ceilometer.query_alarm_history")
def _query_alarm_history(self, filter, orderby, limit):
"""Query history of an alarm.
If no input params are provided, it returns all the results
in the database.
:param limit: optional param for maximum number of results returned
:param orderby: optional param for specifying ordering of results
:param filter: optional filter query
:returns: alarm history
"""
return self.clients("ceilometer").query_alarm_history.query(
filter, orderby, limit)
@atomic.action_timer("ceilometer.create_sample")
def _create_sample(self, counter_name, counter_type, counter_unit,
counter_volume, resource_id=None, **kwargs):
"""Create a Sample with specified parameters.
:param counter_name: specifies name of the counter
:param counter_type: specifies type of the counter
:param counter_unit: specifies unit of the counter
:param counter_volume: specifies volume of the counter
:param resource_id: specifies resource id for the sample created
:param kwargs: contains optional parameters for creating a sample
:returns: created sample
"""
kwargs.update({"counter_name": counter_name,
"counter_type": counter_type,
"counter_unit": counter_unit,
"counter_volume": counter_volume,
"resource_id": resource_id if resource_id
else self._generate_random_name(
prefix="rally_resource_")})
return self.clients("ceilometer").samples.create(**kwargs)
@atomic.action_timer("ceilometer.query_samples")
def _query_samples(self, filter, orderby, limit):
"""Query samples with specified parameters.
If no input params are provided, it returns all the results
in the database.
:param limit: optional param for maximum number of results returned
:param orderby: optional param for specifying ordering of results
:param filter: optional filter query
:returns: queried samples
"""
return self.clients("ceilometer").query_samples.query(
filter, orderby, limit)
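# Illustrative data, not part of the original scenarios: the keyword arguments
# that _get_alarm_dict() plus _create_alarm() assemble before calling
# clients("ceilometer").alarms.create(). The concrete values are hypothetical.
_EXAMPLE_ALARM_KWARGS = {
    "alarm_id": "rally_ceilometer_abc123",   # random name, also reused as "name"
    "name": "rally_ceilometer_abc123",
    "description": "Test Alarm",
    "meter_name": "cpu_util",
    "threshold": 80.0,
}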
|
|
"""
kombu.transport.redis
=====================
Redis transport.
"""
from __future__ import absolute_import
import numbers
import socket
from bisect import bisect
from collections import namedtuple
from contextlib import contextmanager
from time import time
from amqp import promise
from kombu.exceptions import InconsistencyError, VersionMismatch
from kombu.five import Empty, values, string_t
from kombu.log import get_logger
from kombu.utils import cached_property, uuid
from kombu.utils.eventio import poll, READ, ERR
from kombu.utils.encoding import bytes_to_str
from kombu.utils.json import loads, dumps
from kombu.utils.url import _parse_url
NO_ROUTE_ERROR = """
Cannot route message for exchange {0!r}: Table empty or key no longer exists.
Probably the key ({1!r}) has been removed from the Redis database.
"""
try:
from billiard.util import register_after_fork
except ImportError: # pragma: no cover
try:
from multiprocessing.util import register_after_fork # noqa
except ImportError:
def register_after_fork(*args, **kwargs): # noqa
pass
try:
import redis
except ImportError: # pragma: no cover
redis = None # noqa
from . import virtual
logger = get_logger('kombu.transport.redis')
crit, warn = logger.critical, logger.warn
DEFAULT_PORT = 6379
DEFAULT_DB = 0
PRIORITY_STEPS = [0, 3, 6, 9]
error_classes_t = namedtuple('error_classes_t', (
'connection_errors', 'channel_errors',
))
# This implementation may seem overly complex, but I assure you there is
# a good reason for doing it this way.
#
# Consuming from several connections enables us to emulate channels,
# which means we can have different service guarantees for individual
# channels.
#
# So we need to consume messages from multiple connections simultaneously,
# and using epoll means we don't have to do so using multiple threads.
#
# Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout
# exchanges (broadcast), as an alternative to pushing messages to fanout-bound
# queues manually.
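# Illustrative sketch, not part of the transport: the multi-queue consumption
# described above rests on the fact that a single BRPOP can block on several
# keys at once. The helper below is hypothetical and never called; `client` is
# assumed to be a redis-py client instance.
def _brpop_sketch(client, queue_names, timeout=1):
    # Returns (queue_key, payload) for whichever queue received a message
    # first, or None on timeout -- one blocking call serving many queues.
    return client.brpop(list(queue_names), timeout)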
def get_redis_error_classes():
from redis import exceptions
# This exception suddenly changed name between redis-py versions
if hasattr(exceptions, 'InvalidData'):
DataError = exceptions.InvalidData
else:
DataError = exceptions.DataError
return error_classes_t(
(virtual.Transport.connection_errors + (
InconsistencyError,
socket.error,
IOError,
OSError,
exceptions.ConnectionError,
exceptions.AuthenticationError)),
(virtual.Transport.channel_errors + (
DataError,
exceptions.InvalidResponse,
exceptions.ResponseError)),
)
class MutexHeld(Exception):
pass
@contextmanager
def Mutex(client, name, expire):
lock_id = uuid()
i_won = client.setnx(name, lock_id)
try:
if i_won:
client.expire(name, expire)
yield
else:
if not client.ttl(name):
client.expire(name, expire)
raise MutexHeld()
finally:
if i_won:
pipe = client.pipeline(True)
try:
pipe.watch(name)
if pipe.get(name) == lock_id:
pipe.multi()
pipe.delete(name)
pipe.execute()
pipe.unwatch()
except redis.WatchError:
pass
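# Illustrative sketch, not part of the transport: how the Mutex() context
# manager above is intended to be used. This helper is hypothetical and never
# called; `client` is assumed to be a redis-py client instance.
def _mutex_usage_sketch(client):
    try:
        with Mutex(client, 'example_lock', expire=30):
            pass  # critical section; runs only if we won the lock
    except MutexHeld:
        pass  # another holder owns the lock, so skip the critical section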
class QoS(virtual.QoS):
restore_at_shutdown = True
def __init__(self, *args, **kwargs):
super(QoS, self).__init__(*args, **kwargs)
self._vrestore_count = 0
def append(self, message, delivery_tag):
delivery = message.delivery_info
EX, RK = delivery['exchange'], delivery['routing_key']
with self.pipe_or_acquire() as pipe:
pipe.zadd(self.unacked_index_key, delivery_tag, time()) \
.hset(self.unacked_key, delivery_tag,
dumps([message._raw, EX, RK])) \
.execute()
super(QoS, self).append(message, delivery_tag)
def restore_unacked(self):
for tag in self._delivered:
self.restore_by_tag(tag)
self._delivered.clear()
def ack(self, delivery_tag):
self._remove_from_indices(delivery_tag).execute()
super(QoS, self).ack(delivery_tag)
def reject(self, delivery_tag, requeue=False):
if requeue:
self.restore_by_tag(delivery_tag, leftmost=True)
self.ack(delivery_tag)
@contextmanager
def pipe_or_acquire(self, pipe=None):
if pipe:
yield pipe
else:
with self.channel.conn_or_acquire() as client:
yield client.pipeline()
def _remove_from_indices(self, delivery_tag, pipe=None):
with self.pipe_or_acquire(pipe) as pipe:
return pipe.zrem(self.unacked_index_key, delivery_tag) \
.hdel(self.unacked_key, delivery_tag)
def restore_visible(self, start=0, num=10, interval=10):
self._vrestore_count += 1
if (self._vrestore_count - 1) % interval:
return
with self.channel.conn_or_acquire() as client:
ceil = time() - self.visibility_timeout
try:
with Mutex(client, self.unacked_mutex_key,
self.unacked_mutex_expire):
visible = client.zrevrangebyscore(
self.unacked_index_key, ceil, 0,
start=num and start, num=num, withscores=True)
for tag, score in visible or []:
self.restore_by_tag(tag, client)
except MutexHeld:
pass
def restore_by_tag(self, tag, client=None, leftmost=False):
with self.channel.conn_or_acquire(client) as client:
p, _, _ = self._remove_from_indices(
tag, client.pipeline().hget(self.unacked_key, tag)).execute()
if p:
M, EX, RK = loads(bytes_to_str(p)) # json is unicode
self.channel._do_restore_message(M, EX, RK, client, leftmost)
@cached_property
def unacked_key(self):
return self.channel.unacked_key
@cached_property
def unacked_index_key(self):
return self.channel.unacked_index_key
@cached_property
def unacked_mutex_key(self):
return self.channel.unacked_mutex_key
@cached_property
def unacked_mutex_expire(self):
return self.channel.unacked_mutex_expire
@cached_property
def visibility_timeout(self):
return self.channel.visibility_timeout
class MultiChannelPoller(object):
eventflags = READ | ERR
#: Set by :meth:`get` while reading from the socket.
_in_protected_read = False
#: Set of one-shot callbacks to call after reading from socket.
after_read = None
def __init__(self):
# active channels
self._channels = set()
# file descriptor -> channel map.
self._fd_to_chan = {}
# channel -> socket map
self._chan_to_sock = {}
# poll implementation (epoll/kqueue/select)
self.poller = poll()
# one-shot callbacks called after reading from socket.
self.after_read = set()
def close(self):
for fd in values(self._chan_to_sock):
try:
self.poller.unregister(fd)
except (KeyError, ValueError):
pass
self._channels.clear()
self._fd_to_chan.clear()
self._chan_to_sock.clear()
def add(self, channel):
self._channels.add(channel)
def discard(self, channel):
self._channels.discard(channel)
def _on_connection_disconnect(self, connection):
sock = getattr(connection, '_sock', None)
if sock is not None:
self.poller.unregister(sock)
def _register(self, channel, client, type):
if (channel, client, type) in self._chan_to_sock:
self._unregister(channel, client, type)
if client.connection._sock is None: # not connected yet.
client.connection.connect()
sock = client.connection._sock
self._fd_to_chan[sock.fileno()] = (channel, type)
self._chan_to_sock[(channel, client, type)] = sock
self.poller.register(sock, self.eventflags)
def _unregister(self, channel, client, type):
self.poller.unregister(self._chan_to_sock[(channel, client, type)])
def _register_BRPOP(self, channel):
"""enable BRPOP mode for channel."""
ident = channel, channel.client, 'BRPOP'
if channel.client.connection._sock is None or \
ident not in self._chan_to_sock:
channel._in_poll = False
self._register(*ident)
if not channel._in_poll: # send BRPOP
channel._brpop_start()
def _register_LISTEN(self, channel):
"""enable LISTEN mode for channel."""
if channel.subclient.connection._sock is None:
channel._in_listen = False
self._register(channel, channel.subclient, 'LISTEN')
if not channel._in_listen:
channel._subscribe() # send SUBSCRIBE
def on_poll_start(self):
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
def on_poll_init(self, poller):
self.poller = poller
for channel in self._channels:
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def maybe_restore_messages(self):
for channel in self._channels:
if channel.active_queues:
# only need to do this once, as they are not local to channel.
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def on_readable(self, fileno):
chan, type = self._fd_to_chan[fileno]
if chan.qos.can_consume():
return chan.handlers[type]()
def handle_event(self, fileno, event):
if event & READ:
return self.on_readable(fileno), self
elif event & ERR:
chan, type = self._fd_to_chan[fileno]
chan._poll_error(type)
def get(self, timeout=None):
self._in_protected_read = True
try:
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
events = self.poller.poll(timeout)
for fileno, event in events or []:
ret = self.handle_event(fileno, event)
if ret:
return ret
# - no new data, so try to restore messages.
# - reset active redis commands.
self.maybe_restore_messages()
raise Empty()
finally:
self._in_protected_read = False
while self.after_read:
try:
fun = self.after_read.pop()
except KeyError:
break
else:
fun()
@property
def fds(self):
return self._fd_to_chan
class Channel(virtual.Channel):
QoS = QoS
_client = None
_subclient = None
supports_fanout = True
keyprefix_queue = '_kombu.binding.%s'
keyprefix_fanout = '/{db}.'
sep = '\x06\x16'
_in_poll = False
_in_listen = False
_fanout_queues = {}
ack_emulation = True
unacked_key = 'unacked'
unacked_index_key = 'unacked_index'
unacked_mutex_key = 'unacked_mutex'
unacked_mutex_expire = 300 # 5 minutes
unacked_restore_limit = None
visibility_timeout = 3600 # 1 hour
priority_steps = PRIORITY_STEPS
socket_timeout = None
max_connections = 10
#: Transport option to enable/disable the fanout keyprefix.
#: Should be enabled by default, but that is not
#: backwards compatible. Can also be a string, in which
#: case it changes the default prefix ('/{db}.') to something
#: else. The prefix must include a leading slash and a trailing dot.
fanout_prefix = False
#: If enabled the fanout exchange will support patterns in routing
#: and binding keys (like a topic exchange but using PUB/SUB).
#: This will be enabled by default in a future version.
fanout_patterns = False
_pool = None
from_transport_options = (
virtual.Channel.from_transport_options +
('ack_emulation',
'unacked_key',
'unacked_index_key',
'unacked_mutex_key',
'unacked_mutex_expire',
'visibility_timeout',
'unacked_restore_limit',
'fanout_prefix',
'fanout_patterns',
'socket_timeout',
'max_connections',
'priority_steps') # <-- do not add comma here!
)
def __init__(self, *args, **kwargs):
super_ = super(Channel, self)
super_.__init__(*args, **kwargs)
if not self.ack_emulation: # disable visibility timeout
self.QoS = virtual.QoS
self._queue_cycle = []
self.Client = self._get_client()
self.ResponseError = self._get_response_error()
self.active_fanout_queues = set()
self.auto_delete_queues = set()
self._fanout_to_queue = {}
self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive}
if self.fanout_prefix:
if isinstance(self.fanout_prefix, string_t):
self.keyprefix_fanout = self.fanout_prefix
else:
# previous versions did not set a fanout prefix, so this cannot
# be enabled by default.
self.keyprefix_fanout = ''
# Evaluate connection.
try:
self.client.info()
except Exception:
if self._pool:
self._pool.disconnect()
raise
self.connection.cycle.add(self) # add to channel poller.
# copy errors, in case the channel is closed but threads are
# still waiting for data.
self.connection_errors = self.connection.connection_errors
register_after_fork(self, self._after_fork)
def _after_fork(self):
if self._pool is not None:
self._pool.disconnect()
def _on_connection_disconnect(self, connection):
if self.connection and self.connection.cycle:
self.connection.cycle._on_connection_disconnect(connection)
def _do_restore_message(self, payload, exchange, routing_key,
client=None, leftmost=False):
with self.conn_or_acquire(client) as client:
try:
try:
payload['headers']['redelivered'] = True
except KeyError:
pass
for queue in self._lookup(exchange, routing_key):
(client.lpush if leftmost else client.rpush)(
queue, dumps(payload),
)
except Exception:
crit('Could not restore message: %r', payload, exc_info=True)
def _restore(self, message, leftmost=False):
if not self.ack_emulation:
return super(Channel, self)._restore(message)
tag = message.delivery_tag
with self.conn_or_acquire() as client:
P, _ = client.pipeline() \
.hget(self.unacked_key, tag) \
.hdel(self.unacked_key, tag) \
.execute()
if P:
M, EX, RK = loads(bytes_to_str(P)) # json is unicode
self._do_restore_message(M, EX, RK, client, leftmost)
def _restore_at_beginning(self, message):
return self._restore(message, leftmost=True)
def basic_consume(self, queue, *args, **kwargs):
if queue in self._fanout_queues:
exchange, _ = self._fanout_queues[queue]
self.active_fanout_queues.add(queue)
self._fanout_to_queue[exchange] = queue
ret = super(Channel, self).basic_consume(queue, *args, **kwargs)
self._update_cycle()
return ret
def basic_cancel(self, consumer_tag):
# If we are busy reading messages we may experience
# a race condition where a message is consumed after
# cancelling, so we must delay this operation until reading
# is complete (Issue celery/celery#1773).
connection = self.connection
if connection:
if connection.cycle._in_protected_read:
return connection.cycle.after_read.add(
promise(self._basic_cancel, (consumer_tag, )),
)
return self._basic_cancel(consumer_tag)
def _basic_cancel(self, consumer_tag):
try:
queue = self._tag_to_queue[consumer_tag]
except KeyError:
return
try:
self.active_fanout_queues.remove(queue)
except KeyError:
pass
else:
self._unsubscribe_from(queue)
try:
exchange, _ = self._fanout_queues[queue]
self._fanout_to_queue.pop(exchange)
except KeyError:
pass
ret = super(Channel, self).basic_cancel(consumer_tag)
self._update_cycle()
return ret
def _get_publish_topic(self, exchange, routing_key):
if routing_key and self.fanout_patterns:
return ''.join([self.keyprefix_fanout, exchange, '/', routing_key])
return ''.join([self.keyprefix_fanout, exchange])
def _get_subscribe_topic(self, queue):
exchange, routing_key = self._fanout_queues[queue]
return self._get_publish_topic(exchange, routing_key)
def _subscribe(self):
keys = [self._get_subscribe_topic(queue)
for queue in self.active_fanout_queues]
if not keys:
return
c = self.subclient
if c.connection._sock is None:
c.connection.connect()
self._in_listen = True
c.psubscribe(keys)
def _unsubscribe_from(self, queue):
topic = self._get_subscribe_topic(queue)
c = self.subclient
should_disconnect = False
if c.connection._sock is None:
c.connection.connect()
should_disconnect = True
try:
c.unsubscribe([topic])
finally:
if should_disconnect and c.connection:
c.connection.disconnect()
def _handle_message(self, client, r):
if bytes_to_str(r[0]) == 'unsubscribe' and r[2] == 0:
client.subscribed = False
elif bytes_to_str(r[0]) == 'pmessage':
return {'type': r[0], 'pattern': r[1],
'channel': r[2], 'data': r[3]}
else:
return {'type': r[0], 'pattern': None,
'channel': r[1], 'data': r[2]}
def _receive(self):
c = self.subclient
response = None
try:
response = c.parse_response()
except self.connection_errors:
self._in_listen = False
raise Empty()
if response is not None:
payload = self._handle_message(c, response)
if bytes_to_str(payload['type']).endswith('message'):
channel = bytes_to_str(payload['channel'])
if payload['data']:
if channel[0] == '/':
_, _, channel = channel.partition('.')
try:
message = loads(bytes_to_str(payload['data']))
except (TypeError, ValueError):
warn('Cannot process event on channel %r: %s',
channel, repr(payload)[:4096], exc_info=1)
raise Empty()
exchange = channel.split('/', 1)[0]
return message, self._fanout_to_queue[exchange]
raise Empty()
def _brpop_start(self, timeout=1):
queues = self._consume_cycle()
if not queues:
return
keys = [self._q_for_pri(queue, pri) for pri in PRIORITY_STEPS
for queue in queues] + [timeout or 0]
self._in_poll = True
self.client.connection.send_command('BRPOP', *keys)
def _brpop_read(self, **options):
try:
try:
dest__item = self.client.parse_response(self.client.connection,
'BRPOP',
**options)
except self.connection_errors:
# if there's a ConnectionError, disconnect so the next
# iteration will reconnect automatically.
self.client.connection.disconnect()
raise Empty()
if dest__item:
dest, item = dest__item
dest = bytes_to_str(dest).rsplit(self.sep, 1)[0]
self._rotate_cycle(dest)
return loads(bytes_to_str(item)), dest
else:
raise Empty()
finally:
self._in_poll = False
def _poll_error(self, type, **options):
if type == 'LISTEN':
self.subclient.parse_response()
else:
self.client.parse_response(self.client.connection, type)
def _get(self, queue):
with self.conn_or_acquire() as client:
for pri in PRIORITY_STEPS:
item = client.rpop(self._q_for_pri(queue, pri))
if item:
return loads(bytes_to_str(item))
raise Empty()
def _size(self, queue):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.llen(self._q_for_pri(queue, pri))
sizes = cmds.execute()
return sum(size for size in sizes
if isinstance(size, numbers.Integral))
def _q_for_pri(self, queue, pri):
pri = self.priority(pri)
return '%s%s%s' % ((queue, self.sep, pri) if pri else (queue, '', ''))
def priority(self, n):
steps = self.priority_steps
return steps[bisect(steps, n) - 1]
def _put(self, queue, message, **kwargs):
"""Deliver message."""
pri = self._get_message_priority(message)
with self.conn_or_acquire() as client:
client.lpush(self._q_for_pri(queue, pri), dumps(message))
def _put_fanout(self, exchange, message, routing_key, **kwargs):
"""Deliver fanout message."""
with self.conn_or_acquire() as client:
client.publish(
self._get_publish_topic(exchange, routing_key),
dumps(message),
)
def _new_queue(self, queue, auto_delete=False, **kwargs):
if auto_delete:
self.auto_delete_queues.add(queue)
def _queue_bind(self, exchange, routing_key, pattern, queue):
if self.typeof(exchange).type == 'fanout':
# Mark exchange as fanout.
self._fanout_queues[queue] = (
exchange, routing_key.replace('#', '*'),
)
with self.conn_or_acquire() as client:
client.sadd(self.keyprefix_queue % (exchange, ),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
def _delete(self, queue, exchange, routing_key, pattern, *args):
self.auto_delete_queues.discard(queue)
with self.conn_or_acquire() as client:
client.srem(self.keyprefix_queue % (exchange, ),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.delete(self._q_for_pri(queue, pri))
cmds.execute()
def _has_queue(self, queue, **kwargs):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.exists(self._q_for_pri(queue, pri))
return any(cmds.execute())
def get_table(self, exchange):
key = self.keyprefix_queue % exchange
with self.conn_or_acquire() as client:
values = client.smembers(key)
if not values:
raise InconsistencyError(NO_ROUTE_ERROR.format(exchange, key))
return [tuple(bytes_to_str(val).split(self.sep)) for val in values]
def _purge(self, queue):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
priq = self._q_for_pri(queue, pri)
cmds = cmds.llen(priq).delete(priq)
sizes = cmds.execute()
return sum(sizes[::2])
def close(self):
if self._pool:
self._pool.disconnect()
if not self.closed:
# remove from channel poller.
self.connection.cycle.discard(self)
# delete fanout bindings
for queue in self._fanout_queues:
if queue in self.auto_delete_queues:
self.queue_delete(queue)
self._close_clients()
super(Channel, self).close()
def _close_clients(self):
# Close connections
for attr in 'client', 'subclient':
try:
self.__dict__[attr].connection.disconnect()
except (KeyError, AttributeError, self.ResponseError):
pass
def _prepare_virtual_host(self, vhost):
if not isinstance(vhost, numbers.Integral):
if not vhost or vhost == '/':
vhost = DEFAULT_DB
elif vhost.startswith('/'):
vhost = vhost[1:]
try:
vhost = int(vhost)
except ValueError:
raise ValueError(
'Database is int between 0 and limit - 1, not {0}'.format(
vhost,
))
return vhost
def _connparams(self):
conninfo = self.connection.client
connparams = {'host': conninfo.hostname or '127.0.0.1',
'port': conninfo.port or DEFAULT_PORT,
'virtual_host': conninfo.virtual_host,
'password': conninfo.password,
'max_connections': self.max_connections,
'socket_timeout': self.socket_timeout}
host = connparams['host']
if '://' in host:
scheme, _, _, _, _, path, query = _parse_url(host)
if scheme == 'socket':
connparams.update({
'connection_class': redis.UnixDomainSocketConnection,
'path': '/' + path}, **query)
connparams.pop('host', None)
connparams.pop('port', None)
connparams['db'] = self._prepare_virtual_host(
connparams.pop('virtual_host', None))
channel = self
connection_cls = (
connparams.get('connection_class') or
redis.Connection
)
class Connection(connection_cls):
def disconnect(self):
channel._on_connection_disconnect(self)
super(Connection, self).disconnect()
connparams['connection_class'] = Connection
return connparams
def _create_client(self):
return self.Client(connection_pool=self.pool)
def _get_pool(self):
params = self._connparams()
self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db'])
return redis.ConnectionPool(**params)
def _get_client(self):
if redis.VERSION < (2, 4, 4):
raise VersionMismatch(
'Redis transport requires redis-py versions 2.4.4 or later. '
'You have {0.__version__}'.format(redis))
# KombuRedis maintains a connection attribute on its instance and
# uses that when executing commands
# This was added after redis-py was changed.
class KombuRedis(redis.Redis): # pragma: no cover
def __init__(self, *args, **kwargs):
super(KombuRedis, self).__init__(*args, **kwargs)
self.connection = self.connection_pool.get_connection('_')
return KombuRedis
@contextmanager
def conn_or_acquire(self, client=None):
if client:
yield client
else:
if self._in_poll:
client = self._create_client()
try:
yield client
finally:
self.pool.release(client.connection)
else:
yield self.client
@property
def pool(self):
if self._pool is None:
self._pool = self._get_pool()
return self._pool
@cached_property
def client(self):
"""Client used to publish messages, BRPOP etc."""
return self._create_client()
@cached_property
def subclient(self):
"""Pub/Sub connection used to consume fanout queues."""
client = self._create_client()
pubsub = client.pubsub()
pool = pubsub.connection_pool
pubsub.connection = pool.get_connection('pubsub', pubsub.shard_hint)
return pubsub
def _update_cycle(self):
"""Update fair cycle between queues.
We cycle between queues fairly to make sure that
each queue is equally likely to be consumed from,
so that a very busy queue will not block others.
This works by using Redis's `BRPOP` command and
by rotating the most recently used queue to the
end of the list. See Kombu github issue #166 for
more discussion of this method.
"""
self._queue_cycle = list(self.active_queues)
def _consume_cycle(self):
"""Get a fresh list of queues from the queue cycle."""
active = len(self.active_queues)
return self._queue_cycle[0:active]
def _rotate_cycle(self, used):
"""Move most recently used queue to end of list."""
cycle = self._queue_cycle
try:
cycle.append(cycle.pop(cycle.index(used)))
except ValueError:
pass
def _get_response_error(self):
from redis import exceptions
return exceptions.ResponseError
@property
def active_queues(self):
"""Set of queues being consumed from (excluding fanout queues)."""
return {queue for queue in self._active_queues
if queue not in self.active_fanout_queues}
class Transport(virtual.Transport):
Channel = Channel
polling_interval = None # disable sleep between unsuccessful polls.
default_port = DEFAULT_PORT
driver_type = 'redis'
driver_name = 'redis'
implements = virtual.Transport.implements.extend(
async=True,
exchange_types=frozenset(['direct', 'topic', 'fanout'])
)
def __init__(self, *args, **kwargs):
if redis is None:
raise ImportError('Missing redis library (pip install redis)')
super(Transport, self).__init__(*args, **kwargs)
# Get redis-py exceptions.
self.connection_errors, self.channel_errors = self._get_errors()
# All channels share the same poller.
self.cycle = MultiChannelPoller()
def driver_version(self):
return redis.__version__
def register_with_event_loop(self, connection, loop):
cycle = self.cycle
cycle.on_poll_init(loop.poller)
cycle_poll_start = cycle.on_poll_start
add_reader = loop.add_reader
on_readable = self.on_readable
def _on_disconnect(connection):
if connection._sock:
loop.remove(connection._sock)
cycle._on_connection_disconnect = _on_disconnect
def on_poll_start():
cycle_poll_start()
[add_reader(fd, on_readable, fd) for fd in cycle.fds]
loop.on_tick.add(on_poll_start)
loop.call_repeatedly(10, cycle.maybe_restore_messages)
def on_readable(self, fileno):
"""Handle AIO event for one of our file descriptors."""
item = self.cycle.on_readable(fileno)
if item:
message, queue = item
if not queue or queue not in self._callbacks:
raise KeyError(
'Message for queue {0!r} without consumers: {1}'.format(
queue, message))
self._callbacks[queue](message)
def _get_errors(self):
"""Utility to import redis-py's exceptions at runtime."""
return get_redis_error_classes()
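# Illustrative data, not part of the transport: how PRIORITY_STEPS and
# Channel.sep ('\x06\x16') combine into per-priority Redis list keys (see
# Channel.priority() and Channel._q_for_pri()). The queue name 'celery' is
# hypothetical.
_EXAMPLE_PRIORITY_KEYS = {
    0: 'celery',              # priorities 0-2 use the bare queue name
    3: 'celery\x06\x163',     # priorities 3-5
    6: 'celery\x06\x166',     # priorities 6-8
    9: 'celery\x06\x169',     # priorities 9 and above
}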
|
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from collections import defaultdict
from dataclasses import dataclass
from typing import Iterable, Optional, Tuple
from pants.backend.python.target_types import PythonSourceField
from pants.backend.python.typecheck.mypy.skip_field import SkipMyPyField
from pants.backend.python.typecheck.mypy.subsystem import (
MyPy,
MyPyConfigFile,
MyPyFirstPartyPlugins,
)
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import (
Pex,
PexRequest,
PexRequirements,
VenvPex,
VenvPexProcess,
)
from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.check import REPORT_DIR, CheckRequest, CheckResult, CheckResults
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests, RemovePrefix
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import FieldSet, Target, TransitiveTargets, TransitiveTargetsRequest
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class MyPyFieldSet(FieldSet):
required_fields = (PythonSourceField,)
sources: PythonSourceField
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipMyPyField).value
@dataclass(frozen=True)
class MyPyPartition:
root_targets: FrozenOrderedSet[Target]
closure: FrozenOrderedSet[Target]
interpreter_constraints: InterpreterConstraints
class MyPyRequest(CheckRequest):
field_set_type = MyPyFieldSet
def generate_argv(
mypy: MyPy,
*,
venv_python: str,
file_list_path: str,
python_version: Optional[str],
) -> Tuple[str, ...]:
args = [f"--python-executable={venv_python}", *mypy.args]
if mypy.config:
args.append(f"--config-file={mypy.config}")
if python_version:
args.append(f"--python-version={python_version}")
args.append(f"@{file_list_path}")
return tuple(args)
def determine_python_files(files: Iterable[str]) -> Tuple[str, ...]:
"""We run over all .py and .pyi files, but .pyi files take precedence.
MyPy will error if we say to run over the same module with both its .py and .pyi files, so we
must be careful to only use the .pyi stub.
"""
result: OrderedSet[str] = OrderedSet()
for f in files:
if f.endswith(".pyi"):
py_file = f[:-1] # That is, strip the `.pyi` suffix to be `.py`.
result.discard(py_file)
result.add(f)
elif f.endswith(".py"):
pyi_file = f + "i"
if pyi_file not in result:
result.add(f)
return tuple(result)
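# Illustrative example of the precedence rule implemented above (not part of
# the Pants rules themselves):
#
#     determine_python_files(["app.py", "app.pyi", "util.py"])
#     # -> ("app.pyi", "util.py")  - the .pyi stub wins over app.py.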
@rule
async def mypy_typecheck_partition(
partition: MyPyPartition,
config_file: MyPyConfigFile,
first_party_plugins: MyPyFirstPartyPlugins,
mypy: MyPy,
python_setup: PythonSetup,
) -> CheckResult:
# MyPy requires 3.5+ to run, but uses the typed-ast library to work with 2.7, 3.4, 3.5, 3.6,
# and 3.7. However, typed-ast does not understand 3.8+, so instead we must run MyPy with
# Python 3.8+ when relevant. We only do this if <3.8 can't be used, as we don't want a
# loose requirement like `>=3.6` to result in requiring Python 3.8+, which would error if
# 3.8+ is not installed on the machine.
tool_interpreter_constraints = (
partition.interpreter_constraints
if (
mypy.options.is_default("interpreter_constraints")
and partition.interpreter_constraints.requires_python38_or_newer(
python_setup.interpreter_universe
)
)
else mypy.interpreter_constraints
)
closure_sources_get = Get(PythonSourceFiles, PythonSourceFilesRequest(partition.closure))
roots_sources_get = Get(
SourceFiles,
SourceFilesRequest(tgt.get(PythonSourceField) for tgt in partition.root_targets),
)
# See `requirements_venv_pex` for how this will get wrapped in a `VenvPex`.
requirements_pex_get = Get(
Pex,
PexFromTargetsRequest,
PexFromTargetsRequest.for_requirements(
(tgt.address for tgt in partition.root_targets),
hardcoded_interpreter_constraints=partition.interpreter_constraints,
internal_only=True,
),
)
extra_type_stubs_pex_get = Get(
Pex,
PexRequest(
output_filename="extra_type_stubs.pex",
internal_only=True,
requirements=PexRequirements(mypy.extra_type_stubs),
interpreter_constraints=partition.interpreter_constraints,
),
)
mypy_pex_get = Get(
VenvPex,
PexRequest(
output_filename="mypy.pex",
internal_only=True,
main=mypy.main,
requirements=mypy.pex_requirements(
extra_requirements=first_party_plugins.requirement_strings,
),
interpreter_constraints=tool_interpreter_constraints,
),
)
(
closure_sources,
roots_sources,
mypy_pex,
extra_type_stubs_pex,
requirements_pex,
) = await MultiGet(
closure_sources_get,
roots_sources_get,
mypy_pex_get,
extra_type_stubs_pex_get,
requirements_pex_get,
)
python_files = determine_python_files(roots_sources.snapshot.files)
file_list_path = "__files.txt"
file_list_digest_request = Get(
Digest,
CreateDigest([FileContent(file_list_path, "\n".join(python_files).encode())]),
)
# This creates a venv with all the 3rd-party requirements used by the code. We tell MyPy to
# use this venv by setting `--python-executable`. Note that this Python interpreter is
# different than what we run MyPy with.
#
# We could have directly asked the `PexFromTargetsRequest` to return a `VenvPex`, rather than
# `Pex`, but that would mean missing out on sharing a cache with other goals like `test` and
# `run`.
requirements_venv_pex_request = Get(
VenvPex,
PexRequest(
output_filename="requirements_venv.pex",
internal_only=True,
pex_path=[requirements_pex, extra_type_stubs_pex],
interpreter_constraints=partition.interpreter_constraints,
),
)
requirements_venv_pex, file_list_digest = await MultiGet(
requirements_venv_pex_request, file_list_digest_request
)
merged_input_files = await Get(
Digest,
MergeDigests(
[
file_list_digest,
first_party_plugins.sources_digest,
closure_sources.source_files.snapshot.digest,
requirements_venv_pex.digest,
config_file.digest,
]
),
)
all_used_source_roots = sorted(
set(itertools.chain(first_party_plugins.source_roots, closure_sources.source_roots))
)
env = {
"PEX_EXTRA_SYS_PATH": ":".join(all_used_source_roots),
"MYPYPATH": ":".join(all_used_source_roots),
}
result = await Get(
FallibleProcessResult,
VenvPexProcess(
mypy_pex,
argv=generate_argv(
mypy,
venv_python=requirements_venv_pex.python.argv0,
file_list_path=file_list_path,
python_version=config_file.python_version_to_autoset(
partition.interpreter_constraints, python_setup.interpreter_universe
),
),
input_digest=merged_input_files,
extra_env=env,
output_directories=(REPORT_DIR,),
description=f"Run MyPy on {pluralize(len(python_files), 'file')}.",
level=LogLevel.DEBUG,
),
)
report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
return CheckResult.from_fallible_process_result(
result,
partition_description=str(sorted(str(c) for c in partition.interpreter_constraints)),
report=report,
)
# TODO(#10864): Improve performance, e.g. by leveraging the MyPy cache.
@rule(desc="Typecheck using MyPy", level=LogLevel.DEBUG)
async def mypy_typecheck(
request: MyPyRequest, mypy: MyPy, python_setup: PythonSetup
) -> CheckResults:
if mypy.skip:
return CheckResults([], checker_name="MyPy")
# When determining how to batch by interpreter constraints, we must consider the entire
# transitive closure to get the final resulting constraints.
# TODO(#10863): Improve the performance of this.
transitive_targets_per_field_set = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address]))
for field_set in request.field_sets
)
interpreter_constraints_to_transitive_targets = defaultdict(set)
for transitive_targets in transitive_targets_per_field_set:
interpreter_constraints = (
InterpreterConstraints.create_from_targets(transitive_targets.closure, python_setup)
or mypy.interpreter_constraints
)
interpreter_constraints_to_transitive_targets[interpreter_constraints].add(
transitive_targets
)
partitions = []
for interpreter_constraints, all_transitive_targets in sorted(
interpreter_constraints_to_transitive_targets.items()
):
combined_roots: OrderedSet[Target] = OrderedSet()
combined_closure: OrderedSet[Target] = OrderedSet()
for transitive_targets in all_transitive_targets:
combined_roots.update(transitive_targets.roots)
combined_closure.update(transitive_targets.closure)
partitions.append(
MyPyPartition(
FrozenOrderedSet(combined_roots),
FrozenOrderedSet(combined_closure),
interpreter_constraints,
)
)
partitioned_results = await MultiGet(
Get(CheckResult, MyPyPartition, partition) for partition in partitions
)
return CheckResults(partitioned_results, checker_name="MyPy")
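# Illustrative note on the partitioning above (hypothetical targets and
# constraint strings): if one field set's transitive closure resolves to
# interpreter constraints ['CPython==3.6.*'] and another's to
# ['CPython==3.7.*'], two MyPyPartition objects are built and MyPy is invoked
# once per partition, each with its own closure and constraints.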
def rules():
return [
*collect_rules(),
UnionRule(CheckRequest, MyPyRequest),
*pex_from_targets.rules(),
]
|
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2020-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""EdgeQL multiplicity inference.
A top-down multiplicity inferer that traverses the full AST populating
multiplicity fields and performing multiplicity checks.
"""
from __future__ import annotations
from typing import *
import dataclasses
import functools
import itertools
from edb import errors
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from edb.schema import objtypes as s_objtypes
from edb.schema import pointers as s_pointers
from edb.ir import ast as irast
from edb.ir import typeutils as irtyputils
from edb.ir import utils as irutils
from . import cardinality
from . import context as inf_ctx
from . import types as inf_types
from . import utils as inf_utils
ZERO = inf_ctx.MultiplicityInfo(own=qltypes.Multiplicity.ZERO)
ONE = inf_ctx.MultiplicityInfo(own=qltypes.Multiplicity.ONE)
MANY = inf_ctx.MultiplicityInfo(own=qltypes.Multiplicity.MANY)
DISTINCT_UNION = inf_ctx.MultiplicityInfo(
own=qltypes.Multiplicity.ONE,
disjoint_union=True,
)
@dataclasses.dataclass(frozen=True, eq=False)
class ContainerMultiplicityInfo(inf_ctx.MultiplicityInfo):
"""Multiplicity descriptor for an expression returning a container"""
#: Individual multiplicity values for container elements.
elements: Tuple[inf_ctx.MultiplicityInfo, ...] = ()
def _max_multiplicity(
args: Iterable[inf_ctx.MultiplicityInfo]
) -> inf_ctx.MultiplicityInfo:
# Coincidentally, the lexical order of multiplicity is opposite of
# the order of multiplicity values.
arg_list = [a.own for a in args]
if not arg_list:
max_mult = qltypes.Multiplicity.ONE
else:
max_mult = min(arg_list)
return inf_ctx.MultiplicityInfo(own=max_mult)
def _common_multiplicity(
args: Iterable[irast.Base],
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
return _max_multiplicity(
infer_multiplicity(a, scope_tree=scope_tree, ctx=ctx) for a in args)
@functools.singledispatch
def _infer_multiplicity(
ir: irast.Base,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
# return MANY
raise ValueError(f'infer_multiplicity: cannot handle {ir!r}')
@_infer_multiplicity.register
def __infer_statement(
ir: irast.Statement,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
return infer_multiplicity(
ir.expr, scope_tree=scope_tree, ctx=ctx)
@_infer_multiplicity.register
def __infer_config_insert(
ir: irast.ConfigInsert,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
return infer_multiplicity(
ir.expr, scope_tree=scope_tree, ctx=ctx)
@_infer_multiplicity.register
def __infer_config_set(
ir: irast.ConfigSet,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
return infer_multiplicity(
ir.expr, scope_tree=scope_tree, ctx=ctx)
@_infer_multiplicity.register
def __infer_config_reset(
ir: irast.ConfigReset,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
if ir.selector:
return infer_multiplicity(
ir.selector, scope_tree=scope_tree, ctx=ctx)
else:
return ONE
@_infer_multiplicity.register
def __infer_empty_set(
ir: irast.EmptySet,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
return ZERO
@_infer_multiplicity.register
def __infer_type_introspection(
ir: irast.TypeIntrospection,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
# TODO: The result is always ONE, but we still want to actually
# introspect the expression. Unfortunately, currently the
# expression is not available at this stage.
#
# E.g. consider:
# WITH X := Foo {bar := {Bar, Bar}}
# SELECT INTROSPECT TYPEOF X.bar;
return ONE
def _infer_shape(
ir: irast.Set,
*,
is_mutation: bool=False,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> None:
for shape_set, shape_op in ir.shape:
new_scope = inf_utils.get_set_scope(shape_set, scope_tree, ctx=ctx)
if shape_set.expr and shape_set.rptr:
expr_mult = infer_multiplicity(
shape_set.expr, scope_tree=new_scope, ctx=ctx)
ptrref = shape_set.rptr.ptrref
if (
expr_mult is MANY
and shape_op is not qlast.ShapeOp.APPEND
and shape_op is not qlast.ShapeOp.SUBTRACT
and irtyputils.is_object(ptrref.out_target)
):
ctx.env.schema, ptrcls = irtyputils.ptrcls_from_ptrref(
ptrref, schema=ctx.env.schema)
assert isinstance(ptrcls, s_pointers.Pointer)
desc = ptrcls.get_verbosename(ctx.env.schema)
if not is_mutation:
desc = f"computed {desc}"
raise errors.QueryError(
f'possibly not a distinct set returned by an '
f'expression for a {desc}',
hint=(
f'You can use assert_distinct() around the expression '
f'to turn this into a runtime assertion, or the '
f'DISTINCT operator to silently discard duplicate '
f'elements.'
),
context=shape_set.context
)
_infer_shape(
shape_set, is_mutation=is_mutation, scope_tree=scope_tree, ctx=ctx)
def _infer_set(
ir: irast.Set,
*,
is_mutation: bool=False,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
result = _infer_set_inner(
ir, is_mutation=is_mutation, scope_tree=scope_tree, ctx=ctx)
ctx.inferred_multiplicity[ir, scope_tree] = result
# The shape doesn't affect multiplicity, but requires validation.
_infer_shape(ir, is_mutation=is_mutation, scope_tree=scope_tree, ctx=ctx)
return result
def _infer_set_inner(
ir: irast.Set,
*,
is_mutation: bool=False,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
rptr = ir.rptr
new_scope = cardinality.inf_utils.get_set_scope(ir, scope_tree, ctx=ctx)
if ir.expr is None:
expr_mult = None
else:
expr_mult = infer_multiplicity(ir.expr, scope_tree=new_scope, ctx=ctx)
if rptr is not None:
rptrref = rptr.ptrref
src_mult = infer_multiplicity(
rptr.source, scope_tree=new_scope, ctx=ctx)
if isinstance(rptrref, irast.TupleIndirectionPointerRef):
if isinstance(src_mult, ContainerMultiplicityInfo):
idx = irtyputils.get_tuple_element_index(rptrref)
path_mult = src_mult.elements[idx]
else:
# All bets are off for tuple elements coming from
# opaque tuples.
path_mult = MANY
elif not irtyputils.is_object(ir.typeref):
# This is not an expression and is some kind of scalar, so
# multiplicity cannot be guaranteed to be ONE (most scalar
# expressions don't have an implicit requirement to be sets)
# unless we also have an exclusive constraint.
if (
expr_mult is not None
and inf_utils.find_visible(rptr.source, new_scope) is not None
):
path_mult = expr_mult
else:
schema = ctx.env.schema
# We should only have some kind of path terminating in a
# property here.
assert isinstance(rptrref, irast.PointerRef)
ptr = schema.get_by_id(rptrref.id, type=s_pointers.Pointer)
if ptr.is_exclusive(schema):
# Got an exclusive constraint
path_mult = ONE
else:
path_mult = MANY
else:
# This is some kind of a link at the end of a path.
# Therefore the target is a proper set.
path_mult = ONE
elif expr_mult is not None:
path_mult = expr_mult
else:
# Evidently this is not a pointer, expression, or a scalar.
# This is an object type and therefore a proper set.
path_mult = ONE
if (
not path_mult.is_many()
and irutils.get_path_root(ir).path_id in ctx.distinct_iterators
):
path_mult = dataclasses.replace(path_mult, disjoint_union=True)
return path_mult
@_infer_multiplicity.register
def __infer_func_call(
ir: irast.FunctionCall,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
card = cardinality.infer_cardinality(ir, scope_tree=scope_tree, ctx=ctx)
args_mult = []
for arg in ir.args:
arg_mult = infer_multiplicity(arg.expr, scope_tree=scope_tree, ctx=ctx)
args_mult.append(arg_mult)
arg.multiplicity = arg_mult.own
if card.is_single():
return ONE
elif str(ir.func_shortname) == 'std::assert_distinct':
return ONE
elif str(ir.func_shortname) == 'std::assert_exists':
return args_mult[0]
elif str(ir.func_shortname) == 'std::enumerate':
# The output of enumerate is always of multiplicity ONE because
# it's a set of tuples with first elements being guaranteed to be
# distinct.
return ContainerMultiplicityInfo(
own=qltypes.Multiplicity.ONE,
elements=(ONE,) + tuple(args_mult),
)
else:
# If the function returns a set (for any reason), all bets are off
# and the maximum multiplicity cannot be inferred.
return MANY
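# Illustrative note for the std::enumerate branch above (hypothetical EdgeQL):
# enumerate({'a', 'a'}) yields {(0, 'a'), (1, 'a')} - the leading index makes
# every tuple distinct, so the inferred multiplicity is ONE even though the
# second tuple element repeats.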
@_infer_multiplicity.register
def __infer_oper_call(
ir: irast.OperatorCall,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
mult = []
cards = []
for arg in ir.args:
cards.append(
cardinality.infer_cardinality(
arg.expr, scope_tree=scope_tree, ctx=ctx))
mult.append(
infer_multiplicity(
arg.expr, scope_tree=scope_tree, ctx=ctx))
op_name = str(ir.func_shortname)
if op_name == 'std::UNION':
# UNION will produce multiplicity MANY unless most or all of
# the elements' multiplicity is ZERO (from an empty set), or
# all of the elements are sets of unrelated object types of
# multiplicity at most ONE, or if all elements have been
# proven to be disjoint (e.g. a UNION of INSERTs).
result = ZERO
arg_type = inf_types.infer_type(ir.args[0].expr, env=ctx.env)
if isinstance(arg_type, s_objtypes.ObjectType):
types: List[s_objtypes.ObjectType] = [
inf_types.infer_type(arg.expr, env=ctx.env) # type: ignore
for arg in ir.args
]
lineages = [
(t,) + tuple(t.descendants(ctx.env.schema))
for t in types
]
flattened = tuple(itertools.chain.from_iterable(lineages))
types_disjoint = len(flattened) == len(frozenset(flattened))
else:
types_disjoint = False
for m in mult:
if m.is_one():
if (
result.is_zero()
or types_disjoint
or (result.disjoint_union and m.disjoint_union)
):
result = m
else:
result = MANY
break
elif m.is_many():
result = MANY
break
else:
# ZERO
pass
return result
elif op_name == 'std::DISTINCT':
if mult[0] == ZERO:
return ZERO
else:
return ONE
elif op_name == 'std::IF':
# If the cardinality of the condition is more than ONE, then
# the multiplicity cannot be inferred.
if cards[1].is_single():
# Now it's just a matter of the multiplicity of the
# possible results.
return _max_multiplicity((mult[0], mult[2]))
else:
return MANY
elif op_name == 'std::??':
return _max_multiplicity((mult[0], mult[1]))
else:
# The rest of the operators (other than UNION, DISTINCT, or
# IF..ELSE). We can ignore the SET OF args because the results
# are actually proportional to the element-wise args in our
# operators.
result = _max_multiplicity(mult)
if result == MANY:
return result
# Even when arguments are of multiplicity ONE, we cannot
# exclude the possibility of the result being of multiplicity
# MANY. We need to check that at most one argument has
# cardinality more than ONE.
if len([card for card in cards if card.is_multi()]) > 1:
return MANY
else:
return result
@_infer_multiplicity.register
def __infer_const(
ir: irast.BaseConstant,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
return ONE
@_infer_multiplicity.register
def __infer_param(
ir: irast.Parameter,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
return ONE
@_infer_multiplicity.register
def __infer_const_set(
ir: irast.ConstantSet,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
if len(ir.elements) == len({el.value for el in ir.elements}):
return ONE
else:
return MANY
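# For example, the constant set {1, 2, 3} has all-distinct elements and is
# inferred as ONE, while {1, 2, 2} contains a duplicate and is inferred as
# MANY.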
@_infer_multiplicity.register
def __infer_typecheckop(
ir: irast.TypeCheckOp,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
# Unless this is a singleton, the multiplicity cannot be assumed to be ONE.
card = cardinality.infer_cardinality(
ir, scope_tree=scope_tree, ctx=ctx)
if card is not None and card.is_single():
return ONE
else:
return MANY
@_infer_multiplicity.register
def __infer_typecast(
ir: irast.TypeCast,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
return infer_multiplicity(
ir.expr, scope_tree=scope_tree, ctx=ctx,
)
def _infer_stmt_multiplicity(
ir: irast.FilteredStmt,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
# WITH block bindings need to be validated; they don't have to
# have multiplicity ONE, but their sub-expressions must be valid.
for part in (ir.bindings or []):
infer_multiplicity(part, scope_tree=scope_tree, ctx=ctx)
subj = ir.subject if isinstance(ir, irast.MutatingStmt) else ir.result
result = infer_multiplicity(
subj,
scope_tree=scope_tree,
ctx=ctx,
)
if ir.where:
infer_multiplicity(ir.where, scope_tree=scope_tree, ctx=ctx)
filtered_ptrs = cardinality.extract_filters(
subj, ir.where, scope_tree, ctx)
for _, flt_expr in filtered_ptrs:
# Check if any of the singleton filter expressions in FILTER
# reference enclosing iterators with multiplicity ONE, and
# if so, indicate to the enclosing iterator that this UNION
# is guaranteed to be disjoint.
if (
(irutils.get_path_root(flt_expr).path_id
in ctx.distinct_iterators)
and not infer_multiplicity(
flt_expr, scope_tree=scope_tree, ctx=ctx
).is_many()
):
return DISTINCT_UNION
return result
def _infer_for_multiplicity(
ir: irast.SelectStmt,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
itset = ir.iterator_stmt
assert itset is not None
itexpr = itset.expr
assert itexpr is not None
itmult = infer_multiplicity(itset, scope_tree=scope_tree, ctx=ctx)
if itmult != MANY:
ctx.distinct_iterators.add(itset.path_id)
result_mult = infer_multiplicity(ir.result, scope_tree=scope_tree, ctx=ctx)
if isinstance(ir.result.expr, irast.InsertStmt):
# A union of inserts always has multiplicity ONE
return ONE
elif itmult.is_many():
return MANY
else:
if result_mult.disjoint_union:
return result_mult
else:
return MANY
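# Illustrative example (hypothetical EdgeQL): in
#     FOR x IN {1, 2, 3} UNION (INSERT Foo { val := x })
# the body is an INSERT, so the result is a disjoint union of fresh objects
# and the FOR expression as a whole is inferred as ONE; a FOR whose body is
# not provably disjoint is inferred as MANY.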
@_infer_multiplicity.register
def __infer_select_stmt(
ir: irast.SelectStmt,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
if ir.iterator_stmt is not None:
return _infer_for_multiplicity(ir, scope_tree=scope_tree, ctx=ctx)
else:
stmt_mult = _infer_stmt_multiplicity(
ir, scope_tree=scope_tree, ctx=ctx)
clauses = (
[ir.limit, ir.offset]
+ [sort.expr for sort in (ir.orderby or ())]
)
for clause in filter(None, clauses):
new_scope = inf_utils.get_set_scope(clause, scope_tree, ctx=ctx)
infer_multiplicity(clause, scope_tree=new_scope, ctx=ctx)
return stmt_mult
@_infer_multiplicity.register
def __infer_insert_stmt(
ir: irast.InsertStmt,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
# INSERT will always return a proper set, but we still want to
# process the sub-expressions.
infer_multiplicity(
ir.subject, is_mutation=True, scope_tree=scope_tree, ctx=ctx
)
new_scope = inf_utils.get_set_scope(ir.result, scope_tree, ctx=ctx)
infer_multiplicity(
ir.result, is_mutation=True, scope_tree=new_scope, ctx=ctx
)
if ir.on_conflict:
for part in [ir.on_conflict.select_ir, ir.on_conflict.else_ir]:
if part:
infer_multiplicity(part, scope_tree=scope_tree, ctx=ctx)
return DISTINCT_UNION
@_infer_multiplicity.register
def __infer_update_stmt(
ir: irast.UpdateStmt,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
# Presumably UPDATE will always return a proper set, even if it's
# fed something with higher multiplicity, but we still want to
# process the expression being updated.
infer_multiplicity(
ir.result, is_mutation=True, scope_tree=scope_tree, ctx=ctx,
)
result = _infer_stmt_multiplicity(ir, scope_tree=scope_tree, ctx=ctx)
if result is ZERO:
return ZERO
else:
return ONE
@_infer_multiplicity.register
def __infer_delete_stmt(
ir: irast.DeleteStmt,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
# Presumably DELETE will always return a proper set, even if it's
# fed something with higher multiplicity, but we still want to
# process the expression being deleted.
infer_multiplicity(
ir.result, is_mutation=True, scope_tree=scope_tree, ctx=ctx,
)
result = _infer_stmt_multiplicity(ir, scope_tree=scope_tree, ctx=ctx)
if result is ZERO:
return ZERO
else:
return ONE
@_infer_multiplicity.register
def __infer_group_stmt(
ir: irast.GroupStmt,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
raise NotImplementedError
@_infer_multiplicity.register
def __infer_slice(
ir: irast.SliceIndirection,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
# Slice indirection multiplicity is guaranteed to be ONE as long
# as the cardinality of this expression is at most one, otherwise
# the results of slice indirection can contain values with
# multiplicity > 1.
card = cardinality.infer_cardinality(
ir, scope_tree=scope_tree, ctx=ctx)
if card is not None and card.is_single():
return ONE
else:
return MANY
@_infer_multiplicity.register
def __infer_index(
ir: irast.IndexIndirection,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
# Index indirection multiplicity is guaranteed to be ONE as long
# as the cardinality of this expression is at most one, otherwise
# the results of index indirection can contain values with
# multiplicity > 1.
card = cardinality.infer_cardinality(
ir, scope_tree=scope_tree, ctx=ctx)
if card is not None and card.is_single():
return ONE
else:
return MANY
@_infer_multiplicity.register
def __infer_array(
ir: irast.Array,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
return _common_multiplicity(ir.elements, scope_tree=scope_tree, ctx=ctx)
@_infer_multiplicity.register
def __infer_tuple(
ir: irast.Tuple,
*,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
els = tuple(
infer_multiplicity(el.val, scope_tree=scope_tree, ctx=ctx)
for el in ir.elements
)
return ContainerMultiplicityInfo(
own=_max_multiplicity(els).own,
elements=els,
)
def infer_multiplicity(
ir: irast.Base,
*,
is_mutation: bool=False,
scope_tree: irast.ScopeTreeNode,
ctx: inf_ctx.InfCtx,
) -> inf_ctx.MultiplicityInfo:
result = ctx.inferred_multiplicity.get((ir, scope_tree))
if result is not None:
return result
# We can use cardinality as a helper in determining multiplicity,
# since singletons have multiplicity one.
card = cardinality.infer_cardinality(
ir, is_mutation=is_mutation, scope_tree=scope_tree, ctx=ctx)
if isinstance(ir, irast.EmptySet):
result = ZERO
elif isinstance(ir, irast.Set):
result = _infer_set(
ir, is_mutation=is_mutation, scope_tree=scope_tree, ctx=ctx,
)
else:
result = _infer_multiplicity(ir, scope_tree=scope_tree, ctx=ctx)
if card is not None and card.is_single() and result.is_many():
# We've validated multiplicity, so now we can just override it
# safely.
result = ONE
if not isinstance(result, inf_ctx.MultiplicityInfo):
raise errors.QueryError(
'could not determine the multiplicity of '
'set produced by expression',
context=ir.context)
ctx.inferred_multiplicity[ir, scope_tree] = result
return result
|
|
# Copyright (c) 2012
# Licensed under the terms of the MIT license; see LICENSE.txt
from .base import StrPattern, Pattern, PatternBase, derepeat
from .repeating import Repeating
from zope.interface import implementer
capturing_pattern = StrPattern("(...)")
noncapturing_pattern = StrPattern("(?:...)")
named_pattern = StrPattern("(?P<name>...)")
class IGroup(Pattern):
def _merge_nonatomic_child():
"return simplified version of group for use in parent"
@implementer(IGroup)
class Group(PatternBase):
def __init__(self, *children, **args):
self._init(**args)
self.children = children
def _init(self, capturing=True, name=None, _atomic=True):
self.capturing = capturing
self.name = name
if name and not capturing:
raise Exception("Groups cannot be both named and non-capturing")
elif name:
# TODO: assert isidentifier(name)
self.pattern = named_pattern.format(name=name)
elif capturing:
self.pattern = capturing_pattern
else:
self.pattern = noncapturing_pattern
if not _atomic:  # not intended for library users; _atomic=False is an implementation detail
self.pattern = None
self.capturing = False
self._atomic = _atomic
def copy(self, children=None, **keywords):
if children is None:
children = self.children
args = dict(_atomic=self._atomic, capturing=self.capturing, name=self.name)
args.update(keywords)
return Group(*children, **args)
def atomized(self):
if not self._atomic:
return self.copy(_atomic=True)
else:
return self
def deatomized(self, _warn=True):
if self._atomic:
if self.capturing and _warn:
self.warn("de-atomizing a capturing group - capturing-ness will be lost!")
result = self.copy(_atomic=False)
else:
result = self
return result._drop_if_unnecessary()
def toplevel(self):
if self.name and self.capturing:
self.warn("using a named group as top-level - sub-groups will have numerical indices starting from 2!!")
return self.simplified()
elif not self.capturing:
    self.warn("using non-capturing group as top-level - will be captured as "
              "group 0 anyway due to how the re module numbers groups.")
    return self.deatomized(False).simplified()
else:
    return self.simplified()
@property
def ismodifier(self):
if self._atomic:
return False
return len(self.children) and self.children[0].ismodifier
@property
def atoms(self):
if self._atomic:
return 1
else:
return sum(child.atoms for child in self.children)
### ------ Simplification ------
def _merge_nonatomic_child(self):
if not self._atomic:
return self.children
else:
return [self]
def _drop_if_unnecessary(self, children=None):
"""
Return child if group is unnecessary
"""
if children is None:
children = self.children
if (not self.capturing and len(children) == 1 and
(not self._atomic or self.children[0].atoms <= 1)):
return children[0]
else:
return Group(*children, _atomic=self._atomic, capturing=self.capturing,
name=self.name)
def _derepeat_pre(self, children=None):
"""
do first part of derepeating - this part must operate on an unmodified
children list, or it may fail to correctly derepeat
"""
if children is None:
children = self.children
return derepeat(children)
def _derepeat_post(self, pattern, count):
"""
finish derepeating by wrapping in Repeating object if necessary
"""
return Repeating(pattern, count=count)._drop_if_unnecessary()
def derepeated(self):
"return derepeated version of self, with no other changes"
children, count = self._derepeat_pre()
return self._derepeat_post(self.copy(children=children), count)
def _merged_children(self, children=None):
if children is None:
children = self.children
newchildren = []
for child in children:
if IGroup.providedBy(child):
newchildren.extend(child._merge_nonatomic_child())
else:
newchildren.append(child)
return newchildren
def _prerender(self, children=None):
if children is None:
children = self.children
return [Pattern(child) for child in children]
def simplified(self, recursive=True, mergechildren=True):
children, count = self._derepeat_pre()
children = self._prerender(children)
if recursive:
children = [child.simplified() for child in children]
if mergechildren:
children = self._merged_children(children)
result = self._drop_if_unnecessary(children)
return self._derepeat_post(result, count)
### ------ Rendering ------
def render(self):
result = []
for child in self.children:
result.append(Pattern(child).render())
resultstr = "".join(result)
if not self._atomic:
return resultstr
else:
return self.pattern.format(dots=resultstr).render()
def __repr__(self):
extra = []
if not self.capturing:
extra.append("capturing=False")
if self.name:
extra.append("name=%r" % self.name)
if not self._atomic:
extra.append("_atomic=False")
return "Group(%s)" % ", ".join([repr(child) for child in self.children] + extra)
@implementer(Pattern)
class PrevGroup(PatternBase):
earliernamed = StrPattern("(?P=name)")
earlierid = StrPattern("\\number")
def __init__(self, name):
if str(name).isdigit() and int(name):
self.pattern = self.earlierid.format(number=str(name))
else:
self.pattern = self.earliernamed.format(name=name)
def render(self):
return self.pattern.render()
@implementer(Pattern)
class Lookahead(PatternBase): # TODO FIXME XXX
positive = StrPattern("(?=...)")
negative = StrPattern("(?!...)")
def _init(self, negative=False):
if negative:
self.group = self.negative
else:
self.group = self.positive
@implementer(Pattern)
class Lookbehind(Lookahead):
positive = StrPattern("(?<=...)")
negative = StrPattern("(?<!...)")
@implementer(Pattern)
class Yesno(PatternBase):
choicepat = StrPattern("(?(id)yes_pattern|no_pattern)")
def __init__(self, previous, yespattern, nopattern):
self.pattern = self.choicepat.format(id=str(previous))
self.yespattern = Pattern(yespattern)
self.nopattern = Pattern(nopattern)
def render(self):
formatted = self.pattern.format(yes_pattern=self.yespattern.render(),
no_pattern=self.nopattern.render())
return formatted.render()
|
|
#!/usr/bin/env python2
'''
Python WebSocket library with support for "wss://" encryption.
Copyright 2011 Joel Martin
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
Supports following protocol versions:
- http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
- http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
- http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10
You can make a cert/key with openssl using:
openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
as taken from http://docs.python.org/dev/library/ssl.html#certificates
'''
import os, sys, time, errno, signal, socket, traceback, select
import array, struct
from base64 import b64encode, b64decode
# Imports that vary by python version
# python 3.0 differences
if sys.hexversion > 0x3000000:
b2s = lambda buf: buf.decode('latin_1')
s2b = lambda s: s.encode('latin_1')
s2a = lambda s: s
else:
b2s = lambda buf: buf # No-op
s2b = lambda s: s # No-op
s2a = lambda s: [ord(c) for c in s]
try: from io import StringIO
except: from cStringIO import StringIO
try: from http.server import SimpleHTTPRequestHandler
except: from SimpleHTTPServer import SimpleHTTPRequestHandler
# python 2.6 differences
try: from hashlib import md5, sha1
except: from md5 import md5; from sha import sha as sha1
# python 2.5 differences
try:
from struct import pack, unpack_from
except:
from struct import pack
def unpack_from(fmt, buf, offset=0):
slice = buffer(buf, offset, struct.calcsize(fmt))
return struct.unpack(fmt, slice)
# Degraded functionality if these imports are missing
for mod, sup in [('numpy', 'HyBi protocol'), ('ssl', 'TLS/SSL/wss'),
('multiprocessing', 'Multi-Processing'),
('resource', 'daemonizing')]:
try:
globals()[mod] = __import__(mod)
except ImportError:
globals()[mod] = None
print("WARNING: no '%s' module, %s is slower or disabled" % (
mod, sup))
if multiprocessing and sys.platform == 'win32':
# make sockets pickle-able/inheritable
import multiprocessing.reduction
class WebSocketServer(object):
"""
WebSockets server class.
Must be sub-classed with new_client method definition.
"""
buffer_size = 65536
server_handshake_hixie = """HTTP/1.1 101 Web Socket Protocol Handshake\r
Upgrade: WebSocket\r
Connection: Upgrade\r
%sWebSocket-Origin: %s\r
%sWebSocket-Location: %s://%s%s\r
"""
server_handshake_hybi = """HTTP/1.1 101 Switching Protocols\r
Upgrade: websocket\r
Connection: Upgrade\r
Sec-WebSocket-Accept: %s\r
"""
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
policy_response = """<cross-domain-policy><allow-access-from domain="*" to-ports="*" /></cross-domain-policy>\n"""
# An exception before the WebSocket connection was established
class EClose(Exception):
pass
# An exception while the WebSocket client was connected
class CClose(Exception):
pass
def __init__(self, listen_host='', listen_port=None, source_is_ipv6=False,
verbose=False, cert='', key='', ssl_only=None,
daemon=False, record='', web='',
run_once=False, timeout=0, idle_timeout=0):
# settings
self.verbose = verbose
self.listen_host = listen_host
self.listen_port = listen_port
self.prefer_ipv6 = source_is_ipv6
self.ssl_only = ssl_only
self.daemon = daemon
self.run_once = run_once
self.timeout = timeout
self.idle_timeout = idle_timeout
self.launch_time = time.time()
self.ws_connection = False
self.handler_id = 1
# Make paths settings absolute
self.cert = os.path.abspath(cert)
self.key = self.web = self.record = ''
if key:
self.key = os.path.abspath(key)
if web:
self.web = os.path.abspath(web)
if record:
self.record = os.path.abspath(record)
if self.web:
os.chdir(self.web)
# Sanity checks
if not ssl and self.ssl_only:
raise Exception("No 'ssl' module and SSL-only specified")
if self.daemon and not resource:
raise Exception("Module 'resource' required to daemonize")
# Show configuration
print("WebSocket server settings:")
print(" - Listen on %s:%s" % (
self.listen_host, self.listen_port))
print(" - Flash security policy server")
if self.web:
print(" - Web server. Web root: %s" % self.web)
if ssl:
if os.path.exists(self.cert):
print(" - SSL/TLS support")
if self.ssl_only:
print(" - Deny non-SSL/TLS connections")
else:
print(" - No SSL/TLS support (no cert file)")
else:
print(" - No SSL/TLS support (no 'ssl' module)")
if self.daemon:
print(" - Backgrounding (daemon)")
if self.record:
print(" - Recording to '%s.*'" % self.record)
#
# WebSocketServer static methods
#
@staticmethod
def socket(host, port=None, connect=False, prefer_ipv6=False, unix_socket=None, use_ssl=False):
""" Resolve a host (and optional port) to an IPv4 or IPv6
address. Create a socket. Bind and listen on it unless connect
is set, in which case connect to it. Return the socket.
"""
flags = 0
if host == '':
host = None
if connect and not (port or unix_socket):
raise Exception("Connect mode requires a port")
if use_ssl and not ssl:
raise Exception("SSL socket requested but Python SSL module not loaded.");
if not connect and use_ssl:
raise Exception("SSL only supported in connect mode (for now)")
if not connect:
flags = flags | socket.AI_PASSIVE
if not unix_socket:
addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM,
socket.IPPROTO_TCP, flags)
if not addrs:
raise Exception("Could not resolve host '%s'" % host)
addrs.sort(key=lambda x: x[0])
if prefer_ipv6:
addrs.reverse()
sock = socket.socket(addrs[0][0], addrs[0][1])
if connect:
sock.connect(addrs[0][4])
if use_ssl:
sock = ssl.wrap_socket(sock)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addrs[0][4])
sock.listen(100)
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(unix_socket)
return sock
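# Illustrative usage of the helper above (hypothetical host/port values):
#
#     lsock = WebSocketServer.socket('', 6080)                         # bind + listen
#     csock = WebSocketServer.socket('localhost', 5900, connect=True)  # connect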
@staticmethod
def daemonize(keepfd=None, chdir='/'):
os.umask(0)
if chdir:
os.chdir(chdir)
else:
os.chdir('/')
os.setgid(os.getgid()) # relinquish elevations
os.setuid(os.getuid()) # relinquish elevations
# Double fork to daemonize
if os.fork() > 0: os._exit(0) # Parent exits
os.setsid() # Obtain new process group
if os.fork() > 0: os._exit(0) # Parent exits
# Signal handling
def terminate(a,b): os._exit(0)
signal.signal(signal.SIGTERM, terminate)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Close open files
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY: maxfd = 256
for fd in reversed(range(maxfd)):
try:
if fd != keepfd:
os.close(fd)
except OSError:
_, exc, _ = sys.exc_info()
if exc.errno != errno.EBADF: raise
# Redirect I/O to /dev/null
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdin.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdout.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stderr.fileno())
@staticmethod
def unmask(buf, hlen, plen):
pstart = hlen + 4
pend = pstart + plen
if numpy:
b = c = s2b('')
if plen >= 4:
mask = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
offset=hlen, count=1)
data = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
offset=pstart, count=int(plen / 4))
#b = numpy.bitwise_xor(data, mask).data
b = numpy.bitwise_xor(data, mask).tostring()
if plen % 4:
#print("Partial unmask")
mask = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
offset=hlen, count=(plen % 4))
data = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
offset=pend - (plen % 4),
count=(plen % 4))
c = numpy.bitwise_xor(data, mask).tostring()
return b + c
else:
# Slower fallback
mask = buf[hlen:hlen+4]
data = array.array('B')
mask = s2a(mask)
data.fromstring(buf[pstart:pend])
for i in range(len(data)):
data[i] ^= mask[i % 4]
return data.tostring()
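# Worked example of the XOR unmasking above: with mask bytes 01 02 03 04 and
# the masked payload 'abcd' (0x61 0x62 0x63 0x64), each payload byte is XORed
# with mask[i % 4], giving 0x60 0x60 0x60 0x60.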
@staticmethod
def encode_hybi(buf, opcode, base64=False):
""" Encode a HyBi style WebSocket frame.
Optional opcode:
0x0 - continuation
0x1 - text frame (base64 encode buf)
0x2 - binary frame (use raw buf)
0x8 - connection close
0x9 - ping
0xA - pong
"""
if base64:
buf = b64encode(buf)
b1 = 0x80 | (opcode & 0x0f) # FIN + opcode
payload_len = len(buf)
if payload_len <= 125:
header = pack('>BB', b1, payload_len)
elif payload_len > 125 and payload_len < 65536:
header = pack('>BBH', b1, 126, payload_len)
elif payload_len >= 65536:
header = pack('>BBQ', b1, 127, payload_len)
#print("Encoded: %s" % repr(header + buf))
return header + buf, len(header), 0
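# For example, a 3-byte binary frame (b'abc', opcode 0x2, base64=False) gets
# the two-byte header pack('>BB', 0x82, 3): FIN set, opcode 2, payload length
# 3, so encode_hybi returns (header + buf, 2, 0).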
@staticmethod
def decode_hybi(buf, base64=False):
""" Decode HyBi style WebSocket packets.
Returns:
{'fin' : 0_or_1,
'opcode' : number,
'masked' : boolean,
'hlen' : header_bytes_number,
'length' : payload_bytes_number,
'payload' : decoded_buffer,
'left' : bytes_left_number,
'close_code' : number,
'close_reason' : string}
"""
f = {'fin' : 0,
'opcode' : 0,
'masked' : False,
'hlen' : 2,
'length' : 0,
'payload' : None,
'left' : 0,
'close_code' : 1000,
'close_reason' : ''}
blen = len(buf)
f['left'] = blen
if blen < f['hlen']:
return f # Incomplete frame header
b1, b2 = unpack_from(">BB", buf)
f['opcode'] = b1 & 0x0f
f['fin'] = (b1 & 0x80) >> 7
f['masked'] = (b2 & 0x80) >> 7
f['length'] = b2 & 0x7f
if f['length'] == 126:
f['hlen'] = 4
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = unpack_from('>xxH', buf)
elif f['length'] == 127:
f['hlen'] = 10
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = unpack_from('>xxQ', buf)
full_len = f['hlen'] + f['masked'] * 4 + f['length']
if blen < full_len: # Incomplete frame
return f # Incomplete frame header
# Number of bytes that are part of the next frame(s)
f['left'] = blen - full_len
# Process 1 frame
if f['masked']:
# unmask payload
f['payload'] = WebSocketServer.unmask(buf, f['hlen'],
f['length'])
else:
print("Unmasked frame: %s" % repr(buf))
f['payload'] = buf[(f['hlen'] + f['masked'] * 4):full_len]
if base64 and f['opcode'] in [1, 2]:
try:
f['payload'] = b64decode(f['payload'])
except:
print("Exception while b64decoding buffer: %s" %
repr(buf))
raise
if f['opcode'] == 0x08:
if f['length'] >= 2:
f['close_code'] = unpack_from(">H", f['payload'])[0]
if f['length'] > 3:
f['close_reason'] = f['payload'][2:]
return f
@staticmethod
def encode_hixie(buf):
return s2b("\x00" + b2s(b64encode(buf)) + "\xff"), 1, 1
@staticmethod
def decode_hixie(buf):
end = buf.find(s2b('\xff'))
return {'payload': b64decode(buf[1:end]),
'hlen': 1,
'masked': False,
'length': end - 1,
'left': len(buf) - (end + 1)}
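# Illustrative Hixie round trip: encode_hixie(s2b('abc')) frames the base64
# payload as '\x00YWJj\xff' (header length 1, trailer length 1);
# decode_hixie() of that buffer recovers payload 'abc' with hlen 1 and
# left 0.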
@staticmethod
def gen_md5(keys):
""" Generate hash value for WebSockets hixie-76. """
key1 = keys['Sec-WebSocket-Key1']
key2 = keys['Sec-WebSocket-Key2']
key3 = keys['key3']
spaces1 = key1.count(" ")
spaces2 = key2.count(" ")
num1 = int("".join([c for c in key1 if c.isdigit()])) / spaces1
num2 = int("".join([c for c in key2 if c.isdigit()])) / spaces2
return b2s(md5(pack('>II8s',
int(num1), int(num2), key3)).digest())
#
# WebSocketServer logging/output functions
#
def traffic(self, token="."):
""" Show traffic flow in verbose mode. """
if self.verbose and not self.daemon:
sys.stdout.write(token)
sys.stdout.flush()
def msg(self, msg):
""" Output message with handler_id prefix. """
if not self.daemon:
print("% 3d: %s" % (self.handler_id, msg))
def vmsg(self, msg):
""" Same as msg() but only if verbose. """
if self.verbose:
self.msg(msg)
#
# Main WebSocketServer methods
#
def send_frames(self, bufs=None):
""" Encode and send WebSocket frames. Any frames already
queued will be sent first. If bufs is not set then only queued
frames will be sent. Returns the number of pending frames that
could not be fully sent. If the returned value is greater
than 0, the caller should call again when the socket is
ready. """
tdelta = int(time.time()*1000) - self.start_time
if bufs:
for buf in bufs:
if self.version.startswith("hybi"):
if self.base64:
encbuf, lenhead, lentail = self.encode_hybi(
buf, opcode=1, base64=True)
else:
encbuf, lenhead, lentail = self.encode_hybi(
buf, opcode=2, base64=False)
else:
encbuf, lenhead, lentail = self.encode_hixie(buf)
if self.rec:
self.rec.write("%s,\n" %
repr("{%s{" % tdelta
+ encbuf[lenhead:len(encbuf)-lentail]))
self.send_parts.append(encbuf)
while self.send_parts:
# Send pending frames
buf = self.send_parts.pop(0)
sent = self.client.send(buf)
if sent == len(buf):
self.traffic("<")
else:
self.traffic("<.")
self.send_parts.insert(0, buf[sent:])
break
return len(self.send_parts)
def recv_frames(self):
""" Receive and decode WebSocket frames.
Returns:
(bufs_list, closed_string)
"""
closed = False
bufs = []
tdelta = int(time.time()*1000) - self.start_time
buf = self.client.recv(self.buffer_size)
if len(buf) == 0:
closed = {'code': 1000, 'reason': "Client closed abruptly"}
return bufs, closed
if self.recv_part:
# Add partially received frames to current read buffer
buf = self.recv_part + buf
self.recv_part = None
while buf:
if self.version.startswith("hybi"):
frame = self.decode_hybi(buf, base64=self.base64)
#print("Received buf: %s, frame: %s" % (repr(buf), frame))
if frame['payload'] is None:
# Incomplete/partial frame
self.traffic("}.")
if frame['left'] > 0:
self.recv_part = buf[-frame['left']:]
break
else:
if frame['opcode'] == 0x8: # connection close
closed = {'code': frame['close_code'],
'reason': frame['close_reason']}
break
else:
if buf[0:2] == s2b('\xff\x00'):
closed = {'code': 1000,
'reason': "Client sent orderly close frame"}
break
elif buf[0:2] == s2b('\x00\xff'):
buf = buf[2:]
continue # No-op
elif buf.count(s2b('\xff')) == 0:
# Partial frame
self.traffic("}.")
self.recv_part = buf
break
frame = self.decode_hixie(buf)
self.traffic("}")
if self.rec:
start = frame['hlen']
end = frame['hlen'] + frame['length']
if frame['masked']:
recbuf = WebSocketServer.unmask(buf, frame['hlen'],
frame['length'])
else:
recbuf = buf[frame['hlen']:frame['hlen'] +
frame['length']]
self.rec.write("%s,\n" %
repr("}%s}" % tdelta + recbuf))
bufs.append(frame['payload'])
if frame['left']:
buf = buf[-frame['left']:]
else:
buf = ''
return bufs, closed
def send_close(self, code=1000, reason=''):
""" Send a WebSocket orderly close frame. """
if self.version.startswith("hybi"):
msg = pack(">H%ds" % len(reason), code, reason)
buf, h, t = self.encode_hybi(msg, opcode=0x08, base64=False)
self.client.send(buf)
elif self.version == "hixie-76":
buf = s2b('\xff\x00')
self.client.send(buf)
# No orderly close for 75
def do_websocket_handshake(self, headers, path):
h = self.headers = headers
self.path = path
prot = 'WebSocket-Protocol'
protocols = h.get('Sec-'+prot, h.get(prot, '')).split(',')
ver = h.get('Sec-WebSocket-Version')
if ver:
# HyBi/IETF version of the protocol
# HyBi-07 report version 7
# HyBi-08 - HyBi-12 report version 8
# HyBi-13 reports version 13
if ver in ['7', '8', '13']:
self.version = "hybi-%02d" % int(ver)
else:
raise self.EClose('Unsupported protocol version %s' % ver)
key = h['Sec-WebSocket-Key']
# Choose binary if client supports it
if 'binary' in protocols:
self.base64 = False
elif 'base64' in protocols:
self.base64 = True
else:
raise self.EClose("Client must support 'binary' or 'base64' protocol")
# Generate the hash value for the accept header
accept = b64encode(sha1(s2b(key + self.GUID)).digest())
response = self.server_handshake_hybi % b2s(accept)
if self.base64:
response += "Sec-WebSocket-Protocol: base64\r\n"
else:
response += "Sec-WebSocket-Protocol: binary\r\n"
response += "\r\n"
else:
# Hixie version of the protocol (75 or 76)
if h.get('key3'):
trailer = self.gen_md5(h)
pre = "Sec-"
self.version = "hixie-76"
else:
trailer = ""
pre = ""
self.version = "hixie-75"
# We only support base64 in the Hixie era
self.base64 = True
response = self.server_handshake_hixie % (pre,
h['Origin'], pre, self.scheme, h['Host'], path)
if 'base64' in protocols:
response += "%sWebSocket-Protocol: base64\r\n" % pre
else:
self.msg("Warning: client does not report 'base64' protocol support")
response += "\r\n" + trailer
return response
def do_handshake(self, sock, address):
"""
do_handshake does the following:
- Peek at the first few bytes from the socket.
- If the connection is Flash policy request then answer it,
close the socket and return.
- If the connection is an HTTPS/SSL/TLS connection then SSL
wrap the socket.
- Read from the (possibly wrapped) socket.
- If we have received a HTTP GET request and the webserver
functionality is enabled, answer it, close the socket and
return.
- Assume we have a WebSockets connection, parse the client
handshake data.
- Send a WebSockets handshake server response.
- Return the socket for this WebSocket client.
"""
stype = ""
ready = select.select([sock], [], [], 3)[0]
if not ready:
raise self.EClose("ignoring socket not ready")
# Peek, but do not read the data so that we have an opportunity
# to SSL wrap the socket first
handshake = sock.recv(1024, socket.MSG_PEEK)
#self.msg("Handshake [%s]" % handshake)
if handshake == "":
raise self.EClose("ignoring empty handshake")
elif handshake.startswith(s2b("<policy-file-request/>")):
# Answer Flash policy request
handshake = sock.recv(1024)
sock.send(s2b(self.policy_response))
raise self.EClose("Sending flash policy response")
elif handshake[0] in ("\x16", "\x80", 22, 128):
# SSL wrap the connection
if not ssl:
raise self.EClose("SSL connection but no 'ssl' module")
if not os.path.exists(self.cert):
raise self.EClose("SSL connection but '%s' not found"
% self.cert)
retsock = None
try:
retsock = ssl.wrap_socket(
sock,
server_side=True,
certfile=self.cert,
keyfile=self.key)
except ssl.SSLError:
_, x, _ = sys.exc_info()
if x.args[0] == ssl.SSL_ERROR_EOF:
if len(x.args) > 1:
raise self.EClose(x.args[1])
else:
raise self.EClose("Got SSL_ERROR_EOF")
else:
raise
self.scheme = "wss"
stype = "SSL/TLS (wss://)"
elif self.ssl_only:
raise self.EClose("non-SSL connection received but disallowed")
else:
retsock = sock
self.scheme = "ws"
stype = "Plain non-SSL (ws://)"
wsh = WSRequestHandler(retsock, address, not self.web)
if wsh.last_code == 101:
# Continue on to handle WebSocket upgrade
pass
elif wsh.last_code == 405:
raise self.EClose("Normal web request received but disallowed")
elif wsh.last_code < 200 or wsh.last_code >= 300:
raise self.EClose(wsh.last_message)
elif self.verbose:
raise self.EClose(wsh.last_message)
else:
raise self.EClose("")
response = self.do_websocket_handshake(wsh.headers, wsh.path)
self.msg("%s: %s WebSocket connection" % (address[0], stype))
self.msg("%s: Version %s, base64: '%s'" % (address[0],
self.version, self.base64))
if self.path != '/':
self.msg("%s: Path: '%s'" % (address[0], self.path))
# Send server WebSockets handshake response
#self.msg("sending response [%s]" % response)
retsock.send(s2b(response))
# Return the WebSockets socket which may be SSL wrapped
return retsock
#
# Events that can/should be overridden in sub-classes
#
def started(self):
""" Called after WebSockets startup """
self.vmsg("WebSockets server started")
def poll(self):
""" Run periodically while waiting for connections. """
#self.vmsg("Running poll()")
pass
def fallback_SIGCHLD(self, sig, stack):
# Reap zombies when using os.fork() (python 2.4)
self.vmsg("Got SIGCHLD, reaping zombies")
try:
result = os.waitpid(-1, os.WNOHANG)
while result[0]:
self.vmsg("Reaped child process %s" % result[0])
result = os.waitpid(-1, os.WNOHANG)
except (OSError):
pass
def do_SIGINT(self, sig, stack):
self.msg("Got SIGINT, exiting")
sys.exit(0)
def top_new_client(self, startsock, address):
""" Do something with a WebSockets client connection. """
# Initialize per client settings
self.send_parts = []
self.recv_part = None
self.base64 = False
self.rec = None
self.start_time = int(time.time()*1000)
# handler process
try:
try:
self.client = self.do_handshake(startsock, address)
if self.record:
# Record raw frame data as JavaScript array
fname = "%s.%s" % (self.record,
self.handler_id)
self.msg("opening record file: %s" % fname)
self.rec = open(fname, 'w+')
encoding = "binary"
if self.base64: encoding = "base64"
self.rec.write("var VNC_frame_encoding = '%s';\n"
% encoding)
self.rec.write("var VNC_frame_data = [\n")
self.ws_connection = True
self.new_client()
except self.CClose:
# Close the client
_, exc, _ = sys.exc_info()
if self.client:
self.send_close(exc.args[0], exc.args[1])
except self.EClose:
_, exc, _ = sys.exc_info()
# Connection was not a WebSockets connection
if exc.args[0]:
self.msg("%s: %s" % (address[0], exc.args[0]))
except Exception:
_, exc, _ = sys.exc_info()
self.msg("handler exception: %s" % str(exc))
if self.verbose:
self.msg(traceback.format_exc())
finally:
if self.rec:
self.rec.write("'EOF'];\n")
self.rec.close()
if self.client and self.client != startsock:
# Close the SSL wrapped socket
# Original socket closed by caller
self.client.close()
def new_client(self):
""" Do something with a WebSockets client connection. """
raise("WebSocketServer.new_client() must be overloaded")
def start_server(self):
"""
Daemonize if requested. Listen for connections. Run
do_handshake() method for each connection. If the connection
is a WebSockets client then call new_client() method (which must
be overridden) for each new client connection.
"""
lsock = self.socket(self.listen_host, self.listen_port, False, self.prefer_ipv6)
if self.daemon:
self.daemonize(keepfd=lsock.fileno(), chdir=self.web)
self.started() # Some things need to happen after daemonizing
# Allow override of SIGINT
signal.signal(signal.SIGINT, self.do_SIGINT)
if not multiprocessing:
# os.fork() (python 2.4) child reaper
signal.signal(signal.SIGCHLD, self.fallback_SIGCHLD)
last_active_time = self.launch_time
while True:
try:
try:
self.client = None
startsock = None
pid = err = 0
child_count = 0
if multiprocessing and self.idle_timeout:
child_count = len(multiprocessing.active_children())
time_elapsed = time.time() - self.launch_time
if self.timeout and time_elapsed > self.timeout:
self.msg('listener exit due to --timeout %s'
% self.timeout)
break
if self.idle_timeout:
idle_time = 0
if child_count == 0:
idle_time = time.time() - last_active_time
else:
idle_time = 0
last_active_time = time.time()
if idle_time > self.idle_timeout and child_count == 0:
self.msg('listener exit due to --idle-timeout %s'
% self.idle_timeout)
break
try:
self.poll()
ready = select.select([lsock], [], [], 1)[0]
if lsock in ready:
startsock, address = lsock.accept()
else:
continue
except Exception:
_, exc, _ = sys.exc_info()
if hasattr(exc, 'errno'):
err = exc.errno
elif hasattr(exc, 'args'):
err = exc.args[0]
else:
err = exc[0]
if err == errno.EINTR:
self.vmsg("Ignoring interrupted syscall")
continue
else:
raise
if self.run_once:
# Run in same process if run_once
self.top_new_client(startsock, address)
if self.ws_connection :
self.msg('%s: exiting due to --run-once'
% address[0])
break
elif multiprocessing:
self.vmsg('%s: new handler Process' % address[0])
p = multiprocessing.Process(
target=self.top_new_client,
args=(startsock, address))
p.start()
# child will not return
else:
# python 2.4
self.vmsg('%s: forking handler' % address[0])
pid = os.fork()
if pid == 0:
# child handler process
self.top_new_client(startsock, address)
break # child process exits
# parent process
self.handler_id += 1
except KeyboardInterrupt:
_, exc, _ = sys.exc_info()
print("In KeyboardInterrupt")
pass
except SystemExit:
_, exc, _ = sys.exc_info()
print("In SystemExit")
break
except Exception:
_, exc, _ = sys.exc_info()
self.msg("handler exception: %s" % str(exc))
if self.verbose:
self.msg(traceback.format_exc())
finally:
if startsock:
startsock.close()
# Close listen port
self.vmsg("Closing socket listening at %s:%s"
% (self.listen_host, self.listen_port))
lsock.close()
# HTTP handler with WebSocket upgrade support
class WSRequestHandler(SimpleHTTPRequestHandler):
def __init__(self, req, addr, only_upgrade=False):
self.only_upgrade = only_upgrade # only allow upgrades
SimpleHTTPRequestHandler.__init__(self, req, addr, object())
def do_GET(self):
if (self.headers.get('upgrade') and
self.headers.get('upgrade').lower() == 'websocket'):
if (self.headers.get('sec-websocket-key1') or
self.headers.get('websocket-key1')):
# For Hixie-76 read out the key hash
self.headers.__setitem__('key3', self.rfile.read(8))
# Just indicate that a WebSocket upgrade is needed
self.last_code = 101
self.last_message = "101 Switching Protocols"
elif self.only_upgrade:
# Normal web request responses are disabled
self.last_code = 405
self.last_message = "405 Method Not Allowed"
else:
SimpleHTTPRequestHandler.do_GET(self)
def send_response(self, code, message=None):
# Save the status code
self.last_code = code
SimpleHTTPRequestHandler.send_response(self, code, message)
def log_message(self, f, *args):
# Save instead of printing
self.last_message = f % args
|
|
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
try:
import colorama
def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
except ImportError:
def bright(text): return text
def dim(text): return text
def white(text) : return text
def blue(text) : return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
if 'nightly' in sys.argv:
from setuptools import setup
sys.argv.remove('nightly')
with open('__conda_version__.txt', 'r') as f:
version = f.read().rstrip()
vers_file = os.path.join('bokeh', '__conda_version__.py')
with open(vers_file, 'w') as f:
f.write("conda_version=" + "'" + version + "'")
else:
from distutils.core import setup
from distutils import dir_util
# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
if sys.version_info[0] < 3:
input = raw_input
# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-' # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
package_data = []
def package_path(path, filters=()):
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
package_data.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
package_data.append(join(path, f))
# You can't install Bokeh in a virtualenv because of the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python)."""
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
prefixes = [sys.prefix, sys.exec_prefix]
sitepackages = []
seen = set()
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys.prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version_info[0] >= 3:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
sitepackages.append(os.path.abspath(sitedir))
sitepackages = [p for p in sitepackages if os.path.isdir(p)]
return sitepackages
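# Illustrative only: on a typical 64-bit Linux install the function above
# might return paths such as '/usr/lib/python2.7/site-packages' and
# '/usr/lib/python2.7/dist-packages'; the exact entries depend on the
# platform, prefix, and Python build.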
def check_remove_bokeh_install(site_packages):
bokeh_path = join(site_packages, "bokeh")
if not (exists(bokeh_path) and isdir(bokeh_path)):
return
prompt = "Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path
val = input(prompt)
if val == "y":
print("Removing old bokeh install...", end=" ")
try:
shutil.rmtree(bokeh_path)
print("Done")
except (IOError, OSError):
print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
sys.exit(-1)
else:
print("Not removing old bokeh install")
sys.exit(1)
def remove_bokeh_pth(path_file):
if exists(path_file):
try:
os.remove(path_file)
except (IOError, OSError):
print("Unable to remove old path file at %s, exiting" % path_file)
sys.exit(-1)
return True
return False
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned error message:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
print("Building BokehJS... ", end="")
sys.stdout.flush()
os.chdir('bokehjs')
if sys.platform != "win32":
cmd = [join('node_modules', '.bin', 'gulp'), 'build']
else:
cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
t0 = time.time()
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
print(BUILD_EXEC_FAIL_MSG % (cmd, e))
sys.exit(1)
finally:
os.chdir('..')
result = proc.wait()
t1 = time.time()
if result != 0:
indented_msg = ""
msg = proc.stderr.read().decode('ascii', errors='ignore')
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_FAIL_MSG % red(msg))
sys.exit(1)
indented_msg = ""
msg = proc.stdout.read().decode('ascii', errors='ignore')
pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
for line in msg.strip().split("\n"):
stamp, txt = pat.match(line).groups()
indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_SUCCESS_MSG % indented_msg)
print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
print()
print("Build artifact sizes:")
try:
blddir = join("bokehjs", "build")
bkjs_size = os.stat(join(blddir, "js", "bokeh.js")).st_size / 2**10
bkjs_min_size = os.stat(join(blddir, "js", "bokeh.min.js")).st_size / 2**10
bkcss_size = os.stat(join(blddir, "css", "bokeh.css")).st_size / 2**10
bkcss_min_size = os.stat(join(blddir, "css", "bokeh.min.css")).st_size / 2**10
print(" - bokeh.js : %6.1f KB" % bkjs_size)
print(" - bokeh.css : %6.1f KB" % bkcss_size)
print(" - bokeh.min.js : %6.1f KB" % bkjs_min_size)
print(" - bokeh.min.css : %6.1f KB" % bkcss_min_size)
except Exception as e:
print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
target_jsdir = join(SERVER, 'static', 'js')
target_cssdir = join(SERVER, 'static', 'css')
STATIC_ASSETS = [
join(JS, 'bokeh.js'),
join(JS, 'bokeh.min.js'),
join(CSS, 'bokeh.css'),
join(CSS, 'bokeh.min.css'),
]
if not all([exists(a) for a in STATIC_ASSETS]):
print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
sys.exit(1)
if exists(target_jsdir):
shutil.rmtree(target_jsdir)
shutil.copytree(JS, target_jsdir)
if exists(target_cssdir):
shutil.rmtree(target_cssdir)
shutil.copytree(CSS, target_cssdir)
def clean():
print("Removing prior-built items...", end=" ")
build_dir = 'build/lib/bokeh'
if os.path.exists(build_dir):
dir_util.remove_tree(build_dir)
for root, dirs, files in os.walk('.'):
for item in files:
if item.endswith('.pyc'):
os.remove(os.path.join(root, item))
print("Done")
def get_user_jsargs():
print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
mapping = {"1": True, "2": False}
value = input("Choice? ")
while value not in mapping:
print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
value = input("Choice? ")
return mapping[value]
def parse_jsargs():
options = ('install', 'develop', 'sdist', 'egg_info', 'build')
installing = any(arg in sys.argv for arg in options)
if '--build_js' in sys.argv:
if not installing:
print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
sys.exit(1)
jsbuild = True
sys.argv.remove('--build_js')
elif '--install_js' in sys.argv:
# Note that --install_js can be used by itself (without sdist/install/develop)
jsbuild = False
sys.argv.remove('--install_js')
else:
if installing:
jsbuild = get_user_jsargs()
else:
jsbuild = False
return jsbuild
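# Illustrative only: with the options handled above, typical invocations are
#   python setup.py install --build_js    (build BokehJS, then install)
#   python setup.py develop --install_js  (reuse the last BokehJS build)
# Running 'install' or 'develop' with neither flag falls back to the
# interactive get_user_jsargs() prompt.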
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Aliases for build_js and install_js
for i in range(len(sys.argv)):
if sys.argv[i] == '--build-js':
sys.argv[i] = '--build_js'
if sys.argv[i] == '--install-js':
sys.argv[i] = '--install_js'
# Set up this checkout or source archive with the right BokehJS files.
if sys.version_info[:2] < (2, 6):
raise RuntimeError("Bokeh requires python >= 2.6")
# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
install_js()
sys.exit(0)
# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
if "--install_js" in sys.argv:
print("Removing '--install_js' incompatible with 'sdist'")
sys.argv.remove('--install_js')
if "--build_js" not in sys.argv:
print("Adding '--build_js' required for 'sdist'")
sys.argv.append('--build_js')
# check for package install, set jsinstall to False to skip prompt
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
if "--build_js" in sys.argv or "--install_js" in sys.argv:
print("BokehJS source code is not shipped in sdist packages; "
"building/installing from the bokehjs source directory is disabled. "
"To build or develop BokehJS yourself, you must clone the full "
"Bokeh repository from https://github.com/bokeh/bokeh")
if "--build_js" in sys.argv:
sys.argv.remove('--build_js')
if "--install_js" in sys.argv:
sys.argv.remove('--install_js')
jsbuild = False
jsinstall = False
else:
jsbuild = parse_jsargs()
if jsbuild:
build_js()
if jsinstall:
install_js()
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics')
package_path(join(SERVER, 'static'))
package_path(join(SERVER, '_templates'))
package_path(join(ROOT, 'bokeh', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)
package_path(join(ROOT, 'bokeh', 'server', 'redis.conf'))
scripts = ['bokeh-server', 'websocket_worker.py']
if '--user' in sys.argv:
site_packages = site.USER_SITE
else:
site_packages = getsitepackages()[0]
path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))
print()
if 'develop' in sys.argv:
check_remove_bokeh_install(site_packages)
with open(path_file, "w+") as f:
f.write(path)
print("Installing Bokeh for development:")
print(" - writing path '%s' to %s" % (path, path_file))
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
sys.exit()
elif 'clean' in sys.argv:
clean()
elif 'install' in sys.argv:
pth_removed = remove_bokeh_pth(path_file)
print("Installing Bokeh:")
if pth_removed:
print(" - removed path file at %s" % path_file)
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
if jsinstall:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build_js build and install a fresh BokehJS")
print(" --install_js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print()
REQUIRES = [
'six>=1.5.2',
'requests>=1.2.3',
'PyYAML>=3.10',
'python-dateutil>=2.1',
'Jinja2>=2.7',
'numpy>=1.7.1',
'pandas>=0.11.0',
'Flask>=0.10.1',
'pyzmq>=14.3.1',
'tornado>=4.0.1',
]
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()
setup(
name='bokeh',
version=_version,
cmdclass=_cmdclass,
packages=[
'bokeh',
'bokeh.models',
'bokeh.models.tests',
'bokeh.models.widgets',
'bokeh.charts',
'bokeh.charts.builder',
'bokeh.charts.builder.tests',
'bokeh.charts.tests',
'bokeh._legacy_charts',
'bokeh._legacy_charts.builder',
'bokeh._legacy_charts.builder.tests',
'bokeh._legacy_charts.tests',
'bokeh.compat',
'bokeh.compat.mplexporter',
'bokeh.compat.mplexporter.renderers',
'bokeh.crossfilter',
'bokeh.sampledata',
'bokeh.server',
'bokeh.server.models',
'bokeh.server.storage',
'bokeh.server.tests',
'bokeh.server.utils',
'bokeh.server.views',
'bokeh.server.websocket',
'bokeh.server.zmq',
'bokeh.sphinxext',
'bokeh.tests',
'bokeh.transforms',
'bokeh.util',
'bokeh.util.tests',
'bokeh.validation',
],
package_data={'bokeh': package_data},
author='Continuum Analytics',
author_email='[email protected]',
url='http://github.com/bokeh/bokeh',
description='Statistical and novel interactive HTML plots for Python',
license='New BSD',
scripts=scripts,
zip_safe=False,
install_requires=REQUIRES
)
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import re
import unittest
from iptest import IronPythonTestCase, is_cli, run_test
class ReTest(IronPythonTestCase):
def test_none(self):
for x in 'compile search match split findall finditer'.split():
y = getattr(re, x)
self.assertRaises(TypeError, y, None)
self.assertRaises(TypeError, y, None, None)
self.assertRaises(TypeError, y, None, 'abc')
self.assertRaises(TypeError, y, 'abc', None)
# Other exceptional input tests
for x in (re.sub, re.subn):
self.assertRaises(TypeError, x, 'abc', None, 'abc')
self.assertRaises(TypeError, x, 'abc', None, None)
self.assertRaises(TypeError, x, None, 'abc', 'abc')
self.assertRaises(TypeError, x, 'abc', 'abc', None)
self.assertRaises(TypeError, re.escape, None)
def test_sanity_re(self):
'''
Basic sanity tests for the re module. Each module member is
used at least once.
'''
#compile
self.assertTrue(hasattr(re.compile("(abc){1}"), "pattern"))
self.assertTrue(hasattr(re.compile("(abc){1}", re.L), "pattern"))
self.assertTrue(hasattr(re.compile("(abc){1}", flags=re.L), "pattern"))
#I IGNORECASE  L LOCALE  M MULTILINE  S DOTALL  U UNICODE  X VERBOSE
flags = ["I", "IGNORECASE",
"L", "LOCALE",
"M", "MULTILINE",
"S", "DOTALL",
"U", "UNICODE",
"X", "VERBOSE"]
for f in flags:
self.assertTrue(hasattr(re, f))
#search
self.assertEqual(re.search("(abc){1}", ""), None)
self.assertEqual(re.search("(abc){1}", "abcxyz").span(), (0,3))
self.assertEqual(re.search("(abc){1}", "abcxyz", re.L).span(), (0,3))
self.assertEqual(re.search("(abc){1}", "abcxyz", flags=re.L).span(), (0,3))
self.assertEqual(re.search("(abc){1}", "xyzabc").span(), (3,6))
#match
self.assertEqual(re.match("(abc){1}", ""), None)
self.assertEqual(re.match("(abc){1}", "abcxyz").span(), (0,3))
self.assertEqual(re.match("(abc){1}", "abcxyz", re.L).span(), (0,3))
self.assertEqual(re.match("(abc){1}", "abcxyz", flags=re.L).span(), (0,3))
#split
self.assertEqual(re.split("(abc){1}", ""), [''])
self.assertEqual(re.split("(abc){1}", "abcxyz"), ['', 'abc', 'xyz'])
#maxsplit
self.assertEqual(re.split("(abc){1}", "abc", 0), ['', 'abc', ''])
for i in range(3):
self.assertEqual(re.split("(abc){1}", "abc", maxsplit=i), ['', 'abc', ''])
self.assertEqual(re.split("(abc){1}", "", maxsplit=i), [''])
self.assertEqual(re.split("(abc){1}", "abcxyz", maxsplit=i), ['', 'abc', 'xyz'])
self.assertEqual(re.split("(abc){1}", "abcxyzabc", maxsplit=0), ['', 'abc', 'xyz', 'abc', ''])
self.assertEqual(re.split("(abc){1}", "abcxyzabc", maxsplit=1), ['', 'abc', 'xyzabc'])
self.assertEqual(re.split("(abc){1}", "abcxyzabc", maxsplit=2), ['', 'abc', 'xyz', 'abc', ''])
#findall
self.assertEqual(re.findall("(abc){1}", ""), [])
self.assertEqual(re.findall("(abc){1}", "abcxyz"), ['abc'])
self.assertEqual(re.findall("(abc){1}", "abcxyz", re.L), ['abc'])
self.assertEqual(re.findall("(abc){1}", "abcxyz", flags=re.L), ['abc'])
self.assertEqual(re.findall("(abc){1}", "xyzabcabc"), ['abc', 'abc'])
#finditer
self.assertEqual([x.group() for x in re.finditer("(abc){1}", "")], [])
self.assertEqual([x.group() for x in re.finditer("(abc){1}", "abcxyz")], ['abc'])
self.assertEqual([x.group() for x in re.finditer("(abc){1}", "abcxyz", re.L)], ['abc'])
self.assertEqual([x.group() for x in re.finditer("(abc){1}", "abcxyz", flags=re.L)], ['abc'])
self.assertEqual([x.group() for x in re.finditer("(abc){1}", "xyzabcabc")], ['abc', 'abc'])
rex = re.compile("foo")
for m in rex.finditer("this is a foo and a foo bar"):
self.assertEqual((m.pos, m.endpos), (0, 27))
for m in rex.finditer(""):
self.assertEqual((m.pos, m.endpos), (0, 1))
for m in rex.finditer("abc"):
self.assertEqual((m.pos, m.endpos), (0, 4))
for m in rex.finditer("foo foo foo foo foo"):
self.assertEqual((m.pos, m.endpos), (0, 19))
#sub
self.assertEqual(re.sub("(abc){1}", "9", "abcd"), "9d")
self.assertEqual(re.sub("(abc){1}", "abcxyz",'abcd'), "abcxyzd")
self.assertEqual(re.sub("(abc){1}", "1", "abcd", 0), "1d")
self.assertEqual(re.sub("(abc){1}", "1", "abcd", count=0), "1d")
self.assertEqual(re.sub("(abc){1}", "1", "abcdabcd", 1), "1dabcd")
self.assertEqual(re.sub("(abc){1}", "1", "abcdabcd", 2), "1d1d")
self.assertEqual(re.sub("(abc){1}", "1", "ABCdabcd", 2, flags=re.I), "1d1d")
#subn
self.assertEqual(re.subn("(abc){1}", "9", "abcd"), ("9d", 1))
self.assertEqual(re.subn("(abc){1}", "abcxyz",'abcd'), ("abcxyzd",1))
self.assertEqual(re.subn("(abc){1}", "1", "abcd", 0), ("1d",1))
self.assertEqual(re.subn("(abc){1}", "1", "abcd", count=0), ("1d",1))
self.assertEqual(re.subn("(abc){1}", "1", "abcdabcd", 1), ("1dabcd",1))
self.assertEqual(re.subn("(abc){1}", "1", "abcdabcd", 2), ("1d1d",2))
self.assertEqual(re.subn("(abc){1}", "1", "ABCdabcd", 2, flags=re.I), ("1d1d",2))
#escape
self.assertEqual(re.escape("abc"), "abc")
self.assertEqual(re.escape(""), "")
self.assertEqual(re.escape("_"), "_")
self.assertEqual(re.escape("a_c"), "a_c")
#error
exc = re.error()
exc = re.error("some args")
#purge
re.purge()
def test_sanity_re_pattern(self):
'''
Basic sanity tests for the re module's Regular Expression
objects (i.e., Pattern in CPython). Each method/member is
utilized at least once.
'''
pattern = re.compile("(abc){1}")
#match
self.assertEqual(pattern.match(""), None)
self.assertEqual(pattern.match("abcxyz").span(), (0,3))
self.assertEqual(pattern.match("abc", 0).span(), (0,3))
self.assertEqual(pattern.match("abc", 0, 3).span(), (0,3))
self.assertEqual(pattern.match("abc", pos=0, endpos=3).span(), (0,3))
for i in [-1, -2, -5, -7, -8, -65536]:
for j in [3, 4, 5, 7, 8, 65536]:
self.assertEqual(pattern.match("abc", i, j).span(), (0,3))
self.assertRaises(OverflowError, lambda: pattern.match("abc", 0, 2**64).span())
self.assertRaises(OverflowError, lambda: pattern.match("abc", -(2**64), 3).span())
#search
self.assertEqual(pattern.search(""), None)
self.assertEqual(pattern.search("abcxyz").span(), (0,3))
self.assertEqual(pattern.search("abc", 0).span(), (0,3))
self.assertEqual(pattern.search("abc", 0, 3).span(), (0,3))
self.assertEqual(pattern.search("abc", pos=0, endpos=3).span(), (0,3))
self.assertEqual(pattern.search("xyzabc").span(), (3,6))
#split
self.assertEqual(pattern.split(""), [''])
self.assertEqual(pattern.split("abcxyz"), ['', 'abc', 'xyz'])
self.assertEqual(pattern.split("abc", 0), ['', 'abc', ''])
self.assertEqual(pattern.split("abc", maxsplit=0), ['', 'abc', ''])
self.assertEqual(pattern.split("abcxyzabc", maxsplit=1), ['', 'abc', 'xyzabc'])
#findall
self.assertEqual(pattern.findall(""), [])
self.assertEqual(pattern.findall("abcxyz"), ['abc'])
self.assertEqual(pattern.findall("abc", 0), ['abc'])
self.assertEqual(pattern.findall("abc", 0, 3), ['abc'])
self.assertEqual(pattern.findall("abc", pos=0, endpos=3), ['abc'])
self.assertEqual(pattern.findall("xyzabcabc"), ['abc', 'abc'])
#sub
self.assertEqual(pattern.sub("9", "abcd"), "9d")
self.assertEqual(pattern.sub("abcxyz",'abcd'), "abcxyzd")
self.assertEqual(pattern.sub("1", "abcd", 0), "1d")
self.assertEqual(pattern.sub("1", "abcd", count=0), "1d")
self.assertEqual(pattern.sub("1", "abcdabcd", 1), "1dabcd")
self.assertEqual(pattern.sub("1", "abcdabcd", 2), "1d1d")
#subn
self.assertEqual(pattern.subn("9", "abcd"), ("9d", 1))
self.assertEqual(pattern.subn("abcxyz",'abcd'), ("abcxyzd",1))
self.assertEqual(pattern.subn("1", "abcd", 0), ("1d",1))
self.assertEqual(pattern.subn("1", "abcd", count=0), ("1d",1))
self.assertEqual(pattern.subn("1", "abcdabcd", 1), ("1dabcd",1))
self.assertEqual(pattern.subn("1", "abcdabcd", 2), ("1d1d",2))
#flags
self.assertEqual(pattern.flags, re.U)
self.assertEqual(re.compile("(abc){1}", re.L).flags, re.L | re.U)
#groupindex
self.assertEqual(pattern.groupindex, {})
self.assertEqual(re.compile("(?P<abc>)(?P<bcd>)").groupindex, {'bcd': 2, 'abc': 1})
#pattern
self.assertEqual(pattern.pattern, "(abc){1}")
self.assertEqual(re.compile("").pattern, "")
def test_groupindex_empty(self):
test_list = [ ".", "^", "$", "1*", "2+", "3?", "4*?", "5+?", "6??", "7{1}", "8{1,2}",
"9{1,2}?", "[a-z]", "|", "(...)", "(?:abc)",
"\(\?P\<Blah\>abc\)", "(?#...)", "(?=...)", "(?!...)", "(?<=...)",
"(?<!...)", "\1", "\A", "\d"
]
for x in test_list:
self.assertEqual(re.compile(x).groupindex, {})
def test_sanity_re_match(self):
'''
Basic sanity tests for the re module's Match objects. Each method/member
is utilized at least once.
'''
pattern = re.compile("(abc){1}")
match_obj = pattern.match("abcxyzabc123 and some other words...")
#expand
self.assertEqual(match_obj.expand("\1\g<1>.nt"), '\x01abc.nt')
#group
self.assertEqual(match_obj.group(), 'abc')
self.assertEqual(match_obj.group(1), 'abc')
#groups
self.assertEqual(match_obj.groups(), ('abc',))
self.assertEqual(match_obj.groups(1), ('abc',))
self.assertEqual(match_obj.groups(99), ('abc',))
#groupdict
self.assertEqual(match_obj.groupdict(), {})
self.assertEqual(match_obj.groupdict(None), {})
self.assertEqual(re.compile("(abc)").match("abcxyzabc123 and...").groupdict(), {})
#start
self.assertEqual(match_obj.start(), 0)
self.assertEqual(match_obj.start(1), 0)
#end
self.assertEqual(match_obj.end(), 3)
self.assertEqual(match_obj.end(1), 3)
#span
self.assertEqual(match_obj.span(), (0,3))
self.assertEqual(match_obj.span(1), (0,3))
#pos
self.assertEqual(match_obj.pos, 0)
#endpos
self.assertEqual(match_obj.endpos, 36)
#lastindex
self.assertEqual(match_obj.lastindex, 1)
#lastgroup
#CodePlex Work Item 5518
#self.assertEqual(match_obj.lastgroup, None)
#re
self.assertTrue(match_obj.re==pattern)
#string
self.assertEqual(match_obj.string, "abcxyzabc123 and some other words...")
def test_comment(self):
'''
(?#...)
'''
pattern = "a(?#foo)bc"
c = re.compile(pattern)
self.assertEqual(c.findall("abc"), ['abc'])
pattern = "a(?#)bc"
c = re.compile(pattern)
self.assertEqual(c.findall("abc"), ['abc'])
pattern = "a(?#foo)bdc"
c = re.compile(pattern)
self.assertEqual(len(c.findall("abc")), 0)
def test_optional_paren(self):
pattern = r"""\(?\w+\)?"""
c = re.compile(pattern, re.X)
self.assertEqual(c.findall('abc'), ['abc'])
def test_back_match(self):
p = re.compile('(?P<grp>.+?)(?P=grp)')
self.assertEqual(p.match('abcabc').groupdict(), {'grp':'abc'})
p = re.compile(r'(?P<delim>[%$])(?P<escaped>(?P=delim))')
self.assertEqual(p.match('$$').groupdict(), {'escaped': '$', 'delim': '$'})
self.assertEqual(p.match('$%'), None)
p = re.compile(r'(?P<grp>ab)(a(?P=grp)b)')
self.assertEqual(p.match('abaabb').groups(), ('ab', 'aabb'))
def test_expand(self):
self.assertEqual(re.match("(a)(b)", "ab").expand("blah\g<1>\g<2>"), "blahab")
self.assertEqual(re.match("(a)()", "ab").expand("blah\g<1>\g<2>\n\r\t\\\\"),'blaha\n\r\t\\')
self.assertEqual(re.match("(a)()", "ab").expand(""),'')
def test_sub(self):
x = '\n #region Generated Foo\nblah\nblah#end region'
a = re.compile("^([ \t]+)#region Generated Foo.*?#end region", re.MULTILINE|re.DOTALL)
self.assertEqual(a.sub("xx", x), "\nxx") # should match successfully
self.assertEqual(a.sub("\\x12", x), "\n\\x12") # should match, but shouldn't un-escape for \x
#if optional count arg is 0 then all occurrences should be replaced
self.assertEqual('bbbb', re.sub("a","b","abab", 0))
self.assertEqual(re.sub(r'(?P<id>b)', '\g<id>\g<id>yadayada', 'bb'), 'bbyadayadabbyadayada')
self.assertEqual(re.sub(r'(?P<id>b)', '\g<1>\g<id>yadayada', 'bb'), 'bbyadayadabbyadayada')
self.assertRaises(IndexError, re.sub, r'(?P<id>b)', '\g<1>\g<i2>yadayada', 'bb')
# the native implementation raises a sre_constants.error instead, indicating an
# invalid group reference
if is_cli:
self.assertRaises(IndexError, re.sub, r'(?P<id>b)', '\g<1>\g<30>yadayada', 'bb')
self.assertEqual(re.sub('x*', '-', 'abc'), '-a-b-c-')
self.assertEqual(re.subn('x*', '-', 'abc'), ('-a-b-c-', 4))
self.assertEqual(re.sub('a*', '-', 'abc'), '-b-c-')
self.assertEqual(re.subn('a*', '-', 'abc'), ('-b-c-', 3))
self.assertEqual(re.sub('a*', '-', 'a'), '-')
self.assertEqual(re.subn('a*', '-', 'a'), ('-', 1))
self.assertEqual(re.sub("a*", "-", "abaabb"), '-b-b-b-')
self.assertEqual(re.subn("a*", "-", "abaabb"), ('-b-b-b-', 4))
self.assertEqual(re.sub("(a*)b", "-", "abaabb"), '---')
self.assertEqual(re.subn("(a*)b", "-", "abaabb"), ('---', 3))
self.assertEqual(re.subn("(ab)*", "cd", "abababababab", 10), ('cd', 1))
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.subn('x*', '-', 'abxd'), ('-a-b-d-', 4))
self.assertTrue(re.sub('([^aeiou])y$', r'\lies', 'vacancy') == 'vacan\\lies')
self.assertTrue(re.sub('([^aeiou])y$', r'\1ies', 'vacancy') == 'vacancies')
self.assertEqual(re.sub("a+", "\n\t\\\?\"\b", "abc"), '\n\t\\?"\x08bc')
self.assertEqual(re.sub("a+", r"\n\t\\\?\"\b", "abc"), '\n\t\\\\?\\"\x08bc')
self.assertEqual(re.sub("a+", "\n\t\\\\\\?\"\b", "abc"), '\n\t\\\\?"\x08bc')
def test_dot(self):
a = re.compile('.')
self.assertEqual(a.groupindex, {})
p = re.compile('.')
z = []
for c in p.finditer('abc'): z.append((c.start(), c.end()))
z.sort()
self.assertEqual(z, [(0,1), (1,2), (2,3)])
def test_x(self):
nonmatchingp = re.compile('x')
self.assertEqual(nonmatchingp.search('ecks', 1, 4), None)
def test_match(self):
p = re.compile('.')
self.assertEqual(p.match('bazbar', 1,2).span(), (1,2))
def test_span(self):
self.assertEqual(re.match('(baz)(bar)(m)', "bazbarmxyz").span(2),(3, 6))
def test_regs(self):
self.assertEqual(re.match('(baz)(bar)(m)', "bazbarmxyz").regs,
((0, 7), (0, 3), (3, 6), (6, 7)))
self.assertEqual(re.match('bazbar(mm)+(abc)(xyz)', "bazbarmmmmabcxyz123456abc").regs,
((0, 16), (8, 10), (10, 13), (13, 16)))
def test_endpos(self):
self.assertEqual(re.match('(baz)(bar)(m)', "bazbarmx").endpos, 8)
def test_re(self):
#Just ensure it's there for now
stuff = re.match('a(baz)(bar)(m)', "abazbarmx")
self.assertTrue(hasattr(stuff, "re"))
self.assertTrue(hasattr(stuff.re, "sub"))
def test_pos(self):
self.assertEqual(re.match('(baz)(bar)(m)', "bazbarmx").pos, 0)
def test_startandend(self):
m = re.match(r'(a)|(b)', 'b')
self.assertEqual(m.groups(), (None, 'b'))
self.assertEqual(m.group(0), "b")
self.assertEqual(m.start(0), 0)
self.assertEqual(m.end(0), 1)
self.assertEqual(m.start(1), -1)
self.assertEqual(m.end(1), -1)
m = re.match(".*", '')
self.assertEqual(m.groups(), ())
self.assertEqual(m.start(0), 0)
self.assertEqual(m.end(0), 0)
self.assertRaises(IndexError, m.group, "112")
self.assertRaises(IndexError, m.group, 112)
self.assertRaises(IndexError, m.group, "-1")
self.assertRaises(IndexError, m.group, -1)
self.assertRaises(IndexError, m.start, 112)
self.assertRaises(IndexError, m.start, -1)
self.assertRaises(IndexError, m.end, "112")
self.assertRaises(IndexError, m.end, 112)
self.assertRaises(IndexError, m.end, "-1")
self.assertRaises(IndexError, m.end, -1)
match = re.match(r'(?P<test>test)', 'test')
self.assertEqual(match.start('test'), 0)
self.assertEqual(match.end('test'), 4)
def test_start_of_str(self):
startOfStr = re.compile('^')
self.assertEqual(startOfStr.match('bazbar', 1), None)
self.assertEqual(startOfStr.match('bazbar', 0,0).span(), (0,0))
self.assertEqual(startOfStr.match('bazbar', 1,2), None)
self.assertEqual(startOfStr.match('bazbar', endpos=3).span(), (0,0))
self.assertEqual(re.sub('^', 'x', ''), 'x')
self.assertEqual(re.sub('^', 'x', ' '), 'x ')
self.assertEqual(re.sub('^', 'x', 'abc'), 'xabc')
# check that groups in split RE are added properly
def test_split(self):
self.assertEqual(re.split('{(,)?}', '1 {} 2 {,} 3 {} 4'), ['1 ', None, ' 2 ', ',', ' 3 ', None, ' 4'])
pnogrp = ','
ptwogrp = '((,))'
csv = '0,1,1,2,3,5,8,13,21,44'
self.assertEqual(re.split(pnogrp, csv, 1), ['0', csv[2:]])
self.assertEqual(re.split(pnogrp, csv, 2), ['0','1', csv[4:]])
self.assertEqual(re.split(pnogrp, csv, 1000), re.split(pnogrp, csv))
self.assertEqual(re.split(pnogrp, csv, 0), re.split(pnogrp, csv))
self.assertEqual(re.split(pnogrp, csv, -1), [csv])
ponegrp = '(,)'
self.assertEqual(re.split(ponegrp, csv, 1), ['0', ',', csv[2:]])
def test_escape(self):
compiled = re.compile(re.escape("hi_"))
all = re.compile('(.*)')
self.assertEqual(all.search('abcdef', 3).group(0), 'def')
self.assertRaises(IndexError, re.match("a[bcd]*b", 'abcbd').group, 1)
self.assertEqual(re.match('(a[bcd]*b)', 'abcbd').group(1), 'abcb')
s = ''
for i in range(32, 128):
if not chr(i).isalnum():
s = s + chr(i)
x = re.escape(s)
self.assertEqual(x, '\\ \\!\\"\\#\\$\\%\\&\\\'\\(\\)\\*\\+\\,\\-\\.\\/\\:\\;\\<\\=\\>\\?\\@\\[\\\\\\]\\^_\\`\\{\\|\\}\\~\\\x7f')
x = re.compile(r'[\\A-Z\.\+]')
self.assertTrue(x.search('aaaA\\B\\Caaa'))
# From the docs: "^" matches only at the start of the string, or in MULTILINE mode also immediately
# following a newline.
m = re.compile("a").match("ba", 1) # succeed
self.assertEqual('a', m.group(0))
# bug 23668
#self.assertEqual(re.compile("^a").search("ba", 1), None) # fails; 'a' not at start
#self.assertEqual(re.compile("^a").search("\na", 1), None) # fails; 'a' not at start
m = re.compile("^a", re.M).search("\na", 1) # succeed (multiline)
self.assertEqual('a', m.group(0))
# bug 938
#self.assertEqual(re.compile("^a", re.M).search("ba", 1), None) # fails; no preceding \n
# findall
def test_findall(self):
for (x, y, z) in (
('\d+', '99 blahblahblah 183 blah 12 blah 7777 yada yada', ['99', '183', '12', '7777']),
('^\d+', '0blahblahblah blah blah yada yada1', ['0']),
('^\d+', 'blahblahblah blah blah yada yada1', []),
("(\d+)|(\w+)", "x = 999y + 23", [('', 'x'), ('999', ''), ('', 'y'), ('23', '')]),
("(\d)(\d\d)(\d\d\d)", "123456789123456789", [('1', '23', '456'), ('7', '89', '123'), ('4', '56', '789')]),
(r"(?i)(\w+)\s+fish\b", "green fish black fish red fish blue fish", ['green', 'black', 'red', 'blue']),
('(a)(b)', 'abab', [('a', 'b'), ('a', 'b')]),
):
self.assertEqual(re.findall(x, y), z)
self.assertEqual(re.compile(x).findall(y), z)
def test_match_groups(self):
m = re.match('(?P<test>a)(b)', 'ab')
self.assertTrue(m.groups() == ('a', 'b'))
m = re.match('(u)(?P<test>v)(b)(?P<Named2>w)(x)(y)', 'uvbwxy')
self.assertTrue(m.groups() == ('u', 'v', 'b', 'w', 'x', 'y'))
def test_options(self):
# coverage for ?iLmsux options in re.compile path
tests = [ ("t(?=s)", "atreftsadbeatwttta", ['t']),
("t(?!s)", "atreftsadbeatststs", ['t']) ]
# native implementation does not handle extensions specified in this way
if is_cli:
tests.extend([
("(?i:foo)", "fooFoo FOO fOo fo oFO O\n\t\nFo ofO O", ['foo', 'Foo', 'FOO', 'fOo']),
("(?im:^foo)", "fooFoo FOO fOo\n\t\nFoo\nFOO", ['foo', 'Foo', 'FOO']), # ignorecase, multiline (matches at beginning of string and at each newline)
("(?s:foo.*bar)", "foo yadayadayada\nyadayadayada bar", ['foo yadayadayada\nyadayadayada bar']), # dotall (make "." match any chr, including a newline)
("(?x:baz bar)", "bazbar foo bar bazbar \n\n\tbazbar", ['bazbar', 'bazbar', 'bazbar']), #verbose (ignore whitespace)
])
for (x, y, z) in tests:
self.assertEqual(re.findall(x, y), z)
self.assertEqual(re.compile(x).findall(y), z)
def test_bug858(self):
pattern = r"""\(? #optional paren
\)? #optional paren
\d+ """
c = re.compile(pattern, re.X)
l = c.findall("989")
self.assertTrue(l == ['989'])
def test_finditer(self):
# finditer
matches = re.finditer("baz","barbazbarbazbar")
num = 0
for m in matches:
num = num + 1
self.assertEqual("baz", m.group(0))
self.assertTrue(num == 2)
matches = re.finditer("baz","barbazbarbazbar", re.L)
num = 0
for m in matches:
num = num + 1
self.assertEqual("baz", m.group(0))
self.assertTrue(num == 2)
matches = re.compile("baz").finditer("barbazbarbazbar", 0)
num = 0
for m in matches:
num = num + 1
self.assertEqual("baz", m.group(0))
self.assertTrue(num == 2)
matches = re.compile("baz").finditer("barbazbarbazbar", 14)
num = 0
for m in matches:
num = num + 1
self.assertEqual("baz", m.group(0))
self.assertTrue(num == 0)
matches = re.compile("baz").finditer("barbazbarbazbar", 0, 14)
num = 0
for m in matches:
num = num + 1
self.assertEqual("baz", m.group(0))
self.assertTrue(num == 2)
matches = re.compile("baz").finditer("barbazbarbazbar", 9, 12)
num = 0
for m in matches:
num = num + 1
self.assertEqual("baz", m.group(0))
self.assertEqual(num, 1)
matches = re.compile("baz").finditer("barbazbarbazbar", 9, 11)
num = 0
for m in matches:
num = num + 1
self.assertEqual("baz", m.group(0))
self.assertEqual(num, 0)
matches = re.compile("baz").finditer("barbazbarbazbar", 10, 12)
num = 0
for m in matches:
num = num + 1
self.assertEqual("baz", m.group(0))
self.assertEqual(num, 0)
def test_search(self):
# search
sp = re.search('super', 'blahsupersuper').span()
self.assertTrue(sp == (4, 9))
sp = re.search('super', 'superblahsuper').span()
self.assertTrue(sp == (0, 5))
#re.search.group() index error
self.assertEqual(re.search("z.*z", "az123za").group(),'z123z')
self.assertEqual(re.search("z.*z", "az12za").group(),'z12z')
self.assertEqual(re.search("z.*z", "azza").group(),'zz')
self.assertEqual(re.search("z123p+z", "az123ppppppppppza").group(),'z123ppppppppppz')
self.assertEqual(re.search("z123p+z", "az123pza").group(),'z123pz')
self.assertEqual(re.search("z123p?z", "az123pza").group(),'z123pz')
self.assertEqual(re.search("z123p?z", "az123za").group(),'z123z')
self.assertEqual(re.search('b', 'abc').string, 'abc')
def test_subn(self):
# subn
tup = re.subn("ab", "cd", "abababababab")
self.assertTrue(tup == ('cdcdcdcdcdcd', 6))
tup = re.subn("ab", "cd", "abababababab", 0)
self.assertTrue(tup == ('cdcdcdcdcdcd', 6))
tup = re.subn("ab", "cd", "abababababab", 1)
self.assertTrue(tup == ('cdababababab', 1))
tup = re.subn("ab", "cd", "abababababab", 10)
self.assertTrue(tup == ('cdcdcdcdcdcd', 6))
tup = re.subn("ababab", "cd", "ab", 10)
self.assertTrue(tup == ('ab', 0))
tup = re.subn("ababab", "cd", "ab")
self.assertTrue(tup == ('ab', 0))
tup = re.subn("(ab)*", "cd", "abababababab", 10)
self.assertTrue(tup == ('cd', 1))
tup = re.subn("(ab)?", "cd", "abababababab", 10)
self.assertTrue(tup == ('cdcdcdcdcdcd', 6))
def test_groups(self):
reg = re.compile("\[(?P<header>.*?)\]")
m = reg.search("[DEFAULT]")
self.assertTrue( m.groups() == ('DEFAULT',))
self.assertTrue( m.group('header') == 'DEFAULT' )
reg2 = re.compile("(?P<grp>\S+)?")
m2 = reg2.search("")
self.assertTrue ( m2.groups() == (None,))
self.assertTrue ( m2.groups('Default') == ('Default',))
def test_locale_flags(self):
self.assertEqual(re.compile(r"^\#[ \t]*(\w[\d\w]*)[ \t](.*)").flags, re.U)
self.assertEqual(re.compile(r"^\#[ \t]*(\w[\d\w]*)[ \t](.*)", re.L).flags, re.L | re.U)
self.assertEqual(re.compile(r"(?L)^\#[ \t]*(\w[\d\w]*)[ \t](.*)").flags, re.L | re.U)
def test_end(self):
ex = re.compile(r'\s+')
m = ex.match('(object Petal', 7)
self.assertTrue (m.end(0) == 8)
def test_lone_hat(self):
"""Single ^ reg-ex shouldn't match w/ a sub-set of a string"""
sol = re.compile('^')
self.assertEqual(sol.match('bazbar', 1, 2), None)
self.assertEqual(sol.match('foobar', 1, 2), None)
def test_escape_backslash(self):
x = re.compile (r"[\\A-Z\.\+]")
self.assertEqual(x.search('aaaA\\B\\Caaa').span(), (3,4))
def test_eol(self):
r = re.compile(r'<(/|\Z)')
s = r.search("<", 0)
self.assertTrue(s != None)
self.assertEqual(s.span(), (0, 1))
self.assertEqual(s.group(0), '<')
self.assertEqual(r.search("<Z", 0), None)
def test_lastindex(self):
for (pat, index) in [
('(a)b', 1), ('((a)(b))', 1), ('((ab))', 1),
('(a)(b)', 2),
('(a)?ab', None),
('(a)?b', 1),
]:
self.assertEqual(re.match(pat, 'ab').lastindex, index)
for (pat, index) in [
('(a)ab', 1),
('(a)(a)b', 2),
('(a)(a)(b)', 3),
('((a)a(b))', 1),
('((a)(a)(b))', 1),
('(a(a)(b))', 1),
('(a(a)?(b))', 1),
('(aa(a)?(b))', 1),
('(aa(b))', 1),
('(a(ab))', 1),
('(a)?ab', 1),
('a(a)?ab', None),
('a(a)?(a)?b', 1),
('a(a)?(a)?(b)', 3),
('a(a)b', 1),
('(a(a))(b)', 3),
('(a(a))b', 1),
('((a)(a))(b)', 4),
('((a)(a))b', 1),
]:
self.assertEqual(re.match(pat, 'aab').lastindex, index)
def test_empty_split(self):
cases =[
('', ['']),
('*', ['*']),
(':', ['', '']),
('::', ['', '']),
('a::', ['a', '']),
('::b', ['', 'b']),
(':c:', ['', 'c', '']),
(':\t: ', ['', '\t', ' ']),
('a:b::c', ['a', 'b', 'c']),
(':a:b::c', ['', 'a', 'b', 'c']),
('::a:b::c:', ['', 'a', 'b', 'c', '']),
]
for expr, result in cases:
self.assertEqual(re.split(":*", expr), result)
def test_cp15298(self):
regex = "^" + "\d\.\d\.\d \(IronPython \d\.\d(\.\d)? ((Alpha )|(Beta )|())\(\d\.\d\.\d\.\d{3,4}\) on \.NET \d(\.\d{1,5}){3}\)" * 15 + "$"
match_str = "2.5.0 (IronPython 2.0 Beta (2.0.0.1000) on .NET 2.0.50727.1433)" * 15
compiled_regex = re.compile(regex)
retval = compiled_regex.match(match_str)
self.assertTrue(retval != None)
retval = re.match(regex, match_str)
self.assertTrue(retval != None)
def test_cp11136(self):
regex = re.compile(r"^(?P<msg>NMAKE[A-Za-z0-9]*)'\"?(?P<file>[\\A-Za-z0-9/:_\.\+]+)" )
self.assertTrue(regex.search(r"NMAKE0119'adirectory\afile.txt")!=None)
def test_cp17111(self):
test_cases = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789~!@#%&_+-=]{};':,.//<>" + '"'
for x in test_cases:
regex = re.compile(r".*\\%s" % x)
self.assertTrue(regex.search(r"\\%s" % x)!=None)
self.assertTrue(regex.search(r"")==None)
def test_cp1089(self):
test_cases = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789~!@#%&_+-=]{};':,.//<>" + '"'
for x in test_cases:
#Just make sure they don't throw
temp = re.compile('\\\\' + x)
def test_cp16657(self):
self.assertTrue(re.compile(r'^bar', re.M).search('foo\nbar') != None)
self.assertTrue(re.compile(r'^bar(?m)').search('foo\nbar') != None)
self.assertTrue(re.compile(r'^bar', re.M).search('foo\nbaar') == None)
self.assertTrue(re.compile(r'^bar(?m)').search('foo\nbaar') == None)
self.assertTrue(re.compile(r'^bar', re.U).search('bar') != None)
self.assertTrue(re.compile(r'^bar(?u)').search('bar') != None)
self.assertTrue(re.compile(r'^bar', re.U).search('baar') == None)
self.assertTrue(re.compile(r'^bar(?u)').search('baar') == None)
self.assertTrue(re.compile(r' b ar ', re.X).search('bar') != None)
self.assertTrue(re.compile(r'b ar(?x)').search('bar') != None)
self.assertTrue(re.compile(r' b ar ', re.X).search('baar') == None)
self.assertTrue(re.compile(r'b ar(?x)').search('baar') == None)
self.assertTrue(re.compile(r'b ar').search('bar') == None)
def test_n_m_quantifier(self):
self.assertEqual(re.search('ab{,2}a', 'abba').span(), (0, 4))
self.assertEqual(re.search('ab{,2}a', 'aba').span(), (0, 3))
self.assertEqual(re.search('ab{,2}a', 'abbba'), None)
self.assertEqual(re.search('ab{,2}a', 'abba').span(), re.search('ab{0,2}a', 'abba').span())
self.assertEqual(re.search('ab{0,2}a', 'abbba'), None)
self.assertEqual(re.search('ab{2,}a', 'abba').span(), (0,4))
self.assertEqual(re.search('ab{2,}a', 'abbba').span(), (0,5))
self.assertEqual(re.search('ab{2,}a', 'aba'), None)
def test_mixed_named_and_unnamed_groups(self):
example1=r"(?P<one>Blah)"
example2=r"(?P<one>(Blah))"
RegExsToTest=[example1,example2]
for regString in RegExsToTest:
g=re.compile(regString)
self.assertEqual(g.groupindex, {'one' : 1})
def test__pickle(self):
'''
TODO: just a sanity test for now. Needs far more testing.
'''
regex = re.compile(r"^(?P<msg>NMAKE[A-Za-z0-9]*)'\"?(?P<file>[\\A-Za-z0-9/:_\.\+]+)" )
pickled_regex = re._pickle(regex)
self.assertEqual(len(pickled_regex), 2)
self.assertEqual(pickled_regex[1],
('^(?P<msg>NMAKE[A-Za-z0-9]*)\'\\"?(?P<file>[\\\\A-Za-z0-9/:_\\.\\+]+)', re.U))
def test_conditional(self):
p = re.compile(r'(a)?(b)((?(1)c))')
self.assertEqual(p.match('abc').groups(), ('a', 'b', 'c'))
p = re.compile(r'(?P<first>a)?(b)((?(first)c))')
self.assertEqual(p.match('abc').groups(), ('a', 'b', 'c'))
s = r'((?(a)ab|cd))'
if is_cli:
p = re.compile(s)
self.assertEqual(p.match('ab').groups(), ('ab',))
else:
self.assertRaises(re.error, re.compile, s)
def test_cp35146(self):
# re.compile returns cached instances
self.assertEqual(re.compile('cp35146'), re.compile('cp35146'))
def test_cp35135(self):
self.assertEqual(re.match(r"(?iu)aA", "aa").string, "aa")
self.assertEqual(re.match(r"(?iu)Aa", "aa").string, "aa")
self.assertEqual(re.match(r"(?iLmsux)Aa", "aa").string, "aa")
def test_issue506(self):
self.assertEqual(re.compile("^a", re.M).search("ba", 1), None)
def test_issue1370(self):
self.assertEqual(re.compile("\Z").match("\n"), None)
self.assertEqual(re.compile("\Z").match("").group(0), "")
def test_ipy2_gh21(self):
"""https://github.com/IronLanguages/ironpython2/issues/21"""
self.assertRaisesMessage(re.error, "redefinition of group name 'hoge' as group 2; was group 1", re.compile, r'(?P<hoge>\w+):(?P<hoge>\w+)')
self.assertRaisesMessage(re.error, "redefinition of group name 'hoge' as group 3; was group 2", re.compile, r'(abc)(?P<hoge>\w+):(?P<hoge>\w+)')
self.assertRaisesMessage(re.error, "redefinition of group name 'hoge' as group 4; was group 2", re.compile, r'(abc)(?P<hoge>\w+):(abc)(?P<hoge>\w+)')
def test_ipy3_gh814(self):
"""https://github.com/IronLanguages/ironpython3/issues/814"""
self.assertEqual(re.match(r'\s+', "\xa0", flags=re.UNICODE).group(0), "\xa0")
self.assertIsNone(re.match(r'\s+', "\xa0", flags=re.ASCII))
def test_pos_endpos(self):
p = re.compile("a")
for func in (p.match, p.fullmatch, p.search):
m = func("a", 100)
self.assertIsNone(m)
m = func("a")
for m2 in (func("a", -100), func("a", -100, 100), func("a", endpos=100)):
self.assertEqual(m.span(), m2.span())
m = p.findall("a", 100)
self.assertEqual(m, [])
m = p.findall("a")
for m2 in (p.findall("a", -100), p.findall("a", -100, 100), p.findall("a", endpos=100)):
self.assertEqual(m, m2)
m = list(p.finditer("a", 100))
self.assertEqual(m, [])
m = next(p.finditer("a"))
for m2 in (p.finditer("a", -100), p.finditer("a", -100, 100), p.finditer("a", endpos=100)):
self.assertEqual(m.span(), next(m2).span())
run_test(__name__)
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os
import re
import shutil
import socket
import sys
import netaddr
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import importutils
import six
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import utils as commonutils
from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('dhcp_confs',
default='$state_path/dhcp',
help=_('Location to store DHCP server config files')),
cfg.StrOpt('dhcp_domain',
default='openstacklocal',
help=_('Domain to use for building the hostnames')),
cfg.StrOpt('dnsmasq_config_file',
default='',
help=_('Override the default dnsmasq settings with this file')),
cfg.ListOpt('dnsmasq_dns_servers',
help=_('Comma-separated list of the DNS servers which will be '
'used as forwarders.'),
deprecated_name='dnsmasq_dns_server'),
cfg.BoolOpt('dhcp_delete_namespaces', default=False,
help=_("Delete namespace after removing a dhcp server.")),
cfg.IntOpt(
'dnsmasq_lease_max',
default=(2 ** 24),
help=_('Limit number of leases to prevent a denial-of-service.')),
cfg.BoolOpt('dhcp_broadcast_reply', default=False,
help=_("Use broadcast in DHCP replies")),
]
IPV4 = 4
IPV6 = 6
UDP = 'udp'
TCP = 'tcp'
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
WIN2k3_STATIC_DNS = 249
NS_PREFIX = 'qdhcp-'
class DictModel(dict):
"""Convert dict into an object that provides attribute access to values."""
def __init__(self, *args, **kwargs):
"""Convert dict values to DictModel values."""
super(DictModel, self).__init__(*args, **kwargs)
def needs_upgrade(item):
"""Check if `item` is a dict and needs to be changed to DictModel.
"""
return isinstance(item, dict) and not isinstance(item, DictModel)
def upgrade(item):
"""Upgrade item if it needs to be upgraded."""
if needs_upgrade(item):
return DictModel(item)
else:
return item
for key, value in self.iteritems():
if isinstance(value, (list, tuple)):
# Keep the same type but convert dicts to DictModels
self[key] = type(value)(
(upgrade(item) for item in value)
)
elif needs_upgrade(value):
# Change dict instance values to DictModel instance values
self[key] = DictModel(value)
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
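# Illustrative only (hypothetical data): attribute access mirrors dict keys,
# and nested dicts are wrapped recursively, e.g.
#   net = DictModel({'id': 'net-1', 'subnets': [{'cidr': '10.0.0.0/24'}]})
#   net.id              -> 'net-1'
#   net.subnets[0].cidr -> '10.0.0.0/24'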
class NetModel(DictModel):
def __init__(self, use_namespaces, d):
super(NetModel, self).__init__(d)
self._ns_name = (use_namespaces and
"%s%s" % (NS_PREFIX, self.id) or None)
@property
def namespace(self):
return self._ns_name
@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):
def __init__(self, conf, network, root_helper='sudo',
version=None, plugin=None):
self.conf = conf
self.network = network
self.root_helper = root_helper
self.device_manager = DeviceManager(self.conf,
self.root_helper, plugin)
self.version = version
@abc.abstractmethod
def enable(self):
"""Enables DHCP for this network."""
@abc.abstractmethod
def disable(self, retain_port=False):
"""Disable dhcp for this network."""
def restart(self):
"""Restart the dhcp service for the network."""
self.disable(retain_port=True)
self.enable()
@abc.abstractproperty
def active(self):
"""Boolean representing the running state of the DHCP server."""
@abc.abstractmethod
def reload_allocations(self):
"""Force the DHCP server to reload the assignment database."""
@classmethod
def existing_dhcp_networks(cls, conf, root_helper):
"""Return a list of existing networks ids that we have configs for."""
raise NotImplementedError()
@classmethod
def check_version(cls):
"""Execute version checks on DHCP server."""
raise NotImplementedError()
@classmethod
def get_isolated_subnets(cls, network):
"""Returns a dict indicating whether or not a subnet is isolated"""
raise NotImplementedError()
@classmethod
def should_enable_metadata(cls, conf, network):
"""True if the metadata-proxy should be enabled for the network."""
raise NotImplementedError()
class DhcpLocalProcess(DhcpBase):
PORTS = []
def _enable_dhcp(self):
"""check if there is a subnet within the network with dhcp enabled."""
for subnet in self.network.subnets:
if subnet.enable_dhcp:
return True
return False
def enable(self):
"""Enables DHCP for this network by spawning a local process."""
if self.active:
self.restart()
elif self._enable_dhcp():
interface_name = self.device_manager.setup(self.network)
self.interface_name = interface_name
self.spawn_process()
def disable(self, retain_port=False):
"""Disable DHCP for this network by killing the local process."""
pid = self.pid
if pid:
if self.active:
cmd = ['kill', '-9', pid]
utils.execute(cmd, self.root_helper)
else:
LOG.debug('DHCP for %(net_id)s is stale, pid %(pid)d '
'does not exist, performing cleanup',
{'net_id': self.network.id, 'pid': pid})
if not retain_port:
self.device_manager.destroy(self.network,
self.interface_name)
else:
LOG.debug('No DHCP started for %s', self.network.id)
self._remove_config_files()
if not retain_port:
if self.conf.dhcp_delete_namespaces and self.network.namespace:
ns_ip = ip_lib.IPWrapper(self.root_helper,
self.network.namespace)
try:
ns_ip.netns.delete(self.network.namespace)
except RuntimeError:
LOG.exception(_LE('Failed trying to delete namespace: %s'),
self.network.namespace)
def _remove_config_files(self):
confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
conf_dir = os.path.join(confs_dir, self.network.id)
shutil.rmtree(conf_dir, ignore_errors=True)
def get_conf_file_name(self, kind, ensure_conf_dir=False):
"""Returns the file name for a given kind of config file."""
confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
conf_dir = os.path.join(confs_dir, self.network.id)
if ensure_conf_dir:
if not os.path.isdir(conf_dir):
os.makedirs(conf_dir, 0o755)
return os.path.join(conf_dir, kind)
def _get_value_from_conf_file(self, kind, converter=None):
"""A helper function to read a value from one of the state files."""
file_name = self.get_conf_file_name(kind)
msg = _('Error while reading %s')
try:
with open(file_name, 'r') as f:
try:
return converter(f.read()) if converter else f.read()
except ValueError:
msg = _('Unable to convert value in %s')
except IOError:
msg = _('Unable to access %s')
LOG.debug(msg, file_name)
return None
@property
def pid(self):
"""Last known pid for the DHCP process spawned for this network."""
return self._get_value_from_conf_file('pid', int)
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
return self.network.id in f.readline()
except IOError:
return False
@property
def interface_name(self):
return self._get_value_from_conf_file('interface')
@interface_name.setter
def interface_name(self, value):
interface_file_path = self.get_conf_file_name('interface',
ensure_conf_dir=True)
utils.replace_file(interface_file_path, value)
@abc.abstractmethod
def spawn_process(self):
pass
class Dnsmasq(DhcpLocalProcess):
# The ports that need to be opened when security policies are active
# on the Neutron port used for DHCP. These are provided as a convenience
# for users of this class.
PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
}
_TAG_PREFIX = 'tag%d'
NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID'
NEUTRON_RELAY_SOCKET_PATH_KEY = 'NEUTRON_RELAY_SOCKET_PATH'
MINIMUM_VERSION = 2.63
@classmethod
def check_version(cls):
ver = 0
try:
cmd = ['dnsmasq', '--version']
out = utils.execute(cmd)
ver = re.findall(r"\d+\.\d+", out)[0]
is_valid_version = float(ver) >= cls.MINIMUM_VERSION
if not is_valid_version:
LOG.error(_LE('FAILED VERSION REQUIREMENT FOR DNSMASQ. '
'DHCP AGENT MAY NOT RUN CORRECTLY! '
'Please ensure that its version is %s '
'or above!'), cls.MINIMUM_VERSION)
raise SystemExit(1)
except (OSError, RuntimeError, IndexError, ValueError):
LOG.error(_LE('Unable to determine dnsmasq version. '
'Please ensure that its version is %s '
'or above!'), cls.MINIMUM_VERSION)
raise SystemExit(1)
return float(ver)
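# Illustrative only: 'dnsmasq --version' usually begins with a line such as
# "Dnsmasq version 2.66  Copyright ...", from which the regex above extracts
# "2.66" for comparison against MINIMUM_VERSION.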
@classmethod
def existing_dhcp_networks(cls, conf, root_helper):
"""Return a list of existing networks ids that we have configs for."""
confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs))
return [
c for c in os.listdir(confs_dir)
if uuidutils.is_uuid_like(c)
]
def spawn_process(self):
"""Spawns a Dnsmasq process for the network."""
env = {
self.NEUTRON_NETWORK_ID_KEY: self.network.id,
}
cmd = [
'dnsmasq',
'--no-hosts',
'--no-resolv',
'--strict-order',
'--bind-interfaces',
'--interface=%s' % self.interface_name,
'--except-interface=lo',
'--pid-file=%s' % self.get_conf_file_name(
'pid', ensure_conf_dir=True),
'--dhcp-hostsfile=%s' % self._output_hosts_file(),
'--addn-hosts=%s' % self._output_addn_hosts_file(),
'--dhcp-optsfile=%s' % self._output_opts_file(),
'--leasefile-ro',
]
possible_leases = 0
for i, subnet in enumerate(self.network.subnets):
mode = None
# skip subnets that have DHCP disabled
if not subnet.enable_dhcp:
continue
if subnet.ip_version == 4:
mode = 'static'
else:
# Note(scollins) If the IPv6 attributes are not set, set it as
# static to preserve previous behavior
addr_mode = getattr(subnet, 'ipv6_address_mode', None)
ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
if (addr_mode in [constants.DHCPV6_STATEFUL,
constants.DHCPV6_STATELESS] or
not addr_mode and not ra_mode):
mode = 'static'
cidr = netaddr.IPNetwork(subnet.cidr)
if self.conf.dhcp_lease_duration == -1:
lease = 'infinite'
else:
lease = '%ss' % self.conf.dhcp_lease_duration
# mode is optional; if it was not set above, skip the dhcp-range
if mode:
cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
('set:', self._TAG_PREFIX % i,
cidr.network, mode, lease))
possible_leases += cidr.size
# Cap the limit because creating lots of subnets can inflate
# the possible lease count.
cmd.append('--dhcp-lease-max=%d' %
min(possible_leases, self.conf.dnsmasq_lease_max))
cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
if self.conf.dnsmasq_dns_servers:
cmd.extend(
'--server=%s' % server
for server in self.conf.dnsmasq_dns_servers)
if self.conf.dhcp_domain:
cmd.append('--domain=%s' % self.conf.dhcp_domain)
if self.conf.dhcp_broadcast_reply:
cmd.append('--dhcp-broadcast')
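# The list assembled above is the full dnsmasq invocation that gets run
# inside the network namespace below. As a rough illustration only (the
# interface name, state-file paths, tag index, range, lease time and
# lease-max value here are hypothetical, not derived from this code), the
# command can look like:
#   dnsmasq --no-hosts --no-resolv --strict-order --bind-interfaces
#       --interface=tap0 --except-interface=lo --pid-file=<conf_dir>/pid
#       --dhcp-hostsfile=<conf_dir>/host --addn-hosts=<conf_dir>/addn_hosts
#       --dhcp-optsfile=<conf_dir>/opts --leasefile-ro
#       --dhcp-range=set:tag0,192.168.0.0,static,86400s
#       --dhcp-lease-max=256 --conf-file=<dnsmasq_config_file>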
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
self.network.namespace)
ip_wrapper.netns.execute(cmd, addl_env=env)
def _release_lease(self, mac_address, ip):
"""Release a DHCP lease."""
cmd = ['dhcp_release', self.interface_name, ip, mac_address]
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
self.network.namespace)
ip_wrapper.netns.execute(cmd)
def reload_allocations(self):
"""Rebuild the dnsmasq config and signal the dnsmasq to reload."""
# If all subnets turn off dhcp, kill the process.
if not self._enable_dhcp():
self.disable()
LOG.debug('Killing dnsmasq for network since all subnets have '
'turned off DHCP: %s', self.network.id)
return
self._release_unused_leases()
self._output_hosts_file()
self._output_addn_hosts_file()
self._output_opts_file()
if self.active:
cmd = ['kill', '-HUP', self.pid]
utils.execute(cmd, self.root_helper)
else:
LOG.debug('Pid %d is stale, relaunching dnsmasq', self.pid)
LOG.debug('Reloading allocations for network: %s', self.network.id)
self.device_manager.update(self.network, self.interface_name)
def _iter_hosts(self):
"""Iterate over hosts.
For each host on the network we yield a tuple containing:
(
port, # a DictModel instance representing the port.
alloc, # a DictModel instance of the allocated ip and subnet.
host_name, # Host name.
name, # Canonical hostname in the format 'hostname[.domain]'.
)
"""
v6_nets = dict((subnet.id, subnet) for subnet in
self.network.subnets if subnet.ip_version == 6)
for port in self.network.ports:
for alloc in port.fixed_ips:
# Note(scollins) Only create entries that are
# associated with the subnet being managed by this
# dhcp agent
if alloc.subnet_id in v6_nets:
addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
if addr_mode != constants.DHCPV6_STATEFUL:
continue
hostname = 'host-%s' % alloc.ip_address.replace(
'.', '-').replace(':', '-')
fqdn = hostname
if self.conf.dhcp_domain:
fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
yield (port, alloc, hostname, fqdn)
def _output_hosts_file(self):
"""Writes a dnsmasq compatible dhcp hosts file.
The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
and lists the hosts on the network which should receive a dhcp lease.
Each line in this file is in the form::
'mac_address,FQDN,ip_address'
IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
this file if it did not give a lease to a host listed in it (e.g.
multiple dnsmasq instances on the same network when this network spans
multiple network nodes). This file only defines the hosts which
should receive a dhcp lease; host resolution itself is handled by
the `_output_addn_hosts_file` method.
"""
buf = six.StringIO()
filename = self.get_conf_file_name('host')
LOG.debug('Building host file: %s', filename)
for (port, alloc, hostname, name) in self._iter_hosts():
# (dzyu) Check if it is a valid IPv6 address; if so, wrap it
# in '[]' so that dnsmasq can distinguish the MAC address from
# the IPv6 address.
ip_address = alloc.ip_address
if netaddr.valid_ipv6(ip_address):
ip_address = '[%s]' % ip_address
LOG.debug('Adding %(mac)s : %(name)s : %(ip)s',
{"mac": port.mac_address, "name": name,
"ip": ip_address})
if getattr(port, 'extra_dhcp_opts', False):
buf.write('%s,%s,%s,%s%s\n' %
(port.mac_address, name, ip_address,
'set:', port.id))
else:
buf.write('%s,%s,%s\n' %
(port.mac_address, name, ip_address))
utils.replace_file(filename, buf.getvalue())
LOG.debug('Done building host file %s', filename)
return filename
def _read_hosts_file_leases(self, filename):
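# The hosts file parsed here is the one written by _output_hosts_file,
# i.e. 'mac,name,ip[,set:<port-id>]' per line; the returned set holds
# (ip, mac) tuples. Illustrative example only (values are hypothetical):
#   'fa:16:3e:aa:bb:cc,host-192-168-0-5.example.org,192.168.0.5'
#   -> ('192.168.0.5', 'fa:16:3e:aa:bb:cc')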
leases = set()
if os.path.exists(filename):
with open(filename) as f:
for l in f.readlines():
host = l.strip().split(',')
leases.add((host[2].strip('[]'), host[0]))
return leases
def _release_unused_leases(self):
filename = self.get_conf_file_name('host')
old_leases = self._read_hosts_file_leases(filename)
new_leases = set()
for port in self.network.ports:
for alloc in port.fixed_ips:
new_leases.add((alloc.ip_address, port.mac_address))
for ip, mac in old_leases - new_leases:
self._release_lease(mac, ip)
def _output_addn_hosts_file(self):
"""Writes a dnsmasq compatible additional hosts file.
The generated file is sent to the --addn-hosts option of dnsmasq,
and lists the hosts on the network which should be resolved even if
the dnsmasq instance did not give a lease to the host (see the
`_output_hosts_file` method).
Each line in this file is in the same form as a standard /etc/hosts
file.
"""
buf = six.StringIO()
for (port, alloc, hostname, fqdn) in self._iter_hosts():
# It is compulsory to write the `fqdn` before the `hostname` in
# order to obtain it in PTR responses.
buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
addn_hosts = self.get_conf_file_name('addn_hosts')
utils.replace_file(addn_hosts, buf.getvalue())
return addn_hosts
def _output_opts_file(self):
"""Write a dnsmasq compatible options file."""
if self.conf.enable_isolated_metadata:
subnet_to_interface_ip = self._make_subnet_interface_ip_map()
options = []
isolated_subnets = self.get_isolated_subnets(self.network)
dhcp_ips = collections.defaultdict(list)
subnet_idx_map = {}
for i, subnet in enumerate(self.network.subnets):
if (not subnet.enable_dhcp or
(subnet.ip_version == 6 and
getattr(subnet, 'ipv6_address_mode', None)
in [None, constants.IPV6_SLAAC])):
continue
if subnet.dns_nameservers:
options.append(
self._format_option(
subnet.ip_version, i, 'dns-server',
','.join(
Dnsmasq._convert_to_literal_addrs(
subnet.ip_version, subnet.dns_nameservers))))
else:
# use the dnsmasq IPs as nameservers only if no dns-server
# was submitted by the server
subnet_idx_map[subnet.id] = i
if self.conf.dhcp_domain and subnet.ip_version == 6:
options.append('tag:tag%s,option6:domain-search,%s' %
(i, ''.join(self.conf.dhcp_domain)))
gateway = subnet.gateway_ip
host_routes = []
for hr in subnet.host_routes:
if hr.destination == "0.0.0.0/0":
if not gateway:
gateway = hr.nexthop
else:
host_routes.append("%s,%s" % (hr.destination, hr.nexthop))
# Add host routes for isolated network segments
if (isolated_subnets[subnet.id] and
self.conf.enable_isolated_metadata and
subnet.ip_version == 4):
subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
host_routes.append(
'%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
)
if subnet.ip_version == 4:
if host_routes:
if gateway:
host_routes.append("%s,%s" % ("0.0.0.0/0", gateway))
options.append(
self._format_option(subnet.ip_version, i,
'classless-static-route',
','.join(host_routes)))
options.append(
self._format_option(subnet.ip_version, i,
WIN2k3_STATIC_DNS,
','.join(host_routes)))
if gateway:
options.append(self._format_option(subnet.ip_version,
i, 'router',
gateway))
else:
options.append(self._format_option(subnet.ip_version,
i, 'router'))
for port in self.network.ports:
if getattr(port, 'extra_dhcp_opts', False):
for ip_version in (4, 6):
if any(
netaddr.IPAddress(ip.ip_address).version == ip_version
for ip in port.fixed_ips):
options.extend(
# TODO(xuhanp):Instead of applying extra_dhcp_opts
# to both DHCPv4 and DHCPv6, we need to find a new
# way to specify options for v4 and v6
# respectively. We also need to validate the option
# before applying it.
self._format_option(ip_version, port.id,
opt.opt_name, opt.opt_value)
for opt in port.extra_dhcp_opts)
# provide all dnsmasq IPs as dns-servers if there is more than
# one dnsmasq for a subnet and no dns-server was submitted
# by the server
if port.device_owner == constants.DEVICE_OWNER_DHCP:
for ip in port.fixed_ips:
i = subnet_idx_map.get(ip.subnet_id)
if i is None:
continue
dhcp_ips[i].append(ip.ip_address)
for i, ips in dhcp_ips.items():
for ip_version in (4, 6):
vx_ips = [ip for ip in ips
if netaddr.IPAddress(ip).version == ip_version]
if vx_ips:
options.append(
self._format_option(
ip_version, i, 'dns-server',
','.join(
Dnsmasq._convert_to_literal_addrs(ip_version,
vx_ips))))
name = self.get_conf_file_name('opts')
utils.replace_file(name, '\n'.join(options))
return name
def _make_subnet_interface_ip_map(self):
ip_dev = ip_lib.IPDevice(
self.interface_name,
self.root_helper,
self.network.namespace
)
subnet_lookup = dict(
(netaddr.IPNetwork(subnet.cidr), subnet.id)
for subnet in self.network.subnets
)
retval = {}
for addr in ip_dev.addr.list():
ip_net = netaddr.IPNetwork(addr['cidr'])
if ip_net in subnet_lookup:
retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]
return retval
def _format_option(self, ip_version, tag, option, *args):
"""Format DHCP option by option name or code."""
option = str(option)
if isinstance(tag, int):
tag = self._TAG_PREFIX % tag
if not option.isdigit():
if ip_version == 4:
option = 'option:%s' % option
else:
option = 'option6:%s' % option
return ','.join(('tag:' + tag, '%s' % option) + args)
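# Illustrative examples of what _format_option above returns (the tag
# index, option names and values here are hypothetical):
#   _format_option(4, 0, 'router', '192.168.0.1')
#       -> 'tag:tag0,option:router,192.168.0.1'
#   _format_option(6, 1, 'dns-server', '[2001:db8::1]')
#       -> 'tag:tag1,option6:dns-server,[2001:db8::1]'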
@staticmethod
def _convert_to_literal_addrs(ip_version, ips):
if ip_version == 4:
return ips
return ['[' + ip + ']' for ip in ips]
@classmethod
def get_isolated_subnets(cls, network):
"""Returns a dict indicating whether or not a subnet is isolated
A subnet is considered non-isolated if there is a port connected to
the subnet, and the port's ip address matches that of the subnet's
gateway. The port must be owned by a nuetron router.
"""
isolated_subnets = collections.defaultdict(lambda: True)
subnets = dict((subnet.id, subnet) for subnet in network.subnets)
for port in network.ports:
if port.device_owner not in (constants.DEVICE_OWNER_ROUTER_INTF,
constants.DEVICE_OWNER_DVR_INTERFACE):
continue
for alloc in port.fixed_ips:
if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address:
isolated_subnets[alloc.subnet_id] = False
return isolated_subnets
@classmethod
def should_enable_metadata(cls, conf, network):
"""Determine whether the metadata proxy is needed for a network
This method returns True for truly isolated networks (ie: not attached
to a router), when the enable_isolated_metadata flag is True.
This method also returns True when enable_metadata_network is True,
and the network passed as a parameter has a subnet in the link-local
CIDR, thus characterizing it as a "metadata" network. The metadata
network is used by solutions which do not leverage the l3 agent for
providing access to the metadata service via logical routers built
with 3rd party backends.
"""
if conf.enable_metadata_network and conf.enable_isolated_metadata:
# check if the network has a metadata subnet
meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR)
if any(netaddr.IPNetwork(s.cidr) in meta_cidr
for s in network.subnets):
return True
if not conf.use_namespaces or not conf.enable_isolated_metadata:
return False
isolated_subnets = cls.get_isolated_subnets(network)
return any(isolated_subnets[subnet.id] for subnet in network.subnets)
@classmethod
def lease_update(cls):
network_id = os.environ.get(cls.NEUTRON_NETWORK_ID_KEY)
dhcp_relay_socket = os.environ.get(cls.NEUTRON_RELAY_SOCKET_PATH_KEY)
action = sys.argv[1]
if action not in ('add', 'del', 'old'):
sys.exit()
mac_address = sys.argv[2]
ip_address = sys.argv[3]
if action == 'del':
lease_remaining = 0
else:
lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0))
data = dict(network_id=network_id, mac_address=mac_address,
ip_address=ip_address, lease_remaining=lease_remaining)
if os.path.exists(dhcp_relay_socket):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(dhcp_relay_socket)
sock.send(jsonutils.dumps(data))
sock.close()
class DeviceManager(object):
def __init__(self, conf, root_helper, plugin):
self.conf = conf
self.root_helper = root_helper
self.plugin = plugin
if not conf.interface_driver:
LOG.error(_LE('An interface driver must be specified'))
raise SystemExit(1)
try:
self.driver = importutils.import_object(
conf.interface_driver, conf)
except Exception as e:
LOG.error(_LE("Error importing interface driver '%(driver)s': "
"%(inner)s"),
{'driver': conf.interface_driver,
'inner': e})
raise SystemExit(1)
def get_interface_name(self, network, port):
"""Return interface(device) name for use by the DHCP process."""
return self.driver.get_device_name(port)
def get_device_id(self, network):
"""Return a unique DHCP device ID for this host on the network."""
# There could be more than one dhcp server per network, so create
# a device id that combines host and network ids
return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host)
def _set_default_route(self, network, device_name):
"""Sets the default gateway for this dhcp namespace.
This method is idempotent and will only adjust the route if adjusting
it would change it from what it already is. This makes it safe to call
and avoids unnecessary perturbation of the system.
"""
device = ip_lib.IPDevice(device_name,
self.root_helper,
network.namespace)
gateway = device.route.get_gateway()
if gateway:
gateway = gateway['gateway']
for subnet in network.subnets:
skip_subnet = (
subnet.ip_version != 4
or not subnet.enable_dhcp
or subnet.gateway_ip is None)
if skip_subnet:
continue
if gateway != subnet.gateway_ip:
LOG.debug('Setting gateway for dhcp netns on net %(n)s to '
'%(ip)s',
{'n': network.id, 'ip': subnet.gateway_ip})
device.route.add_gateway(subnet.gateway_ip)
return
# No subnets on the network have a valid gateway. Clean it up to avoid
# confusion from seeing an invalid gateway here.
if gateway is not None:
LOG.debug('Removing gateway for dhcp netns on net %s', network.id)
device.route.delete_gateway(gateway)
def setup_dhcp_port(self, network):
"""Create/update DHCP port for the host if needed and return port."""
device_id = self.get_device_id(network)
subnets = {}
dhcp_enabled_subnet_ids = []
for subnet in network.subnets:
if subnet.enable_dhcp:
dhcp_enabled_subnet_ids.append(subnet.id)
subnets[subnet.id] = subnet
dhcp_port = None
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == device_id:
port_fixed_ips = []
for fixed_ip in port.fixed_ips:
port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
'ip_address': fixed_ip.ip_address})
if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)
# If any dhcp_enabled_subnet_ids remain, they need to be added
# to the port, so call update.
if dhcp_enabled_subnet_ids:
port_fixed_ips.extend(
[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
dhcp_port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'fixed_ips': port_fixed_ips}})
if not dhcp_port:
raise exceptions.Conflict()
else:
dhcp_port = port
# break since we found port that matches device_id
break
# check for a reserved DHCP port
if dhcp_port is None:
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Checking for a reserved port.',
{'device_id': device_id, 'network_id': network.id})
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
dhcp_port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'device_id': device_id}})
if dhcp_port:
break
# DHCP port has not yet been created.
if dhcp_port is None:
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist.', {'device_id': device_id,
'network_id': network.id})
port_dict = dict(
name='',
admin_state_up=True,
device_id=device_id,
network_id=network.id,
tenant_id=network.tenant_id,
fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
if not dhcp_port:
raise exceptions.Conflict()
# Convert subnet_id to subnet dict
fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
ip_address=fixed_ip.ip_address,
subnet=subnets[fixed_ip.subnet_id])
for fixed_ip in dhcp_port.fixed_ips]
ips = [DictModel(item) if isinstance(item, dict) else item
for item in fixed_ips]
dhcp_port.fixed_ips = ips
return dhcp_port
def setup(self, network):
"""Create and initialize a device for network's DHCP on this host."""
port = self.setup_dhcp_port(network)
interface_name = self.get_interface_name(network, port)
if ip_lib.ensure_device_is_ready(interface_name,
self.root_helper,
network.namespace):
LOG.debug('Reusing existing device: %s.', interface_name)
else:
self.driver.plug(network.id,
port.id,
interface_name,
port.mac_address,
namespace=network.namespace)
ip_cidrs = []
for fixed_ip in port.fixed_ips:
subnet = fixed_ip.subnet
net = netaddr.IPNetwork(subnet.cidr)
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
if (self.conf.enable_isolated_metadata and
self.conf.use_namespaces):
ip_cidrs.append(METADATA_DEFAULT_CIDR)
self.driver.init_l3(interface_name, ip_cidrs,
namespace=network.namespace)
# ensure that the dhcp interface is first in the list
if network.namespace is None:
device = ip_lib.IPDevice(interface_name,
self.root_helper)
device.route.pullup_route(interface_name)
if self.conf.use_namespaces:
self._set_default_route(network, interface_name)
return interface_name
def update(self, network, device_name):
"""Update device settings for the network's DHCP on this host."""
if self.conf.use_namespaces:
self._set_default_route(network, device_name)
def destroy(self, network, device_name):
"""Destroy the device used for the network's DHCP on this host."""
self.driver.unplug(device_name, namespace=network.namespace)
self.plugin.release_dhcp_port(network.id,
self.get_device_id(network))
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as vutil
from jacket.compute import exception
from jacket.compute.network import model as network_model
from jacket.compute import test
from jacket.tests.compute.unit import fake_instance
from jacket.tests.compute.unit.virt.vmwareapi import fake
from jacket.tests.compute.unit.virt.vmwareapi import stubs
from jacket.compute.virt.vmwareapi import constants
from jacket.compute.virt.vmwareapi import driver
from jacket.compute.virt.vmwareapi import vm_util
class partialObject(object):
def __init__(self, path='fake-path'):
self.path = path
self.fault = fake.DataObject()
class VMwareVMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
stubs.set_stubs(self)
vm_util.vm_refs_cache_reset()
self._instance = fake_instance.fake_instance_obj(
None,
**{'id': 7, 'name': 'fake!',
'display_name': 'fake-display-name',
'uuid': uuidutils.generate_uuid(),
'vcpus': 2, 'memory_mb': 2048})
def _test_get_stats_from_cluster(self, connection_state="connected",
maintenance_mode=False):
ManagedObjectRefs = [fake.ManagedObjectReference("host1",
"HostSystem"),
fake.ManagedObjectReference("host2",
"HostSystem")]
hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
prop_dict = {'host': hosts, 'resourcePool': respool}
hardware = fake.DataObject()
hardware.numCpuCores = 8
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
runtime_host_1 = fake.DataObject()
runtime_host_1.connectionState = "connected"
runtime_host_1.inMaintenanceMode = False
runtime_host_2 = fake.DataObject()
runtime_host_2.connectionState = connection_state
runtime_host_2.inMaintenanceMode = maintenance_mode
prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_1)]
prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_2)]
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_1))
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_2))
respool_resource_usage = fake.DataObject()
respool_resource_usage.maxUsage = 5368709120
respool_resource_usage.overallUsage = 2147483648
def fake_call_method(*args):
if "get_object_properties_dict" in args:
return prop_dict
elif "get_properties_for_a_collection_of_objects" in args:
return fake_objects
else:
return respool_resource_usage
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', fake_call_method):
result = vm_util.get_stats_from_cluster(session, "cluster1")
mem_info = {}
if connection_state == "connected" and not maintenance_mode:
vcpus = 32
else:
vcpus = 16
mem_info['total'] = 5120
mem_info['free'] = 3072
expected_stats = {'vcpus': vcpus, 'mem': mem_info}
self.assertEqual(expected_stats, result)
def test_get_stats_from_cluster_hosts_connected_and_active(self):
self._test_get_stats_from_cluster()
def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
self._test_get_stats_from_cluster(connection_state="disconnected")
def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
self._test_get_stats_from_cluster(maintenance_mode=True)
def test_get_host_ref_no_hosts_in_cluster(self):
self.assertRaises(exception.NoValidHost,
vm_util.get_host_ref,
fake.FakeObjectRetrievalSession(""), 'fake_cluster')
def test_get_resize_spec(self):
vcpus = 2
memory_mb = 2048
extra_specs = vm_util.ExtraSpecs()
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_resize_spec(fake_factory,
vcpus, memory_mb, extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.memoryMB = memory_mb
expected.numCPUs = vcpus
cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpuAllocation.reservation = 0
cpuAllocation.limit = -1
cpuAllocation.shares = fake_factory.create('ns0:SharesInfo')
cpuAllocation.shares.level = 'normal'
cpuAllocation.shares.shares = 0
expected.cpuAllocation = cpuAllocation
self.assertEqual(expected, result)
def test_get_resize_spec_with_limits(self):
vcpus = 2
memory_mb = 2048
cpu_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_resize_spec(fake_factory,
vcpus, memory_mb, extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.memoryMB = memory_mb
expected.numCPUs = vcpus
cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpuAllocation.reservation = 6
cpuAllocation.limit = 7
cpuAllocation.shares = fake_factory.create('ns0:SharesInfo')
cpuAllocation.shares.level = 'normal'
cpuAllocation.shares.shares = 0
expected.cpuAllocation = cpuAllocation
self.assertEqual(expected, result)
def test_get_cdrom_attach_config_spec(self):
fake_factory = fake.FakeFactory()
datastore = fake.Datastore()
result = vm_util.get_cdrom_attach_config_spec(fake_factory,
datastore,
"/tmp/foo.iso",
200, 0)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.operation = 'add'
device_change.device = fake_factory.create('ns0:VirtualCdrom')
device_change.device.controllerKey = 200
device_change.device.unitNumber = 0
device_change.device.key = -1
connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
connectable.allowGuestControl = False
connectable.startConnected = True
connectable.connected = True
device_change.device.connectable = connectable
backing = fake_factory.create('ns0:VirtualCdromIsoBackingInfo')
backing.fileName = '/tmp/foo.iso'
backing.datastore = datastore
device_change.device.backing = backing
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
def test_lsilogic_controller_spec(self):
# Test controller spec returned for lsiLogic sas adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type=constants.ADAPTER_TYPE_LSILOGICSAS)
self.assertEqual("ns0:VirtualLsiLogicSASController",
config_spec.device.obj_name)
def test_paravirtual_controller_spec(self):
# Test controller spec returned for paraVirtual adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type=constants.ADAPTER_TYPE_PARAVIRTUAL)
self.assertEqual("ns0:ParaVirtualSCSIController",
config_spec.device.obj_name)
def test_create_controller_spec_with_specfic_bus_number(self):
# Test controller spec with a specific bus number rather than the default 0
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type=constants.ADAPTER_TYPE_LSILOGICSAS,
bus_number=1)
self.assertEqual(1, config_spec.device.busNumber)
def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
# Test the adapter_type returned for a lsiLogic sas controller
controller_key = 1000
disk = fake.VirtualDisk()
disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
disk.capacityInBytes = 1024
if parent:
disk_backing.parent = parent
disk.backing = disk_backing
# Ephemeral disk
e_disk = fake.VirtualDisk()
e_disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = '[test_datastore] uuid/ephemeral_0.vmdk'
e_disk.capacityInBytes = 512
e_disk.backing = disk_backing
controller = fake.VirtualLsiLogicSASController()
controller.key = controller_key
devices = [disk, e_disk, controller]
return devices
def test_get_vmdk_path_and_adapter_type(self):
filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(filename)
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', return_value=devices):
vmdk = vm_util.get_vmdk_info(session, None)
self.assertEqual(constants.ADAPTER_TYPE_LSILOGICSAS,
vmdk.adapter_type)
self.assertEqual('[test_datastore] uuid/ephemeral_0.vmdk',
vmdk.path)
self.assertEqual(512, vmdk.capacity_in_bytes)
self.assertEqual(devices[1], vmdk.device)
def test_get_vmdk_path_and_adapter_type_with_match(self):
n_filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', return_value=devices):
vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid')
self.assertEqual(constants.ADAPTER_TYPE_LSILOGICSAS,
vmdk.adapter_type)
self.assertEqual(n_filename, vmdk.path)
self.assertEqual(1024, vmdk.capacity_in_bytes)
self.assertEqual(devices[0], vmdk.device)
def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
n_filename = '[test_datastore] diuu/diuu.vmdk'
session = fake.FakeSession()
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
with mock.patch.object(session, '_call_method', return_value=devices):
vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid')
self.assertIsNone(vmdk.adapter_type)
self.assertIsNone(vmdk.path)
self.assertEqual(0, vmdk.capacity_in_bytes)
self.assertIsNone(vmdk.device)
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
# The adapter type in the vmdk descriptor is the same for LSI-SAS,
# LSILogic and ParaVirtual
vmdk_adapter_type = vm_util.get_vmdk_adapter_type(
constants.DEFAULT_ADAPTER_TYPE)
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type(
constants.ADAPTER_TYPE_LSILOGICSAS)
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type(
constants.ADAPTER_TYPE_PARAVIRTUAL)
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
self.assertEqual("dummyAdapter", vmdk_adapter_type)
def test_get_scsi_adapter_type(self):
vm = fake.VirtualMachine()
devices = vm.get("config.hardware.device").VirtualDevice
scsi_controller = fake.VirtualLsiLogicController()
ide_controller = fake.VirtualIDEController()
devices.append(scsi_controller)
devices.append(ide_controller)
fake._update_object("VirtualMachine", vm)
# return the scsi type, not ide
hardware_device = vm.get("config.hardware.device")
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
vm_util.get_scsi_adapter_type(hardware_device))
def test_get_scsi_adapter_type_with_error(self):
vm = fake.VirtualMachine()
devices = vm.get("config.hardware.device").VirtualDevice
scsi_controller = fake.VirtualLsiLogicController()
ide_controller = fake.VirtualIDEController()
devices.append(scsi_controller)
devices.append(ide_controller)
fake._update_object("VirtualMachine", vm)
# the controller is not suitable since the number of devices under
# this controller has exceeded SCSI_MAX_CONNECT_NUMBER
for i in range(0, constants.SCSI_MAX_CONNECT_NUMBER):
scsi_controller.device.append('device' + str(i))
hardware_device = vm.get("config.hardware.device")
self.assertRaises(exception.StorageError,
vm_util.get_scsi_adapter_type,
hardware_device)
def test_find_allocated_slots(self):
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
disk3 = fake.VirtualDisk(201, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
taken = vm_util._find_allocated_slots(devices)
self.assertEqual([0, 1], sorted(taken[200]))
self.assertEqual([1], taken[201])
self.assertEqual([7], taken[1000])
def test_get_bus_number_for_scsi_controller(self):
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7,
busNumber=0),
fake.VirtualLsiLogicController(1002, scsiCtlrUnitNumber=7,
busNumber=2)]
bus_number = vm_util._get_bus_number_for_scsi_controller(devices)
self.assertEqual(1, bus_number)
def test_get_bus_number_for_scsi_controller_buses_used_up(self):
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7,
busNumber=0),
fake.VirtualLsiLogicController(1001, scsiCtlrUnitNumber=7,
busNumber=1),
fake.VirtualLsiLogicController(1002, scsiCtlrUnitNumber=7,
busNumber=2),
fake.VirtualLsiLogicController(1003, scsiCtlrUnitNumber=7,
busNumber=3)]
self.assertRaises(vexc.VMwareDriverException,
vm_util._get_bus_number_for_scsi_controller,
devices)
def test_allocate_controller_key_and_unit_number_ide_default(self):
# Test that default IDE controllers are used when there is a free slot
# on them
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [disk1, disk2, ide0, ide1]
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
None,
devices,
'ide')
self.assertEqual(201, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNone(controller_spec)
def test_allocate_controller_key_and_unit_number_ide(self):
# Test that a new controller is created when there is no free slot on
# the default IDE controllers
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [ide0, ide1]
for controller_key in [200, 201]:
for unit_number in [0, 1]:
disk = fake.VirtualDisk(controller_key, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'ide')
self.assertEqual(-101, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNotNone(controller_spec)
def test_allocate_controller_key_and_unit_number_scsi(self):
# Test that we allocate on an existing SCSI controller if there is a
# free slot on it
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
for unit_number in range(7):
disk = fake.VirtualDisk(1000, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
constants.DEFAULT_ADAPTER_TYPE)
self.assertEqual(1000, controller_key)
self.assertEqual(8, unit_number)
self.assertIsNone(controller_spec)
def test_allocate_controller_key_and_unit_number_scsi_new_controller(self):
# Test that a new SCSI controller is created when there is no free
# slot on the existing one
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=15)]
for unit_number in range(15):
disk = fake.VirtualDisk(1000, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
constants.DEFAULT_ADAPTER_TYPE)
self.assertEqual(-101, controller_key)
self.assertEqual(0, unit_number)
self.assertEqual(1, controller_spec.device.busNumber)
def test_get_vnc_config_spec(self):
fake_factory = fake.FakeFactory()
result = vm_util.get_vnc_config_spec(fake_factory,
7)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
remote_display_vnc_enabled = fake_factory.create('ns0:OptionValue')
remote_display_vnc_enabled.value = 'true'
remote_display_vnc_enabled.key = 'RemoteDisplay.vnc.enabled'
expected.extraConfig.append(remote_display_vnc_enabled)
remote_display_vnc_port = fake_factory.create('ns0:OptionValue')
remote_display_vnc_port.value = 7
remote_display_vnc_port.key = 'RemoteDisplay.vnc.port'
expected.extraConfig.append(remote_display_vnc_port)
remote_display_vnc_keymap = fake_factory.create('ns0:OptionValue')
remote_display_vnc_keymap.value = 'en-us'
remote_display_vnc_keymap.key = 'RemoteDisplay.vnc.keyMap'
expected.extraConfig.append(remote_display_vnc_keymap)
self.assertEqual(expected, result)
def _create_fake_vms(self):
fake_vms = fake.FakeRetrieveResult()
OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
for i in range(10):
vm = fake.ManagedObject()
opt_val = OptionValue(key='', value=5900 + i)
vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
fake_vms.add_object(vm)
return fake_vms
def test_get_vnc_port(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10000, group='vmware')
actual = vm_util.get_vnc_port(
fake.FakeObjectRetrievalSession(fake_vms))
self.assertEqual(actual, 5910)
def test_get_vnc_port_exhausted(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10, group='vmware')
self.assertRaises(exception.ConsolePortRangeExhausted,
vm_util.get_vnc_port,
fake.FakeObjectRetrievalSession(fake_vms))
def test_get_cluster_ref_by_name_none(self):
fake_objects = fake.FakeRetrieveResult()
ref = vm_util.get_cluster_ref_by_name(
fake.FakeObjectRetrievalSession(fake_objects), 'fake_cluster')
self.assertIsNone(ref)
def test_get_cluster_ref_by_name_exists(self):
fake_objects = fake.FakeRetrieveResult()
cluster = fake.ClusterComputeResource(name='cluster')
fake_objects.add_object(cluster)
ref = vm_util.get_cluster_ref_by_name(
fake.FakeObjectRetrievalSession(fake_objects), 'cluster')
self.assertIs(cluster.obj, ref)
def test_get_cluster_ref_by_name_missing(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(partialObject(path='cluster'))
ref = vm_util.get_cluster_ref_by_name(
fake.FakeObjectRetrievalSession(fake_objects), 'cluster')
self.assertIsNone(ref)
def test_propset_dict_simple(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar")])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
def test_propset_dict_complex(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar"),
DynamicProperty(name='some.thing',
val=MoRef(value='else')),
DynamicProperty(name='another.thing', val='value')])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
self.assertTrue(hasattr(propdict['some.thing'], 'value'))
self.assertEqual("else", propdict['some.thing'].value)
self.assertEqual("value", propdict['another.thing'])
def _test_detach_virtual_disk_spec(self, destroy_disk=False):
virtual_device_config = vm_util.detach_virtual_disk_spec(
fake.FakeFactory(),
'fake_device',
destroy_disk)
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('fake_device', virtual_device_config.device)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy', virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
def test_detach_virtual_disk_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=False)
def test_detach_virtual_disk_destroy_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=True)
def _create_vm_config_spec(self):
fake_factory = fake.FakeFactory()
spec = fake_factory.create('ns0:VirtualMachineConfigSpec')
spec.name = self._instance.uuid
spec.instanceUuid = self._instance.uuid
spec.deviceChange = []
spec.numCPUs = 2
spec.version = None
spec.memoryMB = 2048
spec.guestId = 'otherGuest'
spec.extraConfig = []
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
spec.extraConfig.append(extra_config)
spec.files = fake_factory.create('ns0:VirtualMachineFileInfo')
spec.files.vmPathName = '[fake-datastore]'
spec.managedBy = fake_factory.create('ns0:ManagedByInfo')
spec.managedBy.extensionKey = 'org.openstack.compute'
spec.managedBy.type = 'instance'
spec.tools = fake_factory.create('ns0:ToolsConfigInfo')
spec.tools.afterPowerOn = True
spec.tools.afterResume = True
spec.tools.beforeGuestReboot = True
spec.tools.beforeGuestShutdown = True
spec.tools.beforeGuestStandby = True
return spec
def test_get_vm_extra_config_spec(self):
fake_factory = fake.FakeFactory()
extra_opts = {mock.sentinel.key: mock.sentinel.value}
res = vm_util.get_vm_extra_config_spec(fake_factory, extra_opts)
self.assertEqual(1, len(res.extraConfig))
self.assertEqual(mock.sentinel.key, res.extraConfig[0].key)
self.assertEqual(mock.sentinel.value, res.extraConfig[0].value)
def test_get_vm_create_spec(self):
extra_specs = vm_util.ExtraSpecs()
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = self._create_vm_config_spec()
self.assertEqual(expected, result)
expected.version = None
expected.memoryMB = 2048
expected.guestId = constants.DEFAULT_OS_TYPE
expected.extraConfig = []
def test_get_vm_create_spec_with_serial_port(self):
extra_specs = vm_util.ExtraSpecs()
fake_factory = fake.FakeFactory()
self.flags(serial_port_service_uri='foobar', group='vmware')
self.flags(serial_port_proxy_uri='telnet://example.com:31337',
group='vmware')
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
serial_port_spec = vm_util.create_serial_port_spec(fake_factory)
expected = self._create_vm_config_spec()
expected.deviceChange = [serial_port_spec]
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_allocations(self):
cpu_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
expected.guestId = constants.DEFAULT_OS_TYPE
expected.instanceUuid = self._instance.uuid
expected.memoryMB = self._instance.memory_mb
expected.name = self._instance.uuid
expected.numCPUs = self._instance.vcpus
expected.version = None
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpu_allocation.limit = 7
cpu_allocation.reservation = 6
cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
cpu_allocation.shares.level = 'normal'
cpu_allocation.shares.shares = 0
expected.cpuAllocation = cpu_allocation
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.key = 'nvp.vm-uuid'
extra_config.value = self._instance.uuid
expected.extraConfig.append(extra_config)
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_limit(self):
cpu_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.instanceUuid = self._instance.uuid
expected.name = self._instance.uuid
expected.deviceChange = []
expected.extraConfig = []
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
expected.extraConfig.append(extra_config)
expected.memoryMB = 2048
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.version = None
expected.guestId = constants.DEFAULT_OS_TYPE
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpu_allocation.limit = 7
cpu_allocation.reservation = 0
cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
cpu_allocation.shares.level = 'normal'
cpu_allocation.shares.shares = 0
expected.cpuAllocation = cpu_allocation
expected.numCPUs = 2
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share(self):
cpu_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.instanceUuid = self._instance.uuid
expected.name = self._instance.uuid
expected.deviceChange = []
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
expected.extraConfig.append(extra_config)
expected.memoryMB = 2048
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.type = 'instance'
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.version = None
expected.guestId = constants.DEFAULT_OS_TYPE
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.beforeGuestStandby = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.afterResume = True
expected.tools.afterPowerOn = True
cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpu_allocation.reservation = 0
cpu_allocation.limit = -1
cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
cpu_allocation.shares.level = 'high'
cpu_allocation.shares.shares = 0
expected.cpuAllocation = cpu_allocation
expected.numCPUs = 2
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share_custom(self):
cpu_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.instanceUuid = self._instance.uuid
expected.name = self._instance.uuid
expected.deviceChange = []
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.key = 'nvp.vm-uuid'
extra_config.value = self._instance.uuid
expected.extraConfig.append(extra_config)
expected.memoryMB = 2048
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.version = None
expected.guestId = constants.DEFAULT_OS_TYPE
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.beforeGuestStandby = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.afterResume = True
expected.tools.afterPowerOn = True
cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpu_allocation.reservation = 0
cpu_allocation.limit = -1
cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
cpu_allocation.shares.level = 'custom'
cpu_allocation.shares.shares = 1948
expected.cpuAllocation = cpu_allocation
expected.numCPUs = 2
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_metadata(self):
extra_specs = vm_util.ExtraSpecs()
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs,
metadata='fake-metadata')
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.name = self._instance.uuid
expected.instanceUuid = self._instance.uuid
expected.deviceChange = []
expected.numCPUs = 2
expected.version = None
expected.memoryMB = 2048
expected.guestId = 'otherGuest'
expected.annotation = 'fake-metadata'
expected.extraConfig = []
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
expected.extraConfig.append(extra_config)
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
self.assertEqual(expected, result)
def test_create_vm(self):
def fake_call_method(module, method, *args, **kwargs):
if (method == 'CreateVM_Task'):
return 'fake_create_vm_task'
else:
self.fail('Should not get here....')
def fake_wait_for_task(self, *args):
task_info = mock.Mock(state="success", result="fake_vm_ref")
return task_info
session = fake.FakeSession()
fake_call_mock = mock.Mock(side_effect=fake_call_method)
fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
with test.nested(
mock.patch.object(session, '_wait_for_task',
fake_wait_mock),
mock.patch.object(session, '_call_method',
fake_call_mock)
) as (wait_for_task, call_method):
vm_ref = vm_util.create_vm(
session,
self._instance,
'fake_vm_folder',
'fake_config_spec',
'fake_res_pool_ref')
self.assertEqual('fake_vm_ref', vm_ref)
call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
'fake_vm_folder', config='fake_config_spec',
pool='fake_res_pool_ref')
wait_for_task.assert_called_once_with('fake_create_vm_task')
@mock.patch.object(vm_util.LOG, 'warning')
def test_create_vm_invalid_guestid(self, mock_log_warn):
"""Ensure we warn when create_vm() fails after we passed an
unrecognised guestId
"""
found = [False]
def fake_log_warn(msg, values):
if not isinstance(values, dict):
return
if values.get('ostype') == 'invalid_os_type':
found[0] = True
mock_log_warn.side_effect = fake_log_warn
session = driver.VMwareAPISession()
config_spec = vm_util.get_vm_create_spec(
session.vim.client.factory,
self._instance, 'fake-datastore', [],
vm_util.ExtraSpecs(),
os_type='invalid_os_type')
self.assertRaises(vexc.VMwareDriverException,
vm_util.create_vm, session, self._instance,
'folder', config_spec, 'res-pool')
self.assertTrue(found[0])
def test_convert_vif_model(self):
expected = "VirtualE1000"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
self.assertEqual(expected, result)
expected = "VirtualE1000e"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
self.assertEqual(expected, result)
types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
"VirtualVmxnet", "VirtualVmxnet3"]
for type in types:
self.assertEqual(type,
vm_util.convert_vif_model(type))
self.assertRaises(exception.Invalid,
vm_util.convert_vif_model,
"InvalidVifModel")
def test_power_on_instance_with_vm_ref(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, self._instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_without_vm_ref(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-vm-ref'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, self._instance)
fake_get_vm_ref.assert_called_once_with(session, self._instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_exception(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task",
side_effect=exception.NovaException('fake')),
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_on_instance,
session, self._instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_power_state_exception(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(
session, "_wait_for_task",
side_effect=vexc.InvalidPowerStateException),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, self._instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_create_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with test.nested(
mock.patch.object(vm_util, "get_vmdk_create_spec",
return_value='fake-spec'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_spec, fake_call_method, fake_wait_for_task):
vm_util.create_virtual_disk(session, 'fake-dc-ref',
'fake-adapter-type', 'fake-disk-type',
'fake-path', 7)
fake_get_spec.assert_called_once_with(
session.vim.client.factory, 7,
'fake-adapter-type',
'fake-disk-type')
fake_call_method.assert_called_once_with(
session.vim,
"CreateVirtualDisk_Task",
dm,
name='fake-path',
datacenter='fake-dc-ref',
spec='fake-spec')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_copy_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.copy_virtual_disk(session, 'fake-dc-ref',
'fake-source', 'fake-dest')
fake_call_method.assert_called_once_with(
session.vim,
"CopyVirtualDisk_Task",
dm,
sourceName='fake-source',
sourceDatacenter='fake-dc-ref',
destName='fake-dest')
fake_wait_for_task.assert_called_once_with('fake-task')
def _create_fake_vm_objects(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.VirtualMachine())
return fake_objects
def test_reconfigure_vm(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake_reconfigure_task'),
mock.patch.object(session, '_wait_for_task')
) as (_call_method, _wait_for_task):
vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
_call_method.assert_called_once_with(mock.ANY,
'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
_wait_for_task.assert_called_once_with(
'fake_reconfigure_task')
def _get_network_attach_config_spec_opaque(self, network_ref,
vc6_onwards=False):
vif_info = {'network_name': 'fake-name',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': network_ref,
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
result = vm_util.get_network_attach_config_spec(
fake_factory, vif_info, 1)
card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = vif_info['iface_id']
extra_config.key = 'nvp.iface-id.1'
expected.extraConfig.append(extra_config)
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.operation = 'add'
device = fake_factory.create('ns0:VirtualE1000')
device.macAddress = vif_info['mac_address']
if network_ref['use-external-id']:
if vc6_onwards:
device.externalId = vif_info['iface_id']
else:
dp = fake_factory.create('ns0:DynamicProperty')
dp.name = '__externalId__'
dp.val = vif_info['iface_id']
device.dynamicProperty = [dp]
device.addressType = 'manual'
connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
connectable.allowGuestControl = True
connectable.startConnected = True
connectable.connected = True
device.connectable = connectable
backing = fake_factory.create(card)
backing.opaqueNetworkType = vif_info['network_ref']['network-type']
backing.opaqueNetworkId = vif_info['network_ref']['network-id']
device.backing = backing
device.key = -47
device.wakeOnLanEnabled = True
device_change.device = device
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
def test_get_network_attach_config_spec_opaque_integration_bridge(self):
network_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'opaque',
'use-external-id': False}
self._get_network_attach_config_spec_opaque(network_ref)
def test_get_network_attach_config_spec_opaque(self):
network_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'nsx.LogicalSwitch',
'use-external-id': True}
self._get_network_attach_config_spec_opaque(network_ref)
@mock.patch.object(fake, 'DataObject')
def test_get_network_attach_config_spec_opaque_vc6_onwards(self,
mock_object):
# Add new attribute externalId supported from VC6
class FakeVirtualE1000(fake.DataObject):
def __init__(self):
super(FakeVirtualE1000, self).__init__()
self.externalId = None
mock_object.return_value = FakeVirtualE1000
network_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'nsx.LogicalSwitch',
'use-external-id': True}
self._get_network_attach_config_spec_opaque(network_ref,
vc6_onwards=True)
def test_get_network_attach_config_spec_dvs(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
result = vm_util.get_network_attach_config_spec(
fake_factory, vif_info, 1)
port = 'ns0:DistributedVirtualSwitchPortConnection'
backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = vif_info['iface_id']
extra_config.key = 'nvp.iface-id.1'
expected.extraConfig.append(extra_config)
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.operation = 'add'
device = fake_factory.create('ns0:VirtualE1000')
device.macAddress = vif_info['mac_address']
device.key = -47
device.addressType = 'manual'
device.wakeOnLanEnabled = True
device.backing = fake_factory.create(backing)
device.backing.port = fake_factory.create(port)
device.backing.port.portgroupKey = vif_info['network_ref']['dvpg']
device.backing.port.switchUuid = vif_info['network_ref']['dvsw']
connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
connectable.allowGuestControl = True
connectable.connected = True
connectable.startConnected = True
device.connectable = connectable
device_change.device = device
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
def test_get_create_vif_spec(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
limits = vm_util.Limits()
limits.limit = 10
limits.reservation = 20
limits.shares_level = 'custom'
limits.shares_share = 40
result = vm_util._create_vif_spec(
fake_factory, vif_info, limits)
port = 'ns0:DistributedVirtualSwitchPortConnection'
backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.operation = 'add'
device = fake_factory.create('ns0:VirtualE1000')
device.macAddress = vif_info['mac_address']
device.key = -47
device.addressType = 'manual'
device.wakeOnLanEnabled = True
device.backing = fake_factory.create(backing)
device.backing.port = fake_factory.create(port)
device.backing.port.portgroupKey = vif_info['network_ref']['dvpg']
device.backing.port.switchUuid = vif_info['network_ref']['dvsw']
device.resourceAllocation = fake_factory.create(
'ns0:VirtualEthernetCardResourceAllocation')
device.resourceAllocation.limit = 10
device.resourceAllocation.reservation = 20
device.resourceAllocation.share = fake_factory.create(
'ns0:SharesInfo')
device.resourceAllocation.share.level = 'custom'
device.resourceAllocation.share.shares = 40
connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
connectable.allowGuestControl = True
connectable.connected = True
connectable.startConnected = True
device.connectable = connectable
device_change.device = device
self.assertEqual(device_change, result)
def test_get_network_detach_config_spec(self):
fake_factory = fake.FakeFactory()
result = vm_util.get_network_detach_config_spec(
fake_factory, 'fake-device', 2)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = 'free'
extra_config.key = 'nvp.iface-id.2'
expected.extraConfig.append(extra_config)
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.device = 'fake-device'
device_change.operation = 'remove'
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, self._instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
def test_power_off_instance_no_vm_ref(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, self._instance)
fake_get_ref.assert_called_once_with(session, self._instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_with_exception(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task',
side_effect=exception.NovaException('fake'))
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_off_instance,
session, self._instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_power_state_exception(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(
session, '_wait_for_task',
side_effect=vexc.InvalidPowerStateException)
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, self._instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
def test_get_vm_create_spec_updated_hw_version(self):
extra_specs = vm_util.ExtraSpecs(hw_version='vmx-08')
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
self._instance,
'fake-datastore', [],
extra_specs=extra_specs)
self.assertEqual('vmx-08', result.version)
def test_vm_create_spec_with_profile_spec(self):
datastore = ds_obj.Datastore('fake-ds-ref', 'fake-ds-name')
extra_specs = vm_util.ExtraSpecs()
create_spec = vm_util.get_vm_create_spec(fake.FakeFactory(),
self._instance,
datastore.name, [],
extra_specs,
profile_spec='fake_profile_spec')
self.assertEqual(['fake_profile_spec'], create_spec.vmProfile)
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_get_storage_profile_spec(self, mock_retrieve_profile_id):
fake_profile_id = fake.DataObject()
fake_profile_id.uniqueId = 'fake_unique_id'
mock_retrieve_profile_id.return_value = fake_profile_id
profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
'fake_policy')
self.assertEqual('ns0:VirtualMachineDefinedProfileSpec',
profile_spec.obj_name)
self.assertEqual(fake_profile_id.uniqueId, profile_spec.profileId)
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_storage_spec_empty_profile(self, mock_retrieve_profile_id):
mock_retrieve_profile_id.return_value = None
profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
'fake_policy')
self.assertIsNone(profile_spec)
def test_get_ephemeral_name(self):
filename = vm_util.get_ephemeral_name(0)
self.assertEqual('ephemeral_0.vmdk', filename)
def test_detach_and_delete_devices_config_spec(self):
fake_devices = ['device1', 'device2']
fake_factory = fake.FakeFactory()
result = vm_util._detach_and_delete_devices_config_spec(fake_factory,
fake_devices)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
device1 = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device1.device = 'device1'
device1.operation = 'remove'
device1.fileOperation = 'destroy'
expected.deviceChange.append(device1)
device2 = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device2.device = 'device2'
device2.operation = 'remove'
device2.fileOperation = 'destroy'
expected.deviceChange.append(device2)
self.assertEqual(expected, result)
@mock.patch.object(vm_util, 'reconfigure_vm')
def test_detach_devices_from_vm(self, mock_reconfigure):
fake_devices = ['device1', 'device2']
session = fake.FakeSession()
vm_util.detach_devices_from_vm(session,
'fake-ref',
fake_devices)
mock_reconfigure.assert_called_once_with(session, 'fake-ref', mock.ANY)
def test_get_vm_boot_spec(self):
disk = fake.VirtualDisk()
disk.key = 7
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_boot_spec(fake_factory,
disk)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
boot_disk = fake_factory.create(
'ns0:VirtualMachineBootOptionsBootableDiskDevice')
boot_disk.deviceKey = disk.key
boot_options = fake_factory.create('ns0:VirtualMachineBootOptions')
boot_options.bootOrder = [boot_disk]
expected.bootOptions = boot_options
self.assertEqual(expected, result)
def _get_devices(self, filename):
devices = fake._create_array_of_type('VirtualDevice')
devices.VirtualDevice = self._vmdk_path_and_adapter_type_devices(
filename)
return devices
def test_find_rescue_device(self):
filename = '[test_datastore] uuid/uuid-rescue.vmdk'
devices = self._get_devices(filename)
device = vm_util.find_rescue_device(devices, self._instance)
self.assertEqual(filename, device.backing.fileName)
def test_find_rescue_device_not_found(self):
filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._get_devices(filename)
self.assertRaises(exception.NotFound,
vm_util.find_rescue_device,
devices,
self._instance)
def test_validate_limits(self):
limits = vm_util.Limits(shares_level='high',
shares_share=1948)
self.assertRaises(exception.InvalidInput,
limits.validate)
limits = vm_util.Limits(shares_level='fira')
self.assertRaises(exception.InvalidInput,
limits.validate)
def test_get_vm_create_spec_with_console_delay(self):
extra_specs = vm_util.ExtraSpecs()
self.flags(console_delay_seconds=2, group='vmware')
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.name = self._instance.uuid
expected.instanceUuid = self._instance.uuid
expected.deviceChange = []
expected.numCPUs = 2
expected.version = None
expected.memoryMB = 2048
expected.guestId = constants.DEFAULT_OS_TYPE
expected.extraConfig = []
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = 2000000
extra_config.key = 'keyboard.typematicMinDelay'
expected.extraConfig.append(extra_config)
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_cores_per_socket(self):
extra_specs = vm_util.ExtraSpecs(cores_per_socket=4)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
expected.guestId = 'otherGuest'
expected.instanceUuid = self._instance.uuid
expected.memoryMB = self._instance.memory_mb
expected.name = self._instance.uuid
expected.numCPUs = self._instance.vcpus
expected.numCoresPerSocket = 4
expected.version = None
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.key = 'nvp.vm-uuid'
extra_config.value = self._instance.uuid
expected.extraConfig.append(extra_config)
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_memory_allocations(self):
memory_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
expected.guestId = 'otherGuest'
expected.instanceUuid = self._instance.uuid
expected.memoryMB = self._instance.memory_mb
expected.name = self._instance.uuid
expected.numCPUs = self._instance.vcpus
expected.version = None
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
memory_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
memory_allocation.limit = 7
memory_allocation.reservation = 6
memory_allocation.shares = fake_factory.create('ns0:SharesInfo')
memory_allocation.shares.level = 'normal'
memory_allocation.shares.shares = 0
expected.memoryAllocation = memory_allocation
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.key = 'nvp.vm-uuid'
extra_config.value = self._instance.uuid
expected.extraConfig.append(extra_config)
self.assertEqual(expected, result)
def test_get_swap(self):
vm_ref = 'fake-vm-ref'
# Root disk
controller_key = 1000
root_disk = fake.VirtualDisk()
root_disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = '[test_datastore] uuid/uuid.vmdk'
root_disk.capacityInBytes = 1048576
root_disk.backing = disk_backing
# Swap disk
swap_disk = fake.VirtualDisk()
swap_disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = "swap"
swap_disk.capacityInBytes = 1024
swap_disk.backing = disk_backing
devices = [root_disk, swap_disk]
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
return_value=devices) as mock_call:
device = vm_util.get_swap(session, vm_ref)
mock_call.assert_called_once_with(mock.ANY,
"get_object_property", vm_ref, "config.hardware.device")
self.assertEqual(swap_disk, device)
def test_create_folder_with_empty_vmfolder(self):
"""Test create_folder when the datacenter vmFolder is empty"""
child_folder = mock.sentinel.child_folder
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
side_effect=[None, child_folder]):
parent_folder = mock.sentinel.parent_folder
parent_folder.value = 'parent-ref'
child_name = 'child_folder'
ret = vm_util.create_folder(session, parent_folder, child_name)
self.assertEqual(child_folder, ret)
expected_calls = [mock.call(vutil, 'get_object_property',
parent_folder,
'childEntity'),
mock.call(session.vim, 'CreateFolder',
parent_folder, name=child_name)]
self.assertEqual(expected_calls,
session._call_method.call_args_list)
def test_create_folder_not_present(self):
"""Test create_folder when child not present."""
prop_val = mock.Mock()
prop_val.ManagedObjectReference = []
child_folder = mock.sentinel.child_folder
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
side_effect=[prop_val, child_folder]):
child_name = 'child_folder'
parent_folder = mock.sentinel.parent_folder
parent_folder.value = 'parent-ref'
ret = vm_util.create_folder(session, parent_folder, child_name)
self.assertEqual(child_folder, ret)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
parent_folder,
'childEntity'),
mock.call(session.vim, 'CreateFolder',
parent_folder, name=child_name)]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_create_folder_already_present(self):
"""Test create_folder when child already present."""
parent_folder = mock.sentinel.parent_folder
child_name = 'child_folder'
prop_val = mock.Mock()
child_entity_1 = mock.Mock()
child_entity_1._type = 'Folder'
child_entity_1_name = 'SomeOtherName'
child_entity_2 = mock.Mock()
child_entity_2._type = 'Folder'
child_entity_2_name = 'AnotherName'
child_entity_3 = mock.Mock()
child_entity_3._type = 'Folder'
child_entity_3_name = child_name
prop_val.ManagedObjectReference = [child_entity_1, child_entity_2,
child_entity_3]
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
side_effect=[prop_val,
child_entity_1_name,
child_entity_2_name,
child_entity_3_name]):
ret = vm_util.create_folder(session, parent_folder, child_name)
self.assertEqual(child_entity_3, ret)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
parent_folder,
'childEntity'),
mock.call(vutil, 'get_object_property',
child_entity_1,
'name'),
mock.call(vutil, 'get_object_property',
child_entity_2,
'name'),
mock.call(vutil, 'get_object_property',
child_entity_3,
'name')]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_create_folder_with_duplicate_name(self):
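        """Test create_folder when CreateFolder raises DuplicateName."""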
parent_folder = mock.sentinel.parent_folder
parent_folder.value = 'parent-ref'
child_name = 'child_folder'
prop_val_1 = mock.Mock()
prop_val_1.ManagedObjectReference = []
child_entity_2 = mock.Mock()
child_entity_2._type = 'Folder'
prop_val_2 = mock.Mock()
prop_val_2.ManagedObjectReference = [child_entity_2]
child_entity_2_name = child_name
details = {'object': 'folder-1'}
duplicate_exception = vexc.DuplicateName(details=details)
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
side_effect=[prop_val_1,
duplicate_exception,
prop_val_2,
child_entity_2_name]):
ret = vm_util.create_folder(session, parent_folder, child_name)
self.assertEqual(child_entity_2._type, ret._type)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
parent_folder,
'childEntity'),
mock.call(session.vim, 'CreateFolder',
parent_folder, name=child_name)]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_get_folder_does_not_exist(self):
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
return_value=None):
ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
self.assertIsNone(ret)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
'fake-parent',
'childEntity')]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_get_folder_child_entry_not_folder(self):
child_entity = mock.Mock()
child_entity._type = 'NotFolder'
prop_val = mock.Mock()
prop_val.ManagedObjectReference = [child_entity]
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
return_value=prop_val):
ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
self.assertIsNone(ret)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
'fake-parent',
'childEntity')]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_get_folder_child_entry_not_matched(self):
child_entity = mock.Mock()
child_entity._type = 'Folder'
prop_val = mock.Mock()
prop_val.ManagedObjectReference = [child_entity]
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
side_effect=[prop_val, 'fake-1-name']):
ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
self.assertIsNone(ret)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
'fake-parent',
'childEntity'),
mock.call(vutil, 'get_object_property',
child_entity, 'name')]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_get_folder_child_entry_matched(self):
child_entity = mock.Mock()
child_entity._type = 'Folder'
prop_val = mock.Mock()
prop_val.ManagedObjectReference = [child_entity]
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
side_effect=[prop_val, 'fake-name']):
ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
self.assertEqual(ret, child_entity)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
'fake-parent',
'childEntity'),
mock.call(vutil, 'get_object_property',
child_entity, 'name')]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_folder_path_ref_cache(self):
path = 'OpenStack/Project (e2b86092bf064181ade43deb3188f8e4)'
self.assertIsNone(vm_util.folder_ref_cache_get(path))
vm_util.folder_ref_cache_update(path, 'fake-ref')
self.assertEqual('fake-ref', vm_util.folder_ref_cache_get(path))
def test_get_vm_name(self):
uuid = uuidutils.generate_uuid()
expected = uuid
name = vm_util._get_vm_name(None, uuid)
self.assertEqual(expected, name)
display_name = 'fira'
expected = 'fira (%s)' % uuid
name = vm_util._get_vm_name(display_name, uuid)
self.assertEqual(expected, name)
display_name = 'X' * 255
expected = '%s (%s)' % ('X' * 41, uuid)
name = vm_util._get_vm_name(display_name, uuid)
self.assertEqual(expected, name)
self.assertEqual(len(name), 80)
@mock.patch.object(vm_util, '_get_vm_name', return_value='fake-name')
def test_rename_vm(self, mock_get_name):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake_rename_task'),
mock.patch.object(session, '_wait_for_task')
) as (_call_method, _wait_for_task):
vm_util.rename_vm(session, 'fake-ref', self._instance)
_call_method.assert_called_once_with(mock.ANY,
'Rename_Task', 'fake-ref', newName='fake-name')
_wait_for_task.assert_called_once_with(
'fake_rename_task')
mock_get_name.assert_called_once_with(self._instance.display_name,
self._instance.uuid)
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
# N.B. Mocking on the class only mocks test_*(), but we need
# VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
# setUp causes object initialisation to fail. Not mocking in tests results
# in vim calls not using FakeVim.
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
def setUp(self):
super(VMwareVMUtilGetHostRefTestCase, self).setUp()
fake.reset()
vm_util.vm_refs_cache_reset()
self.session = driver.VMwareAPISession()
# Create a fake VirtualMachine running on a known host
self.host_ref = list(fake._db_content['HostSystem'].keys())[0]
self.vm_ref = fake.create_vm(host_ref=self.host_ref)
@mock.patch.object(vm_util, 'get_vm_ref')
def test_get_host_ref_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')
mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
self.assertEqual(self.host_ref, ret)
@mock.patch.object(vm_util, 'get_vm_ref')
def test_get_host_name_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
host = fake._get_object(self.host_ref)
ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
self.assertEqual(host.name, ret)
|
|
import os
from facetools import json
import copy
import requests
from django.test import TestCase
from django.core import management
from django.conf import settings
from fandjango.models import User
from facetools.management.commands.sync_facebook_test_users import _get_test_user_relationships
from facetools.common import _get_app_access_token
from facetools.test.testusers import _delete_test_user_on_facebook
from facetools.models import TestUser
from facetools.management.commands.sync_facebook_test_users import _get_app_fixture_directory, _get_facetools_test_fixture_name, _clean_test_user_fixture
from facetools.signals import setup_facebook_test_client, sync_facebook_test_user
from facetools.integrations import fandjango
from test_project import testapp1, testapp2, testapp3
class SyncFacebookTestUsersTests(TestCase):
def tearDown(self):
for test_user in TestUser.objects.all():
test_user.delete() # should also delete facebook test user through delete method override
def test_get_test_user_relationships(self):
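        # Five permutations of the same three-user friendship graph; each
        # should normalize to the same set of relationship pairs.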
t1 = [{'name': 'Unittest Jacobs', 'friends': ['Unittest Deschain','Unittest Billows']},
{'name': 'Unittest Deschain', 'friends': ['Unittest Jacobs','Unittest Billows']},
{ 'name': 'Unittest Billows', 'friends': ['Unittest Deschain', 'Unittest Jacobs']}]
t2 = [{'name': 'Unittest Jacobs', 'friends': ['Unittest Deschain']},
{'name': 'Unittest Deschain', 'friends': ['Unittest Jacobs']},
{ 'name': 'Unittest Billows', 'friends': ['Unittest Deschain', 'Unittest Jacobs']}]
t3 = [{'name': 'Unittest Jacobs', 'friends': ['Unittest Deschain']},
{'name': 'Unittest Deschain', 'friends': []},
{ 'name': 'Unittest Billows', 'friends': ['Unittest Deschain', 'Unittest Jacobs']}]
t4 = [{'name': 'Unittest Jacobs', 'friends': []},
{'name': 'Unittest Deschain', 'friends': ['Unittest Jacobs']},
{ 'name': 'Unittest Billows', 'friends': ['Unittest Deschain', 'Unittest Jacobs']}]
t5 = [{'name': 'Unittest Jacobs', 'friends': ['Unittest Billows']},
{'name': 'Unittest Deschain', 'friends': ['Unittest Jacobs']},
{ 'name': 'Unittest Billows', 'friends': ['Unittest Deschain']}]
for t in [t1,t2,t3,t4,t5]:
relationships = _get_test_user_relationships(t)
self.assertEquals(3, len(relationships))
self.assertTrue((set([t[0]['name'], t[1]['name']])) in relationships)
self.assertTrue((set([t[0]['name'], t[2]['name']])) in relationships)
self.assertTrue((set([t[1]['name'], t[2]['name']])) in relationships)
def test_creating_one_user(self):
from test_project.testapp1.facebook_test_users import facebook_test_users
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp1')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (
settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
api_test_users = json.loads(requests.get(test_users_url).content)['data']
test_users = _merge_with_facebook_data(facebook_test_users, api_test_users, _get_app_access_token())
# Make sure the test user's information on facebook is correct
self.assertEquals(1, len(test_users))
self.assertEquals(1, len([u for u in test_users if u.get('graph_user_data') and u.get('graph_permission_data')]))
for permission in test_users[0]['permissions']:
self.assertTrue(permission.strip() in test_users[0]['graph_permission_data']['data'][0])
# Make sure the test user's information in facetools is correct
self.assertEquals(1, TestUser.objects.count())
user = TestUser.objects.get()
self.assertEquals(int(test_users[0]['graph_user_data']['id']), user.facebook_id)
self.assertEquals(test_users[0]['name'], user.name)
self.assertEquals(test_users[0]['graph_user_data']['login_url'], user.login_url)
self.assertEquals(test_users[0]['installed'], _has_access_code(user.access_token))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp1, 'testapp1', facebook_test_users)
def test_overwrite_one_user(self):
from test_project.testapp1.facebook_test_users import facebook_test_users
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp1')
management.call_command('sync_facebook_test_users', 'testapp1')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (
settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
api_test_users = json.loads(requests.get(test_users_url).content)['data']
test_users = _merge_with_facebook_data(facebook_test_users, api_test_users, _get_app_access_token())
# Make sure the test user's information on facebook is correct
self.assertEquals(1, len(test_users))
self.assertEquals(1, len([u for u in test_users if u.get('graph_user_data') and u.get('graph_permission_data')]))
self.assertEquals(1, len([u for u in api_test_users if 'id' in u and u['id'] == test_users[0]['graph_user_data']['id']]))
for permission in test_users[0]['permissions']:
self.assertTrue(permission.strip() in test_users[0]['graph_permission_data']['data'][0])
# Make sure the test user's information in facetools is correct
self.assertEquals(1, TestUser.objects.count())
user = TestUser.objects.get()
self.assertEquals(int(test_users[0]['graph_user_data']['id']), user.facebook_id)
self.assertEquals(test_users[0]['graph_user_data']['name'], user.name)
self.assertEquals(test_users[0]['graph_user_data']['login_url'], user.login_url)
self.assertEquals(test_users[0]['installed'], _has_access_code(user.access_token))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp1, 'testapp1', facebook_test_users)
def test_creating_many_users(self):
from test_project.testapp2.facebook_test_users import facebook_test_users as t2
facebook_test_users = t2()
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp2')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
# Make sure each test user's information on facebook is correct
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u and 'graph_permission_data' in u]))
for test_user in test_users:
for permission in test_user['permissions']:
self.assertTrue(permission.strip() in test_user['graph_permission_data']['data'][0])
friends_on_facebook = _get_friends_on_facebook(test_user)
for friend_name in test_user.get('friends', []):
self.assertTrue(friend_name in friends_on_facebook)
self.assertEqual(friends_on_facebook[friend_name],
TestUser.objects.get(name=friend_name).facebook_id)
# Make sure each test user's information in facetools is correct
self.assertEquals(3, TestUser.objects.count())
for user in TestUser.objects.all():
test_user = [t for t in test_users if int(t['graph_user_data']['id']) == user.facebook_id][0]
self.assertEquals(test_user['name'], user.name)
self.assertEquals(test_user['graph_user_data']['login_url'], user.login_url)
self.assertEquals(test_user['installed'], _has_access_code(user.access_token))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp2, 'testapp2', t2())
def test_overwriting_many_users(self):
from test_project.testapp2.facebook_test_users import facebook_test_users as t2
facebook_test_users = t2()
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp2')
management.call_command('sync_facebook_test_users', 'testapp2')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
# Make sure each test user's information on facebook is correct
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u and 'graph_permission_data' in u]))
for test_user in test_users:
for permission in test_user['permissions']:
self.assertTrue(permission.strip() in test_user['graph_permission_data']['data'][0])
friends_on_facebook = _get_friends_on_facebook(test_user)
for friend_name in test_user.get('friends', []):
self.assertTrue(friend_name in friends_on_facebook)
self.assertEqual(friends_on_facebook[friend_name],
TestUser.objects.get(name=friend_name).facebook_id)
# Make sure each test user's information in facetools is correct
self.assertEquals(3, TestUser.objects.count())
for user in TestUser.objects.all():
test_user = [t for t in test_users if t['graph_user_data']['id'] == str(user.facebook_id)][0]
self.assertEquals(test_user['name'], user.name)
self.assertEquals(test_user['graph_user_data']['login_url'], user.login_url)
self.assertEquals(test_user['installed'], _has_access_code(user.access_token))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp2, 'testapp2', t2())
def test_creating_many_users_mixed_installations(self):
from test_project.testapp3.facebook_test_users import facebook_test_users as t3
facebook_test_users = t3()
        self.assertTrue(not all([u['installed'] for u in facebook_test_users])) # make sure not every user has the app installed
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp3')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
# Make sure each test user's information on facebook is correct
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u and 'graph_permission_data' in u]))
for test_user in test_users:
for permission in test_user['permissions']:
self.assertTrue(permission.strip() in test_user['graph_permission_data']['data'][0])
friends_on_facebook = _get_friends_on_facebook(test_user)
for friend_name in test_user.get('friends', []):
self.assertTrue(friend_name in friends_on_facebook)
self.assertEqual(friends_on_facebook[friend_name],
TestUser.objects.get(name=friend_name).facebook_id)
# Make sure each test user's information in facetools is correct
self.assertEquals(3, TestUser.objects.count())
for user in TestUser.objects.all():
test_user = [t for t in test_users if int(t['graph_user_data']['id']) == user.facebook_id][0]
self.assertEquals(test_user['name'], user.name)
self.assertEquals(test_user['graph_user_data']['login_url'], user.login_url)
self.assertEquals(test_user['installed'], _has_access_code(user.access_token))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp3, 'testapp3', t3())
def test_overwriting_many_users_mixed_installations(self):
from test_project.testapp3.facebook_test_users import facebook_test_users as t3
facebook_test_users = t3()
        self.assertTrue(not all([u['installed'] for u in facebook_test_users])) # make sure not every user has the app installed
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp3')
management.call_command('sync_facebook_test_users', 'testapp3')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
# Make sure each test user's information on facebook is correct
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u and 'graph_permission_data' in u]))
for test_user in test_users:
for permission in test_user['permissions']:
self.assertTrue(permission.strip() in test_user['graph_permission_data']['data'][0])
friends_on_facebook = _get_friends_on_facebook(test_user)
for friend_name in test_user.get('friends', []):
self.assertTrue(friend_name in friends_on_facebook)
self.assertEqual(friends_on_facebook[friend_name],
TestUser.objects.get(name=friend_name).facebook_id)
# Make sure each test user's information in facetools is correct
self.assertEquals(3, TestUser.objects.count())
for user in TestUser.objects.all():
test_user = [t for t in test_users if t['graph_user_data']['id'] == str(user.facebook_id)][0]
self.assertEquals(test_user['name'], user.name)
self.assertEquals(test_user['graph_user_data']['login_url'], user.login_url)
self.assertEquals(test_user['installed'], _has_access_code(user.access_token))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp3, 'testapp3', t3())
def test_sync_where_in_facetools_missing_in_facebook(self):
from test_project.testapp3.facebook_test_users import facebook_test_users as t3
facebook_test_users = t3()
        self.assertTrue(not all([u['installed'] for u in facebook_test_users])) # make sure not every user has the app installed
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp3')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
# Make sure the data looks good
self.assertEquals(3, TestUser.objects.count())
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u]))
# Now remove the users from facebook, leaving them in facetools database
for test_user in test_users:
_delete_test_user_on_facebook(TestUser.objects.get(name=test_user['name']))
self.assertEquals(3, TestUser.objects.count())
check_users = json.loads(requests.get(test_users_url).content)['data']
old_ids = [u['graph_user_data']['id'] for u in test_users]
self.assertTrue(not any([c['id'] in old_ids for c in check_users]))
# After syncing again the data should be back to normal
management.call_command('sync_facebook_test_users', 'testapp3')
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
self.assertEquals(3, TestUser.objects.count())
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u]))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp3, 'testapp3', t3())
def test_sync_where_in_facebook_missing_in_facetools(self):
from test_project.testapp3.facebook_test_users import facebook_test_users as t3
facebook_test_users = t3()
        self.assertTrue(not all([u['installed'] for u in facebook_test_users])) # make sure not every user has the app installed
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp3')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
# Make sure the data looks good
self.assertEquals(3, TestUser.objects.count())
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u]))
# Now remove the users from facetools, leaving them on facebook
TestUser.objects.all().delete()
self.assertEquals(0, TestUser.objects.count())
check_users = json.loads(requests.get(test_users_url).content)['data']
old_ids = [u['graph_user_data']['id'] for u in test_users]
self.assertEquals(3, len([c for c in check_users if c['id'] in old_ids]))
# After syncing again the data should be back to normal
management.call_command('sync_facebook_test_users', 'testapp3')
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
self.assertEquals(3, TestUser.objects.count())
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u]))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp3, 'testapp3', t3())
def test_sync_where_in_facebook_and_in_facetools_but_data_not_synced(self):
from test_project.testapp3.facebook_test_users import facebook_test_users as t3
facebook_test_users = t3()
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp3')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
# Make sure the data looks good
self.assertEquals(3, TestUser.objects.count())
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u]))
# Now change the user data on facetools, leaving them out of sync with the facebook data
old_values = {}
try:
for test_user in TestUser.objects.all():
old_values[test_user.name] = {
'facebook_id': test_user.facebook_id,
'access_token': test_user.access_token
}
test_user.facebook_id = 0
test_user.access_token = "failbear"
test_user.save()
# After syncing again the data should be back to normal
management.call_command('sync_facebook_test_users', 'testapp3')
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
self.assertEquals(3, TestUser.objects.count())
self.assertEquals(3, len(test_users))
self.assertEquals(3, len([u for u in test_users if 'graph_user_data' in u]))
for test_user in TestUser.objects.all():
self.assertNotEquals(0, test_user.facebook_id)
self.assertNotEquals("failbear", test_user.access_token)
finally:
for test_user in TestUser.objects.all():
test_user.facebook_id = old_values[test_user.name]['facebook_id']
test_user.access_token = old_values[test_user.name]['access_token']
test_user.save()
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp3, 'testapp3', t3())
def test_sync_multiple_apps(self):
from test_project.testapp1.facebook_test_users import facebook_test_users as t1
from test_project.testapp2.facebook_test_users import facebook_test_users as t2
facebook_test_users = t1 + t2()
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users', 'testapp1', 'testapp2')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
# Make sure each test user's information on facebook is correct
self.assertEquals(4, len(test_users))
self.assertEquals(4, len([u for u in test_users if 'graph_user_data' in u and 'graph_permission_data' in u]))
for test_user in test_users:
for permission in test_user['permissions']:
self.assertTrue(permission.strip() in test_user['graph_permission_data']['data'][0])
friends_on_facebook = _get_friends_on_facebook(test_user)
for friend_name in test_user.get('friends', []):
self.assertTrue(friend_name in friends_on_facebook)
self.assertEqual(friends_on_facebook[friend_name],
TestUser.objects.get(name=friend_name).facebook_id)
# Make sure each test user's information in facetools is correct
self.assertEquals(4, TestUser.objects.count())
for user in TestUser.objects.all():
test_user = [t for t in test_users if int(t['graph_user_data']['id']) == user.facebook_id][0]
self.assertEquals(test_user['name'], user.name)
self.assertEquals(test_user['graph_user_data']['login_url'], user.login_url)
self.assertEquals(test_user['installed'], _has_access_code(user.access_token))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp1, 'testapp1', t1)
self.assertTestUserFixture(testapp2, 'testapp2', t2())
def test_sync_all_apps(self):
from test_project.testapp1.facebook_test_users import facebook_test_users as t1
from test_project.testapp2.facebook_test_users import facebook_test_users as t2
from test_project.testapp3.facebook_test_users import facebook_test_users as t3
facebook_test_users = t1 + t2() + t3()
self.assertEquals(0, TestUser.objects.count())
management.call_command('sync_facebook_test_users')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
# Make sure each test user's information on facebook is correct
self.assertEquals(7, len(test_users))
self.assertEquals(7, len([u for u in test_users if 'graph_user_data' in u and 'graph_permission_data' in u]))
for test_user in test_users:
for permission in test_user['permissions']:
self.assertTrue(permission.strip() in test_user['graph_permission_data']['data'][0])
friends_on_facebook = _get_friends_on_facebook(test_user)
for friend_name in test_user.get('friends', []):
self.assertTrue(friend_name in friends_on_facebook)
self.assertEqual(friends_on_facebook[friend_name],
TestUser.objects.get(name=friend_name).facebook_id)
# Make sure each test user's information in facetools is correct
self.assertEquals(7, TestUser.objects.count())
for user in TestUser.objects.all():
test_user = [t for t in test_users if int(t['graph_user_data']['id']) == user.facebook_id][0]
self.assertEquals(test_user['name'], user.name)
self.assertEquals(test_user['graph_user_data']['login_url'], user.login_url)
self.assertEquals(test_user['installed'], _has_access_code(user.access_token))
# Make sure the generated fixture is correct
self.assertTestUserFixture(testapp1, 'testapp1', t1)
self.assertTestUserFixture(testapp2, 'testapp2', t2())
self.assertTestUserFixture(testapp3, 'testapp3', t3())
def assertTestUserFixture(self, app, app_name, test_users):
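        # Verify that the generated facetools fixture contains exactly the
        # expected test users (the fixture pk is the test user's name).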
fixture_file_path = os.path.join(_get_app_fixture_directory(app),
_get_facetools_test_fixture_name(app_name))
fixture_test_users = json.loads(open(fixture_file_path).read())
fixture_user_names = set([u['pk'] for u in fixture_test_users])
expected_user_names = set([u['name'] for u in test_users])
self.assertEquals(expected_user_names, fixture_user_names)
class FixTestUserFixtureTests(TestCase):
def test_clean_test_user_fixture_1(self):
fixture_content = """[
{
"pk": "Unittest Smith",
"model": "facetools.testuser",
"fields": {
"access_token": "AAAESR2HSywMBAAZBgFVWohGpg0XtALkfga09fF4mZBwhtF2q0ORpYJ7tJdZBEj5cWw8wQzbMcZBZBFZAZBVuFnAIV7JxBaZAUAOOBa5a7e4Qrav5ZCndFWDmA",
"login_url": "https://www.facebook.com/platform/test_account_login.php?user_id=100003568662664&n=rx3Yb9ihtNlVHfT",
"facebook_id": "100003568662664"
}
},
{
"pk": "Unittest Jacobs",
"model": "facetools.testuser",
"fields": {
"access_token": "AAAESR2HSywMBAGQu1lzfZABZCCMq81JFPx4PP2KzR1IsLO7nZBTZCGU1szsdH2nn4aNmZB5FcvJcEDyv8Et9P8TDurZA2K522oJcYFEtETIAq6NrmKLbZBR",
"login_url": "https://www.facebook.com/platform/test_account_login.php?user_id=100003573522566&n=PB5kX2MF0VUJ2mn",
"facebook_id": "100003573522566"
}
}
]"""
expected_content = """[
{
"pk": "Unittest Jacobs",
"model": "facetools.testuser",
"fields": {
"access_token": "AAAESR2HSywMBAGQu1lzfZABZCCMq81JFPx4PP2KzR1IsLO7nZBTZCGU1szsdH2nn4aNmZB5FcvJcEDyv8Et9P8TDurZA2K522oJcYFEtETIAq6NrmKLbZBR",
"login_url": "https://www.facebook.com/platform/test_account_login.php?user_id=100003573522566&n=PB5kX2MF0VUJ2mn",
"facebook_id": "100003573522566"
}
}
]"""
test_users = [
{
'name': 'Unittest Jacobs',
'installed': True,
'permissions': []
}
]
new_content = _clean_test_user_fixture(fixture_content, test_users)
self.assertEquals(expected_content, new_content)
def test_clean_test_user_fixture_2(self):
fixture_content = """[
{
"pk": "Unittest Smith",
"model": "facetools.testuser",
"fields": {
"access_token": "AAAESR2HSywMBAAZBgFVWohGpg0XtALkfga09fF4mZBwhtF2q0ORpYJ7tJdZBEj5cWw8wQzbMcZBZBFZAZBVuFnAIV7JxBaZAUAOOBa5a7e4Qrav5ZCndFWDmA",
"login_url": "https://www.facebook.com/platform/test_account_login.php?user_id=100003568662664&n=rx3Yb9ihtNlVHfT",
"facebook_id": "100003568662664"
}
},
{
"pk": "Unittest Jacobs",
"model": "facetools.testuser",
"fields": {
"access_token": "AAAESR2HSywMBAGQu1lzfZABZCCMq81JFPx4PP2KzR1IsLO7nZBTZCGU1szsdH2nn4aNmZB5FcvJcEDyv8Et9P8TDurZA2K522oJcYFEtETIAq6NrmKLbZBR",
"login_url": "https://www.facebook.com/platform/test_account_login.php?user_id=100003573522566&n=PB5kX2MF0VUJ2mn",
"facebook_id": "100003573522566"
}
}
]"""
expected_content = """[
{
"pk": "Unittest Smith",
"model": "facetools.testuser",
"fields": {
"access_token": "AAAESR2HSywMBAAZBgFVWohGpg0XtALkfga09fF4mZBwhtF2q0ORpYJ7tJdZBEj5cWw8wQzbMcZBZBFZAZBVuFnAIV7JxBaZAUAOOBa5a7e4Qrav5ZCndFWDmA",
"login_url": "https://www.facebook.com/platform/test_account_login.php?user_id=100003568662664&n=rx3Yb9ihtNlVHfT",
"facebook_id": "100003568662664"
}
}
]"""
test_users = [
{
'name': 'Unittest Smith',
'installed': True,
'permissions': []
}
]
new_content = _clean_test_user_fixture(fixture_content, test_users)
self.assertEquals(expected_content, new_content)
class FandjangoIntegrationTest(TestCase):
def _pre_setup(self):
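        # Connect the fandjango integration signal so that syncing test users
        # also creates matching fandjango User records for these tests.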
sync_facebook_test_user.connect(fandjango.sync_facebook_test_user)
super(FandjangoIntegrationTest, self)._pre_setup()
def _post_teardown(self):
sync_facebook_test_user.disconnect(fandjango.sync_facebook_test_user)
super(FandjangoIntegrationTest, self)._post_teardown()
def tearDown(self):
for test_user in TestUser.objects.all():
test_user.delete() # should also delete facebook test user through delete method override
def test_fandjango_users_created_correctly(self):
from test_project.testapp3.facebook_test_users import facebook_test_users as t3
facebook_test_users = t3()
        self.assertTrue(not all([u['installed'] for u in facebook_test_users])) # make sure not every user has the app installed
management.call_command('sync_facebook_test_users', 'testapp3')
# Get the test user data from facebook
test_users_url = "https://graph.facebook.com/%s/accounts/test-users?access_token=%s" % (settings.FACEBOOK_APPLICATION_ID, _get_app_access_token())
test_users = _merge_with_facebook_data(facebook_test_users, json.loads(requests.get(test_users_url).content)['data'], _get_app_access_token())
        # Make sure only the test users that have the app installed have corresponding Fandjango User records
self.assertEquals(2, User.objects.count())
for test_user in test_users:
if test_user['installed']:
user = User.objects.get(facebook_id=int(test_user['graph_user_data']['id']))
self.assertEquals(test_user['access_token'], user.oauth_token.token)
else:
self.assertEquals(0, User.objects.filter(facebook_id=int(test_user['graph_user_data']['id'])).count())
def _merge_with_facebook_data(facebook_test_users, graph_test_users, access_token):
"""
    Creates a copy of the facebook_test_users list, attaching each test user's user and permission data
    from the Open Graph API.
"""
    # Merge Open Graph data with the original facebook_test_users list
facebook_test_users = copy.deepcopy(facebook_test_users)
for graph_test_user in graph_test_users:
if 'id' in graph_test_user:
facebook_id = graph_test_user['id']
test_user_url = "https://graph.facebook.com/%s?access_token=%s" % (facebook_id, access_token)
permissions_url = "https://graph.facebook.com/%s/permissions?access_token=%s" % (facebook_id, access_token)
user_data = json.loads(requests.get(test_user_url).content)
permissions_data = json.loads(requests.get(permissions_url).content)
for facebook_test_user in facebook_test_users:
if user_data and 'name' in user_data and facebook_test_user['name'] == user_data['name']:
facebook_test_user['access_token'] = graph_test_user.get('access_token')
facebook_test_user['graph_user_data'] = user_data
facebook_test_user['graph_user_data']['login_url'] = graph_test_user['login_url']
facebook_test_user['graph_permission_data'] = permissions_data if 'data' in permissions_data else None
    # Remove any test users that didn't receive any data from the Open Graph API
test_users = []
for test_user in facebook_test_users:
if 'graph_user_data' in test_user and 'graph_permission_data' in test_user:
test_users.append(test_user)
return test_users
def _has_access_code(access_code):
return access_code is not None and len(access_code) > 0
def _get_friends_on_facebook(test_user):
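    # Query the Graph API friends edge for the test user and return a mapping
    # of friend name -> facebook id.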
friends_url = "https://graph.facebook.com/%s/friends?access_token=%s" % (test_user['graph_user_data']['id'], _get_app_access_token())
friends_data = json.loads(requests.get(friends_url).content)
friends = {}
if type(friends_data) is not bool and 'data' in friends_data:
for friend in friends_data['data']:
friends[friend['name']] = int(friend['id'])
assert len(friends) == len(friends_data['data'])
return friends
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Uhd Adsb 5
# Generated: Sat May 27 23:53:23 2017
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import adsb
import mac
import sip
import sys
from gnuradio import qtgui
class uhd_adsb_5(gr.top_block, Qt.QWidget):
def __init__(self, dc_block_len=4):
gr.top_block.__init__(self, "Uhd Adsb 5")
Qt.QWidget.__init__(self)
self.setWindowTitle("Uhd Adsb 5")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "uhd_adsb_5")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Parameters
##################################################
self.dc_block_len = dc_block_len
##################################################
# Variables
##################################################
self.thresh = thresh = .1
self.samp_rate = samp_rate = 2e6
self.rx_gain = rx_gain = 40
self.freq = freq = 1090e6
self.alpha = alpha = .15
##################################################
# Message Queues
##################################################
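        # Each gr.msg_queue(2) is a single queue object shared between the
        # producing block's message output and the consuming block's input.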
adsb_decoder_0_msgq_out = blocks_message_source_0_msgq_in = gr.msg_queue(2)
adsb_decoder_0_msgq_out = mac_packet_to_pdu_0_msgq_in = gr.msg_queue(2)
adsb_framer_0_msgq_out = adsb_decoder_0_msgq_in = gr.msg_queue(2)
##################################################
# Blocks
##################################################
self._thresh_tool_bar = Qt.QToolBar(self)
self._thresh_tool_bar.addWidget(Qt.QLabel("thresh"+": "))
self._thresh_line_edit = Qt.QLineEdit(str(self.thresh))
self._thresh_tool_bar.addWidget(self._thresh_line_edit)
self._thresh_line_edit.returnPressed.connect(
lambda: self.set_thresh(eng_notation.str_to_num(str(self._thresh_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._thresh_tool_bar, 2,4,1,2)
self._rx_gain_tool_bar = Qt.QToolBar(self)
self._rx_gain_tool_bar.addWidget(Qt.QLabel("rx_gain"+": "))
self._rx_gain_line_edit = Qt.QLineEdit(str(self.rx_gain))
self._rx_gain_tool_bar.addWidget(self._rx_gain_line_edit)
self._rx_gain_line_edit.returnPressed.connect(
lambda: self.set_rx_gain(eng_notation.str_to_num(str(self._rx_gain_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._rx_gain_tool_bar, 0,4,1,2)
self.qtgui_time_sink_x_1 = qtgui.time_sink_f(
1024/8, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_1.set_update_time(0.10)
self.qtgui_time_sink_x_1.set_y_axis(-1, 1)
self.qtgui_time_sink_x_1.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_1.enable_tags(-1, True)
self.qtgui_time_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_1.enable_autoscale(True)
self.qtgui_time_sink_x_1.enable_grid(False)
self.qtgui_time_sink_x_1.enable_axis_labels(True)
self.qtgui_time_sink_x_1.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_1.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_1_win = sip.wrapinstance(self.qtgui_time_sink_x_1.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_time_sink_x_1_win)
self.qtgui_time_sink_x_0_0 = qtgui.time_sink_f(
256, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0.set_update_time(0.010)
self.qtgui_time_sink_x_0_0.set_y_axis(0, 1.2)
self.qtgui_time_sink_x_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_TAG, qtgui.TRIG_SLOPE_POS, .00001, .00002, 0, "adsb_preamble")
self.qtgui_time_sink_x_0_0.enable_autoscale(False)
self.qtgui_time_sink_x_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_0_0.disable_legend()
labels = ['pre', 'post', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_0_win, 4,0,1,4)
self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
256, #size
samp_rate, #samp_rate
"", #name
2 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.010)
self.qtgui_time_sink_x_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, .1, .00002, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ['pre', 'post', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_win, 0,0,4,1)
self.qtgui_time_raster_sink_x_0 = qtgui.time_raster_sink_b(
samp_rate,
20,
256,
([]),
([]),
"",
1,
)
self.qtgui_time_raster_sink_x_0.set_update_time(0.10)
self.qtgui_time_raster_sink_x_0.set_intensity_range(0, 128)
self.qtgui_time_raster_sink_x_0.enable_grid(False)
self.qtgui_time_raster_sink_x_0.enable_axis_labels(True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_raster_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_raster_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_raster_sink_x_0.set_color_map(i, colors[i])
self.qtgui_time_raster_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_raster_sink_x_0_win = sip.wrapinstance(self.qtgui_time_raster_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_time_raster_sink_x_0_win)
self.mac_packet_to_pdu_0 = mac.packet_to_pdu(msgq=mac_packet_to_pdu_0_msgq_in, dewhiten=False, output_invalid=False)
self.digital_correlate_access_code_tag_xx_0 = digital.correlate_access_code_tag_bb('1010000101000000', 0, 'adsb_preamble')
self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()
self.dc_blocker_xx_0 = filter.dc_blocker_ff(4, True)
self.blocks_uchar_to_float_1 = blocks.uchar_to_float()
self.blocks_uchar_to_float_0 = blocks.uchar_to_float()
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_socket_pdu_0 = blocks.socket_pdu("TCP_SERVER", '127.0.0.1', '52001', 10000, False)
self.blocks_message_source_0 = blocks.message_source(gr.sizeof_char*1, blocks_message_source_0_msgq_in)
self.blocks_message_debug_0 = blocks.message_debug()
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, '/home/zleffke/workspace/captures/adsb/adsb_20161212_2M.32fc', False)
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_char*1, '/dev/stdout', True)
self.blocks_file_sink_0.set_unbuffered(True)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1)
self._alpha_tool_bar = Qt.QToolBar(self)
self._alpha_tool_bar.addWidget(Qt.QLabel("alpha"+": "))
self._alpha_line_edit = Qt.QLineEdit(str(self.alpha))
self._alpha_tool_bar.addWidget(self._alpha_line_edit)
self._alpha_line_edit.returnPressed.connect(
lambda: self.set_alpha(eng_notation.str_to_num(str(self._alpha_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._alpha_tool_bar, 1,4,1,2)
self.adsb_framer_0 = adsb.framer(tx_msgq=adsb_framer_0_msgq_out)
self.adsb_decoder_0 = adsb.decoder(rx_msgq=adsb_decoder_0_msgq_in,tx_msgq=adsb_decoder_0_msgq_out,output_type="hex",check_parity=False)
##################################################
# Connections
##################################################
self.msg_connect((self.mac_packet_to_pdu_0, 'pdu'), (self.blocks_message_debug_0, 'print_pdu'))
self.msg_connect((self.mac_packet_to_pdu_0, 'pdu'), (self.blocks_socket_pdu_0, 'pdus'))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.dc_blocker_xx_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.qtgui_time_sink_x_0, 1))
self.connect((self.blocks_file_source_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_message_source_0, 0), (self.blocks_file_sink_0, 0))
self.connect((self.blocks_message_source_0, 0), (self.blocks_uchar_to_float_1, 0))
self.connect((self.blocks_message_source_0, 0), (self.qtgui_time_raster_sink_x_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.blocks_uchar_to_float_0, 0), (self.qtgui_time_sink_x_0_0, 0))
self.connect((self.blocks_uchar_to_float_1, 0), (self.qtgui_time_sink_x_1, 0))
self.connect((self.dc_blocker_xx_0, 0), (self.digital_binary_slicer_fb_0, 0))
self.connect((self.dc_blocker_xx_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.digital_correlate_access_code_tag_xx_0, 0))
self.connect((self.digital_correlate_access_code_tag_xx_0, 0), (self.adsb_framer_0, 0))
self.connect((self.digital_correlate_access_code_tag_xx_0, 0), (self.blocks_uchar_to_float_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "uhd_adsb_5")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_dc_block_len(self):
return self.dc_block_len
def set_dc_block_len(self, dc_block_len):
self.dc_block_len = dc_block_len
def get_thresh(self):
return self.thresh
def set_thresh(self, thresh):
self.thresh = thresh
Qt.QMetaObject.invokeMethod(self._thresh_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.thresh)))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.qtgui_time_sink_x_1.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_0_0.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
Qt.QMetaObject.invokeMethod(self._rx_gain_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.rx_gain)))
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
def get_alpha(self):
return self.alpha
def set_alpha(self, alpha):
self.alpha = alpha
Qt.QMetaObject.invokeMethod(self._alpha_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.alpha)))
def argument_parser():
parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
parser.add_option(
"", "--dc-block-len", dest="dc_block_len", type="eng_float", default=eng_notation.num_to_str(4),
help="Set dc_block_len [default=%default]")
return parser
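# Illustrative note (editor's addition): the generated flowgraph is normally
# started from the command line with the single exposed parameter, e.g.
#   python2 uhd_adsb_5.py --dc-block-len=4
# (the script filename is assumed from the generated title; adjust it to match
# the actual file name on disk).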
def main(top_block_cls=uhd_adsb_5, options=None):
if options is None:
options, _ = argument_parser().parse_args()
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls(dc_block_len=options.dc_block_len)
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
|
|
###
### generate_schedules.py
###
import os
import sys
import csv
import random
import datetime
from utils import LogWarning, LogMessage
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webApp.settings")
django.setup()
from django.shortcuts import render
from django.db import transaction
from pdfManage.models import NSFInvestigator, NSFProject, ProjectPIs, CollabProjects, \
Organizations, Programs, Words, Institutions, \
RapidMeeting, PosterPresenters, BreakoutParticipant
import json
from pprint import pprint
def generate_row(time, room, event):
return "\\row{" + time + "}{" + room + "}{" + event + "}\n"
def generate_brow(time, room, event):
return "\\brow{" + time + "}{" + room + "}{" + event + "}\n"
def generate_prow(time, room, event):
return "\\prow{" + time + "}{" + room + "}{" + event + "}\n"
def generate_block(msg):
return "\\end{tabular} \\begin{addmargin}[2em]{2em}\\small\n" + msg + "\\end{addmargin} \\begin{tabular}{x{1.5in}x{1.5in}x{3.5in}}"
def latex_schedule(pi, rm, poster, breakout1, breakout2, breakout3,
breakoutleader, breakout1leader, breakout2leader, breakout3leader):
LogMessage("Schedule for: " + pi.email + " . " + str(pi.id))
msg = """
\\documentclass[11pt, letterpaper]{article}
\\input{preamble.tex}
\\begin{document}
\\begin{center}
"""
msg += "{\\colorbox{black}{\\parbox{\\dimexpr\\textwidth-2\\fboxsep\\relax}{\\centering\\textcolor{white}{\\quad {\\large Schedule for {\\bf " + pi.displayName() + "}}}}}}\n"
msg += """
\\end{center}
Welcome to the National Science Foundation Secure and Trustworthy
Cyberspace (SaTC) Principal Investigators' Meeting! Your personal
schedule is below. See the conference program or website for more
details. You can also find a web version of your personal schedule
at: \\begin{center}\\url{http://www.rematchr.org/schedule/"""
msg += pi.email + "}\\end{center}\n\n"
msg += "\\bday{Monday 5 January 2015}{\n"
msg += generate_row('8:30-9:15am', 'Regency EF', 'Welcome: Jim Kurose and Jeryl Mumpower (NSF)')
msg += generate_row('9:15-10:30am', 'Regency EF', 'Keynote: \\ptitle{What\'s on the Horizon?}, Latanya Sweeney (Harvard University)')
msg += generate_brow('10:30-11:00am','Regency Foyer','Break')
    msg += generate_row('11am-12:15pm','Regency EF','Panel: \\ptitle{Ethics in Security and Privacy Research} (Michael Bailey, Lujo Bauer, Stefan Savage; Rahul Telang)')
msg += generate_brow('12:15-1:30pm','Independence Center','Lunch')
if breakout1:
msg += generate_prow('1:30-3:00pm', breakout1.location, 'Breakout ' + str(breakout1.number) + ": \\ptitle{" + breakout1.title + '}')
if breakout1leader:
print "**** BREAKOUT LEADER ****"
msg += generate_block('Thank you for leading this breakout discussion. You should find the room set up with a projector and screen, flip chart, and power strips. If you need anything else for your breakout, please let the conference staff know right away.')
else:
        msg += generate_row('1:30-3:00pm','Regency EF', 'Panel: \\ptitle{Educating Everyone} (Diana Burley, Shriram Krishnamurthi, Zachary Peterson; Victor Piotrowski)')
msg += generate_brow('3:00-3:30pm','Regency Foyer','Break')
if rm:
msg += generate_prow('3:30-5:15pm', '{\\em various locations}',
'Rapid-Fire Cross Collaborations')
for rfm in rm:
if rfm.round == 1: time = "3:30-3:50pm"
elif rfm.round == 2: time = "3:55-4:15pm"
elif rfm.round == 3: time = "4:20-4:40pm"
elif rfm.round == 4: time = "4:45-5:15pm"
else: assert False
msg += generate_row(time, rfm.latexLocation(), ', '.join([pi.displayNameInst() for pi in rfm.getParticipants()]))
msg += generate_block("""
The goal of the rapid-fire cross collaborations is to instigate
discussions that would not otherwise happen and that may lead to
useful collaborations, or at least new insights and contacts. The
matches were generated according to several different criteria: (1)
expertise-interest compatibility (based on the registration forms you
submitted), (2) selfishly serving one individual's interests as well as
possible, (3) similar topics based on text analysis of NSF abstracts,
and (4) dissimilar topics based on text analysis of NSF abstracts.
The matches you have may not cover all of these criteria; they are
selected to maximize an overall metric. After the meeting, you'll
be able to see why you were matched for each meeting round. If you
are at a loss for how to get a conversation going, you can try to
figure out which of the four criteria were the reason you were
matched.
""")
else:
msg += generate_block("No rapid-fire cross collaborations scheduled. If you would like to participate in ad-hoc rapid matching, go to headquarters in Regency EF.")
if poster:
msg += generate_prow('6:00-8:00pm', 'Independence Center', 'Present poster: \\ptitle{' + poster.title + '}')
msg += generate_block("Easels and poster boards will be provided. Poster locations are not assigned; you may set up your poster at any open location.")
else:
msg += generate_row('6:00-8:00pm', 'Independence Center', 'Poster Session and Reception')
msg += "} \n" # end dayschedule
msg += "\\clearpage\n"
msg += "\\bday{Tuesday 6 January 2015}{\n"
msg += generate_row('8:30-9:00am', 'Regency EF', 'Opening: Keith Marzullo and Michael Vogelius (NSF)')
msg += generate_row('9:00-9:15am', 'Regency EF', 'SaTC Programmatics, Jeremy Epstein (NSF)')
msg += generate_row('9:15-10:30am','Regency EF', 'Keynote: \\ptitle{T.S. Kuhn Revisited}, Dan Geer (In-Q-Tel)')
msg += generate_brow('10:30-11:00am','Regency Foyer','Break')
msg += generate_row('11am-12:15pm', 'Regency EF', 'Panel: Ideas to Innovations (Farnam Jahanian, Angelos Stavrou, Giovanni Vigna; Patrick Traynor)')
msg += generate_brow('12:15-1:30pm', 'Independence Center', 'Lunch')
if breakout2:
msg += generate_prow('1:30-3:00pm', breakout2.location, 'Breakout ' + str(breakout2.number) + ": \\ptitle{" + breakout2.title + '}')
if breakout2leader:
print "**** BREAKOUT LEADER ****"
msg += generate_block('Thank you for leading this breakout discussion. You should find the room set up with a projector and screen, flip chart, and power strips. If you need anything else for your breakout, please let the conference staff know right away.')
else:
msg += generate_row('1:30-3:00pm', 'Regency EF', 'Panel: \\ptitle{Security and Privacy Challenges in Health Informatics} (Xiaoqian Jiang, David Kotz, XiaoFeng Wang; Elaine Shi)')
msg += generate_brow('3:00-3:30pm','Regency Foyer', 'Break')
if breakout3:
        msg += generate_prow('3:30-5:00pm', breakout3.location, 'Breakout ' + str(breakout3.number) + ": \\ptitle{" + breakout3.title + '}')
if breakout3leader:
print "**** BREAKOUT LEADER ****"
msg += generate_block('Thank you for leading this breakout discussion. You should find the room set up with a projector and screen, flip chart, and power strips. If you need anything else for your breakout, please let the conference staff know right away.')
else:
msg += generate_row('3:30-5:00pm','Regency EF', 'Panel: \\ptitle{Future of Privacy} (Tamara Denning, Apu Kapadia, Arvind Narayanan; Christopher Clifton)')
msg += generate_row('5:30-7:30pm','Potomac Rooms','Birds-of-a-Feather Sessions (signup to host a BoF at the registration desk)')
msg += "} \n" # end dayschedule
msg += "\\par \n \\par \n"
msg += "\\bday{Wednesday 7 January 2015}{\n"
msg += generate_row('8:30-9:15am','Regency EF','Opening: Pramod Khargonekar and Joan Ferrini-Mundy (NSF)')
msg += generate_row('9:15-10:30am','Regency EF','Keynote: \\ptitle{The Coming Design Wars}, Deirdre Mulligan (UC Berkeley)')
if breakoutleader:
msg += generate_block('As breakout leader, you or someone else representing your breakout should be prepared to present a 4-minute report on your breakout in the 11am session. Please send slides for this as a simple PowerPoint file with no fancy template to {\\tt [email protected]} or get them to me on a USB stick before 10:30am (preferably earlier!). Breakout leaders will present in order by breakout number, and the slides will be combined to avoid presentation transitions.')
msg += generate_brow('10:30-11:00am','Regency Foyer','Break')
msg += generate_row('11am-12:15pm', 'Regency EF', 'Results of Breakout Discussions')
msg += generate_row('12:15-1:30pm', 'Independence Center', 'Lunch')
msg += generate_row('1:30-2:45pm', 'Regency EF', 'Panel: \\ptitle{SaTC 2029?} (Carl Landwehr, Patrick McDaniel, Amit Sahai; David Evans)')
msg += generate_row('2:45-3:00pm','Regency EF','Closing')
msg += "} \n" # end dayschedule
msg += "\\end{document}"
return msg
def schedule(pi):
rm = RapidMeeting.objects.filter(pi1=pi) | RapidMeeting.objects.filter(pi2=pi) | RapidMeeting.objects.filter(pi3=pi) | RapidMeeting.objects.filter(pi4=pi)
rm = rm.order_by('round')
# LogMessage("rapid meeting 1: " + ' / '.join([m.displayMeeting() for m in rm]))
# lots of sanity checking to do here...
posterobj = PosterPresenters.objects.filter(presenter=pi)
if posterobj:
poster = posterobj[0].poster
else:
poster = None
breakout1 = None
breakout2 = None
breakout3 = None
breakoutleader = False
breakout1leader = False
breakout2leader = False
breakout3leader = False
breakouts = BreakoutParticipant.objects.filter(pi=pi)
for bobj in breakouts:
breakout = bobj.breakout
leader = bobj.leader
if leader:
breakoutleader = True
if breakout.session == 1:
assert not breakout1
breakout1 = breakout
breakout1leader = leader
elif breakout.session == 2:
assert not breakout2
breakout2 = breakout
breakout2leader = leader
elif breakout.session == 3:
assert not breakout3
breakout3 = breakout
breakout3leader = leader
else:
LogWarning("Bad breakout session: " + breakout.title)
return latex_schedule(pi, rm, poster, breakout1, breakout2, breakout3, breakoutleader,
breakout1leader, breakout2leader, breakout3leader)
def generate_schedule(no, attendee):
msg = schedule(attendee)
fname = 'schedules/schedule-' + "%03d" % no + "-" + str(attendee.id) + '.tex'
print "Writing schedule for " + attendee.displayName() + " to " + fname + "..."
with open(fname, "w") as file:
file.write(msg)
def generate_schedules():
attendees = NSFInvestigator.objects.filter(attendee=True).order_by('lastname','firstname')
count = 1
for attendee in attendees:
if not attendee.email:
LogMessage("Bad pi: " + str(attendee.id))
continue
generate_schedule(count, attendee)
count += 1
# if count > 100:
# break # one for now
if __name__ == "__main__":
generate_schedules()
|
|
# Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Impala tests for Hive Metastore, covering the expected propagation
# of metadata from Hive to Impala or Impala to Hive. Each test
# modifies the metadata via Hive and checks that the modification
# succeeded by querying Impala, or vice versa.
#
# TODO: For each test, verify all the metadata available via Hive and
# Impala, in all the possible ways of validating that metadata.
import logging
import pytest
import random
import shlex
import string
import subprocess
from tests.common.test_result_verifier import *
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
from tests.common.skip import SkipIfS3, SkipIfIsilon
@SkipIfS3.hive
@SkipIfIsilon.hive
class TestHmsIntegration(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestHmsIntegration, cls).add_test_dimensions()
# There is no reason to run these tests using all dimensions.
cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
cls.TestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def run_stmt_in_hive(self, stmt):
"""
Run a statement in Hive, returning stdout if successful and throwing
RuntimeError(stderr) if not.
"""
call = subprocess.Popen(
['beeline',
'--outputformat=csv2',
'-u', 'jdbc:hive2://' + pytest.config.option.hive_server2,
'-n', getuser(),
'-e', stmt],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = call.communicate()
call.wait()
if call.returncode != 0:
raise RuntimeError(stderr)
return stdout
class ImpalaDbWrapper(object):
"""
A wrapper class for using `with` guards with databases created through
Impala ensuring deletion even if an exception occurs.
"""
def __init__(self, impala, db_name):
self.impala = impala
self.db_name = db_name
def __enter__(self):
self.impala.client.execute(
'create database if not exists ' + self.db_name)
return self.db_name
def __exit__(self, typ, value, traceback):
self.impala.cleanup_db(self.db_name)
class ImpalaTableWrapper(object):
"""
A wrapper class for using `with` guards with tables created through Impala
ensuring deletion even if an exception occurs.
"""
def __init__(self, impala, table_name, table_spec):
self.impala = impala
self.table_name = table_name
self.table_spec = table_spec
def __enter__(self):
self.impala.client.execute(
'create table if not exists %s %s' %
(self.table_name, self.table_spec))
return self.table_name
def __exit__(self, typ, value, traceback):
self.impala.client.execute('drop table if exists %s' % self.table_name)
class HiveDbWrapper(object):
"""
A wrapper class for using `with` guards with databases created through Hive
ensuring deletion even if an exception occurs.
"""
def __init__(self, hive, db_name):
self.hive = hive
self.db_name = db_name
def __enter__(self):
self.hive.run_stmt_in_hive(
'create database if not exists ' + self.db_name)
return self.db_name
def __exit__(self, typ, value, traceback):
self.hive.run_stmt_in_hive(
'drop database if exists %s cascade' % self.db_name)
class HiveTableWrapper(object):
"""
A wrapper class for using `with` guards with tables created through Hive
ensuring deletion even if an exception occurs.
"""
def __init__(self, hive, table_name, table_spec):
self.hive = hive
self.table_name = table_name
self.table_spec = table_spec
def __enter__(self):
self.hive.run_stmt_in_hive(
'create table if not exists %s %s' %
(self.table_name, self.table_spec))
return self.table_name
def __exit__(self, typ, value, traceback):
self.hive.run_stmt_in_hive('drop table if exists %s' % self.table_name)
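  # Illustrative usage (editor's addition): the Db/Table wrappers above are
  # combined in the tests below roughly as
  #   with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
  #     with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
  #                                  '(x int)') as table_name:
  #       ...
  # so that the database and table are dropped even if an assertion fails
  # inside the nested block.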
def impala_table_stats(self, table):
"""Returns a dictionary of stats for a table according to Impala."""
output = self.client.execute('show table stats %s' % table).get_data()
output_lines = output.split('\n')
result = {}
for line in output_lines:
parts = line.split('\t')
stats = {}
stats['location'] = parts[-1]
stats['incremental stats'] = parts[-2]
stats['format'] = parts[-3]
stats['cache replication'] = parts[-4]
stats['bytes cached'] = parts[-5]
stats['size'] = parts[-6]
stats['#files'] = parts[-7]
stats['#rows'] = parts[-8]
result[tuple(parts[:-8])] = stats
return result
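  # Note (editor's addition): the parsing above assumes the last eight
  # tab-separated fields of each 'show table stats' row are, from the right:
  # location, incremental stats, format, cache replication, bytes cached,
  # size, #files and #rows; any leading fields are treated as the partition
  # key values and used as the dictionary key.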
def impala_all_column_stats(self, table):
"""Returns a dictionary of stats for columns according to Impala."""
output = self.client.execute('show column stats %s' % table).get_data()
output_lines = output.split('\n')
result = {}
for line in output_lines:
stats = line.split('\t')
attributes = {}
attributes['type'] = stats[1]
attributes['ndv'] = stats[2]
attributes['#nulls'] = stats[3]
attributes['max size'] = stats[4]
attributes['avg size'] = stats[5]
result[stats[0]] = attributes
return result
def hive_column_stats(self, table, column):
"""Returns a dictionary of stats for a column according to Hive."""
output = self.run_stmt_in_hive(
'describe formatted %s %s' %
(table, column))
result = {}
output_lines = output.split('\n')
stat_names = map(string.strip, output_lines[0].split(','))
stat_values = output_lines[3].split(',')
assert len(stat_names) == len(stat_values)
for i in range(0, len(stat_names)):
result[stat_names[i]] = stat_values[i]
return result
def impala_partition_names(self, table_name):
"""Find the names of the partitions of a table, as Impala sees them.
The return format is a list of lists of strings. Each string represents
a partition value of a given column.
"""
rows = self.client.execute('show partitions %s' %
table_name).get_data().split('\n')
rows.pop()
result = []
for row in rows:
fields = row.split('\t')
name = fields[0:-8]
result.append(name)
return result
def hive_partition_names(self, table_name):
"""Find the names of the partitions of a table, as Hive sees them.
The return format is a list of strings. Each string represents a partition
value of a given column in a format like 'column1=7/column2=8'.
"""
return self.run_stmt_in_hive(
'show partitions %s' % table_name).split('\n')[1:-1]
def impala_columns(self, table_name):
"""
Returns a dict with column names as the keys and dicts of type and comments
as the values.
"""
columns = self.client.execute('describe %s' %
table_name).get_data().split('\n')
result = {}
for column in columns:
attributes = column.split('\t')
result[attributes[0]] = {'type': attributes[1], 'comment': attributes[2]}
return result
def hive_columns(self, table_name):
"""
Returns a dict with column names as the keys and dicts of types and
comments as the values.
"""
columns = self.run_stmt_in_hive(
'describe %s' % table_name).split('\n')[1:-1]
result = {}
for column in columns:
attributes = column.split(',')
result[attributes[0]] = {'type': attributes[1], 'comment': attributes[2]}
return result
def unique_string(self):
return ''.join([random.choice(string.ascii_lowercase)
for i in range(0, 16)])
def assert_sql_error(self, engine, command, *strs_in_error):
reached_unreachable = False
try:
engine(command)
reached_unreachable = True
except Exception as e:
for str_in_error in strs_in_error:
assert str_in_error in str(e)
if reached_unreachable:
assert False, '%s should have triggered an error containing %s' % (
command, strs_in_error)
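  # Illustrative usage (editor's addition): assert_sql_error is called below as
  #   self.assert_sql_error(self.client.execute,
  #                         'describe some_table',
  #                         'Could not resolve path')
  # i.e. `engine` is a callable taking a single SQL string, and every extra
  # argument must appear as a substring of the raised error message.
  # ('some_table' here is just a placeholder name for illustration.)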
@pytest.mark.execute_serially
def test_hive_db_hive_table_add_partition(self, vector):
self.add_hive_partition_helper(vector, self.HiveDbWrapper,
self.HiveTableWrapper)
@pytest.mark.execute_serially
def test_hive_db_impala_table_add_partition(self, vector):
self.add_hive_partition_helper(vector, self.HiveDbWrapper,
self.ImpalaTableWrapper)
@pytest.mark.execute_serially
def test_impala_db_impala_table_add_partition(self, vector):
self.add_hive_partition_helper(vector, self.ImpalaDbWrapper,
self.ImpalaTableWrapper)
@pytest.mark.execute_serially
def test_impala_db_hive_table_add_partition(self, vector):
self.add_hive_partition_helper(vector, self.ImpalaDbWrapper,
self.HiveTableWrapper)
@pytest.mark.xfail(run=False, reason="This is a bug: IMPALA-2426")
@pytest.mark.execute_serially
def test_incremental_stats_new_partition(self, vector):
with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int) partitioned by (y int)') as table_name:
self.client.execute('insert into table %s partition (y=42) values (2)'
% table_name)
self.run_stmt_in_hive('alter table %s add partition (y = 333)'
% table_name)
self.client.execute('compute incremental stats %s' % table_name)
table_stats = self.impala_table_stats(table_name)
assert 'true' == table_stats[('333',)]['incremental stats']
assert '0' == table_stats[('333',)]['#rows']
assert '0' == table_stats[('333',)]['#files']
def add_hive_partition_helper(self, vector, DbWrapper, TableWrapper):
"""
Partitions added in Hive can be viewed in Impala after computing stats.
"""
with DbWrapper(self, self.unique_string()) as db_name:
self.client.execute('invalidate metadata')
with TableWrapper(self, db_name + '.' + self.unique_string(),
'(x int) partitioned by (y int, z int)') as table_name:
# Invalidate metadata so Impala can see the table
self.client.execute('invalidate metadata')
self.run_stmt_in_hive(
'alter table %s add partition (y = 333, z = 5309)' %
table_name)
self.client.execute('compute incremental stats %s' % table_name)
# Impala can see the partition's name
assert [['333', '5309']] == self.impala_partition_names(table_name)
# Impala's compute stats didn't alter Hive's knowledge of the partition
assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
self.add_hive_partition_table_stats_helper(vector, DbWrapper, TableWrapper)
def add_hive_partition_table_stats_helper(
self, vector, DbWrapper, TableWrapper):
"""
Partitions added in Hive don't make Impala's table stats incorrect.
"""
# TODO: check the same thing with column stats
with DbWrapper(self, self.unique_string()) as db_name:
self.client.execute('invalidate metadata')
with TableWrapper(self, db_name + '.' + self.unique_string(),
'(x int) partitioned by (y int, z int)') as table_name:
# Invalidate metadata so Impala can see the table
self.client.execute('invalidate metadata')
self.client.execute(
'insert into table %s partition (y=42, z=867) values (2)'
% table_name)
self.client.execute('compute incremental stats %s' % table_name)
impala_table_stats = self.impala_table_stats(table_name)
self.run_stmt_in_hive(
'alter table %s add partition (y = 333, z = 5309)' %
table_name)
self.client.execute('compute incremental stats %s' % table_name)
assert impala_table_stats[
('42', '867')] == self.impala_table_stats(table_name)[
('42', '867')]
@pytest.mark.execute_serially
def test_add_impala_partition(self, vector):
"""
Partitions added in Impala can be viewed in Hive immediately
"""
with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int) partitioned by (y int, z int)') as table_name:
self.client.execute(
'insert into table %s partition (y=42, z=867) values (2)'
% table_name)
assert [['42', '867']] == self.impala_partition_names(table_name)
assert ['y=42/z=867'] == self.hive_partition_names(table_name)
@pytest.mark.execute_serially
def test_drop_column_maintains_stats(self, vector):
"""
Dropping a column in Impala doesn't alter the stats of other columns in Hive
or Impala.
"""
with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int, y int, z int)') as table_name:
self.run_stmt_in_hive('select * from %s' % table_name)
self.run_stmt_in_hive(
'use %s; analyze table %s compute statistics for columns' %
(db_name, table_name.split('.')[1]))
self.client.execute('compute stats %s' % table_name)
hive_x_stats = self.hive_column_stats(table_name, 'x')
hive_y_stats = self.hive_column_stats(table_name, 'y')
impala_stats = self.impala_all_column_stats(table_name)
self.client.execute('alter table %s drop column z' % table_name)
assert hive_x_stats == self.hive_column_stats(table_name, 'x')
assert hive_y_stats == self.hive_column_stats(table_name, 'y')
assert impala_stats['x'] == self.impala_all_column_stats(table_name)[
'x']
assert impala_stats['y'] == self.impala_all_column_stats(table_name)[
'y']
self.run_stmt_in_hive(
'alter table %s replace columns (x int)' %
table_name)
assert hive_x_stats == self.hive_column_stats(table_name, 'x')
assert impala_stats['x'] == self.impala_all_column_stats(table_name)[
'x']
@pytest.mark.execute_serially
def test_select_without_compute_stats(self, vector):
"""
Data added in Hive shows up in Impala 'select *', and if the table is not
partitioned, 'compute incremental stats' is not required.
"""
with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int)') as table_name:
# In the unpartitioned case, 'compute incremental stats' is not
# required.
self.run_stmt_in_hive(
'insert into table %s values (66)'
% table_name)
assert '66' == self.client.execute(
'select * from %s' % table_name).get_data()
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int) partitioned by (y int)') as table_name:
assert [] == self.impala_partition_names(table_name)
self.run_stmt_in_hive(
'insert into table %s partition (y=33) values (44)'
% table_name)
self.client.execute('compute incremental stats %s' % table_name)
assert '44\t33' == self.client.execute(
'select * from %s' % table_name).get_data()
@pytest.mark.xfail(run=False, reason="This is a bug: IMPALA-2458")
@pytest.mark.execute_serially
def test_overwrite_added_column(self, vector):
"""
Impala can't overwrite Hive's column types, and vice versa.
"""
with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int, y int)') as table_name:
inttype = {'comment': '', 'type': 'int'}
hive_expected = {'x': inttype, 'y': inttype}
impala_expected = {'x': inttype, 'y': inttype}
# Hive and Impala both know all columns:
assert hive_expected == self.hive_columns(table_name)
assert impala_expected == self.impala_columns(table_name)
# Add column in Hive but don't tell Impala
self.run_stmt_in_hive(
'alter table %s add columns (z int)' % table_name)
hive_expected['z'] = inttype
assert hive_expected == self.hive_columns(table_name)
        # Overwriting a Hive-created column in Impala does not work
self.assert_sql_error(
self.client.execute,
'alter table %s add columns (z string)' %
table_name,
'Column already exists: z')
# Overwriting an Impala-created column in Hive does not work
self.client.execute(
'alter table %s add columns (v string)' % table_name)
self.assert_sql_error(
self.run_stmt_in_hive,
'alter table %s add columns (v string)' %
table_name,
'Duplicate column name: v')
@pytest.mark.execute_serially
def test_compute_stats_get_to_hive(self, vector):
"""Stats computed in Impala are also visible in Hive."""
with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int)') as table_name:
self.run_stmt_in_hive(
'insert into table %s values (33)' % table_name)
hive_stats = self.hive_column_stats(table_name, 'x')
impala_stats = self.client.execute('show column stats %s' % table_name)
self.client.execute('compute stats %s' % table_name)
assert impala_stats != self.client.execute(
'show column stats %s' % table_name)
assert hive_stats != self.hive_column_stats(table_name, 'x')
@pytest.mark.execute_serially
def test_compute_stats_get_to_impala(self, vector):
"""Column stats computed in Hive are also visible in Impala."""
with self.HiveDbWrapper(self, self.unique_string()) as db_name:
with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int)') as table_name:
hive_stats = self.hive_column_stats(table_name, 'x')
self.client.execute('invalidate metadata')
self.client.execute('refresh %s' % table_name)
impala_stats = self.impala_all_column_stats(table_name)
self.run_stmt_in_hive(
'insert into table %s values (33)' % table_name)
self.run_stmt_in_hive(
'use %s; analyze table %s compute statistics for columns' %
(db_name, table_name.split('.')[1]))
new_hive_stats = self.hive_column_stats(table_name, 'x')
assert hive_stats != new_hive_stats
assert '33' == new_hive_stats['min']
assert '33' == new_hive_stats['max']
assert '0' == new_hive_stats['num_nulls']
self.client.execute('refresh %s' % table_name)
new_impala_stats = self.impala_all_column_stats(table_name)
assert impala_stats != new_impala_stats
assert '0' == new_impala_stats['x']['#nulls']
@pytest.mark.execute_serially
def test_drop_partition(self, vector):
"""
    Impala can see that a partition was dropped by Hive after invalidating
metadata.
"""
with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int) partitioned by (y int)') as table_name:
self.run_stmt_in_hive(
'insert into table %s partition(y=33) values (44)' % table_name)
self.client.execute('compute stats %s' % table_name)
self.run_stmt_in_hive(
'alter table %s drop partition (y=33)' % table_name)
self.client.execute('invalidate metadata %s' % table_name)
assert '' == self.client.execute(
'select * from %s' % table_name).get_data()
@pytest.mark.execute_serially
def test_drop_column_with_data(self, vector):
"""Columns dropped by Hive are ignored in Impala 'select *'."""
with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int, y int)') as table_name:
self.run_stmt_in_hive(
'insert into table %s values (33,44)' % table_name)
self.run_stmt_in_hive(
'alter table %s replace columns (x int)' % table_name)
assert '33' == self.client.execute(
'select * from %s' % table_name).get_data()
@pytest.mark.execute_serially
def test_add_column(self, vector):
"""Columns added in one engine are visible in the other via DESCRIBE."""
with self.ImpalaDbWrapper(self, self.unique_string()) as db_name:
with self.ImpalaTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int)') as table_name:
int_column = {'type': 'int', 'comment': ''}
expected = {'x': int_column}
assert expected == self.hive_columns(table_name)
assert expected == self.impala_columns(table_name)
self.client.execute('alter table %s add columns (y int)' % table_name)
expected['y'] = int_column
assert expected == self.hive_columns(table_name)
assert expected == self.impala_columns(table_name)
self.run_stmt_in_hive(
'alter table %s add columns (z int)' %
table_name)
self.client.execute('invalidate metadata %s' % table_name)
expected['z'] = int_column
assert expected == self.hive_columns(table_name)
assert expected == self.impala_columns(table_name)
@pytest.mark.execute_serially
def test_drop_database(self, vector):
"""
If a DB is created, then dropped, in Hive, Impala can create one with the
same name without invalidating metadata.
"""
test_db = self.unique_string()
with self.HiveDbWrapper(self, test_db) as db_name:
pass
self.assert_sql_error(
self.client.execute,
'create table %s.%s (x int)' %
(test_db,
self.unique_string()),
'Database does not exist: %s' %
test_db)
with self.ImpalaDbWrapper(self, test_db) as db_name:
pass
@pytest.mark.execute_serially
def test_table_format_change(self, vector):
"""
Hive storage format changes propagate to Impala.
"""
# TODO: check results of insert, then select * before and after
# storage format change.
with self.HiveDbWrapper(self, self.unique_string()) as db_name:
with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int, y int) stored as parquet') as table_name:
self.client.execute('invalidate metadata')
self.client.execute('invalidate metadata %s' % table_name)
print self.impala_table_stats(table_name)
assert 'PARQUET' == self.impala_table_stats(table_name)[()]['format']
self.run_stmt_in_hive(
'alter table %s set fileformat avro' % table_name)
self.client.execute('invalidate metadata %s' % table_name)
assert 'AVRO' == self.impala_table_stats(table_name)[()]['format']
@pytest.mark.execute_serially
def test_change_column_type(self, vector):
"""Hive column type changes propagate to Impala."""
with self.HiveDbWrapper(self, self.unique_string()) as db_name:
with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int, y int)') as table_name:
self.run_stmt_in_hive(
'insert into table %s values (33,44)' % table_name)
self.run_stmt_in_hive('alter table %s change y y string' % table_name)
assert '33,44' == self.run_stmt_in_hive(
'select * from %s' % table_name).split('\n')[1]
self.client.execute('invalidate metadata %s' % table_name)
assert '33\t44' == self.client.execute(
'select * from %s' % table_name).get_data()
assert 'string' == self.impala_columns(table_name)['y']['type']
@pytest.mark.execute_serially
def test_change_parquet_column_type(self, vector):
"""
Changing column types in Parquet doesn't work in Hive and it causes
'select *' to fail in Impala as well, after invalidating metadata. This is a
known issue with changing column types in Hive/parquet.
"""
with self.HiveDbWrapper(self, self.unique_string()) as db_name:
with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int, y int) stored as parquet') as table_name:
self.run_stmt_in_hive(
'insert into table %s values (33,44)' % table_name)
assert '33,44' == self.run_stmt_in_hive(
'select * from %s' % table_name).split('\n')[1]
self.client.execute('invalidate metadata')
assert '33\t44' == self.client.execute(
'select * from %s' % table_name).get_data()
self.run_stmt_in_hive('alter table %s change y y string' % table_name)
self.assert_sql_error(
self.run_stmt_in_hive, 'select * from %s' %
table_name, 'Cannot inspect org.apache.hadoop.io.IntWritable')
self.client.execute('invalidate metadata %s' % table_name)
self.assert_sql_error(
self.client.execute,
'select * from %s' %
table_name,
"Column type: STRING, Parquet schema:")
@pytest.mark.execute_serially
def test_change_table_name(self, vector):
"""
Changing the table name in Hive propagates to Impala after 'invalidate
metadata'.
"""
with self.HiveDbWrapper(self, self.unique_string()) as db_name:
with self.HiveTableWrapper(self, db_name + '.' + self.unique_string(),
'(x int, y int)') as table_name:
self.client.execute('invalidate metadata')
int_column = {'type': 'int', 'comment': ''}
expected_columns = {'x': int_column, 'y': int_column}
assert expected_columns == self.impala_columns(table_name)
new_name = table_name + '2'
self.run_stmt_in_hive('alter table %s rename to %s' %
(table_name, new_name))
self.client.execute('invalidate metadata')
assert expected_columns == self.impala_columns(new_name)
self.assert_sql_error(self.client.execute,
'describe %s' % table_name,
'Could not resolve path')
|
|
"""
Example generation for scikit-learn.
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
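# Illustrative usage (editor's addition): Tee is typically used to mirror an
# example script's stdout into a StringIO buffer while still printing it,
# roughly:
#   orig_stdout = sys.stdout
#   captured = StringIO()
#   sys.stdout = Tee(orig_stdout, captured)
#   ...  # run the example
#   sys.stdout = orig_stdout
# (variable names are illustrative; the actual call site is not shown in this
# excerpt).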
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so the link works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = open(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
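# Illustrative usage (editor's addition): for an example file that starts with
# a module-level docstring, the helper above returns the full docstring, the
# first paragraph (used as the gallery snippet), and the row at which the
# literal code listing should start, e.g.
#   docstring, snippet, first_code_row = extract_docstring('plot_example.py')
# ('plot_example.py' is a placeholder filename for illustration.)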
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
width: 0px;
overflow: hidden;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = open(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
""" % (ref_name, snippet))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
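# Illustrative usage (a sketch, not from the original file): get_short_module_name
# keeps the shortest prefix of the dotted module path from which the object can
# still be imported. The module/object pair below is an assumption for illustration:
#
#     get_short_module_name('sklearn.linear_model.base', 'LinearRegression')
#     # -> 'sklearn.linear_model' (the class is re-exported from that prefix)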
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
        # generate the plot as a png image if the file name
        # starts with 'plot' and the source file is more recent
        # than any existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_mngr.num)
plt.savefig(image_path % fig_mngr.num)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in
        # `thumb_file`, which is within `auto_examples/../images/thumbs` depending on
        # the example. Because the carousel has different dimensions than the examples
        # gallery thumbnails, they cannot simply all be reused: some contain whitespace
        # due to the default gallery thumbnail size. Below, for a few cases, separate
        # thumbnails are created (the originals can't just be overwritten with the
        # carousel dimensions, as that messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to
        # _build/html/stable/_images/, because Sphinx refuses to copy these 'extra'
        # thumbnails from the auto_examples gallery to the _build folder. This works
        # fine as is, but it would be cleaner to have it happen with the rest. Ideally
        # they should be written to `thumb_file` as well, and then copied to the
        # _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
example_code_obj = identify_names(open(example_file).read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
|
|
import os
import sys
import pickle
import datetime
from utils import LogWarning, LogMessage, LogError
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webApp.settings")
django.setup()
from django.db import transaction
from forNSF.wordcounts import process_text
from webApp.models import NSFInvestigator, NSFProject, ProjectPIs, CollabProjects, \
Organizations, Programs, Words, Institutions
from nsfmodels import Institution
def loadInvestigators(file):
LogMessage("Loading investigators: " + file)
pis = None
with open(file, 'r') as f:
pis = pickle.load(f)
count = 0
total = len(pis.all())
transaction.set_autocommit(True)
with transaction.atomic():
for pi in pis.all():
current = NSFInvestigator.objects.filter(email = pi.email)
if current:
# update institution
if len(current) == 1:
piobj = current[0]
update = False
if not piobj.firstname == pi.firstname:
piobj.firstname = pi.firstname
update = True
LogWarning("Updated firstname: " + pi.email + " -> " + pi.firstname)
if not piobj.lastname == pi.lastname:
piobj.lastname = pi.lastname
update = True
LogWarning("Updated lastname: " + pi.email + " -> " + pi.lastname)
if update:
piobj.save()
else:
LogError("Multiple matching investigators for email: " + pi.email)
total -= 1
else:
NSFInvestigator.objects.create(email = pi.email, firstname = pi.firstname,
lastname = pi.lastname)
if count % 100 == 0:
LogMessage("... " + str(count) + " of " + str(total) + "...")
count += 1
LogMessage("Finished loading investigators: " + str(count))
def loadInstitutions(file):
LogMessage("Loading institutions: " + file)
    institutions = None
with open(file, 'r') as f:
institutions = pickle.load(f)
count = 0
total = len(institutions.keys())
transaction.set_autocommit(True)
with transaction.atomic():
for itag, institution in institutions.items():
match = lookupExactMatchingInstitution(institution)
if not match:
Institutions.objects.create(name = institution.name,
cityname = institution.cityname,
statename = institution.statename)
LogMessage("New institution: " + str(institution))
count += 1
if count % 100 == 0:
LogMessage("... " + str(count) + " of " + str(total) + "...")
LogMessage("Finished loading institutions: " + str(count))
def lookupExactMatchingInstitution(institution, quiet=False):
assert institution
institutions = Institutions.objects.filter(name = institution.name).filter(cityname = institution.cityname).filter(statename = institution.statename)
if len(institutions) == 1:
return institutions[0]
elif len(institutions) > 1:
LogWarning("Multiple matching institutions: " + institution.name)
return institutions[0]
else:
assert not institutions
return None
def lookupMatchingInstitution(institution, quiet=False):
assert institution
exact = lookupExactMatchingInstitution(institution)
if not exact:
LogWarning("Cannot find exact match for institution: " + str(institution))
institutions = Institutions.objects.filter(name = institution.name)
if len(institutions) >= 1:
LogWarning("Found approximate match: " + str(institutions[0]))
return institutions[0]
else:
return None
else:
return exact
def lookupMatchingPI(email):
if email:
pis = NSFInvestigator.objects.filter(email = email)
if len(pis) == 1:
pi = pis[0]
else:
if len(pis) == 0:
LogWarning("No matching pi for: " + email)
pi = None
else:
LogWarning("Multiple matching pis for: " + email)
pi = pis[0]
return pi
return None
def lookupMatchingProject(awardID):
project = NSFProject.objects.filter(awardID = awardID)
if len(project) == 1:
return project[0]
else:
        if len(project) == 0:
            LogWarning("No matching project for: " + awardID)
            return None
        else:
            LogWarning("Multiple matching projects for: " + awardID)
            return project[0]
def addWords(project, nsfproject):
wcounts = process_text(project.abstract)
with transaction.atomic():
for (word, count) in wcounts.items():
Words.objects.create(project = nsfproject, word = word, count = count, title = False)
with transaction.atomic():
for (word, count) in wcounts.items():
Words.objects.create(project = nsfproject, word = word, count = count, title = True)
def loadProjects(file):
projects = None
with open(file, 'r') as f:
projects = pickle.load(f)
count = 0
total = len(projects)
LogMessage("Loading projects: " + str(total))
transaction.set_autocommit(True)
with transaction.atomic():
for project in projects:
oldproject = NSFProject.objects.filter(awardID = project.awardID)
if oldproject:
LogWarning("Project duplicate: " + project.awardID)
oldobj = oldproject[0]
oldobj.delete()
else:
LogWarning("New project: " + project.awardID)
startdate = datetime.datetime.strptime(project.startDate, "%m/%d/%Y").date()
expiredate = datetime.datetime.strptime(project.expirationDate, "%m/%d/%Y").date()
pi = None
piemail = project.pi
pi = lookupMatchingPI(piemail)
copis = []
copiemails = project.copis
for copiemail in copiemails:
copi = lookupMatchingPI(copiemail)
if copi:
copis.append(copi)
iname = project.institution
segments = iname.split('%')
while len(segments) > 3:
LogMessage("Shortening bad institution: " + iname)
segments[1] = segments[0] + "%" + segments[1]
segments = segments[1:]
if not len(segments) == 3:
LogWarning("Bad Institution: " + iname)
institution = None
else:
institution = lookupMatchingInstitution(Institution(segments[0], segments[1], segments[2]))
# LogMessage("Found matching institution: " + institution.name)
# LogMessage("Add project: " + project.title)
nsfproject = NSFProject.objects.create(awardID = project.awardID,
title = project.title,
startDate = startdate,
expirationDate = expiredate,
amount = int(project.amount),
satc = project.isSaTC(),
institution = institution,
abstract = project.abstract)
for organization in project.organizations:
Organizations.objects.create(project = nsfproject, organization = organization)
for program in project.programs:
Programs.objects.create(project = nsfproject, program = program)
if pi:
if project.isSaTC():
pi.satc = True
pi.save()
pimember = ProjectPIs.objects.create(
investigator = pi,
project = nsfproject,
institution = institution,
role = 'PI')
for copi in copis:
if project.isSaTC():
copi.satc = True
ProjectPIs.objects.create(investigator = copi,
project = nsfproject,
institution = institution,
role = 'CoPI')
# LogMessage("Adding project: " + str(project.awardID))
assert lookupMatchingProject(project.awardID)
addWords(project, nsfproject)
count += 1
if count % 100 == 0:
LogMessage("... " + str(count) + " of " + str(total) + "...")
LogMessage("Adding collaborative projects...")
# Add collaborative projects once project table is complete
with transaction.atomic():
for project in projects:
pobj = lookupMatchingProject(project.awardID)
if not pobj:
LogWarning("No matching project! " + str(project.awardID))
continue
collabs = project.collabs
for collab in collabs:
cobj = lookupMatchingProject(collab)
CollabProjects.objects.create(project1 = pobj, project2 = cobj)
# check collab symmetry
def loadFile(f):
if 'projects' in f:
loadProjects(f)
elif 'institutions' in f:
loadInstitutions(f)
else:
loadInvestigators(f)
if __name__ == "__main__":
for arg in sys.argv[1:]:
loadFile(arg)
|
|
from __future__ import absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal as assert_almost
from pytest import raises
from ...mesher import Prism
from .. import _prism_numpy, prism
from ... import utils, gridder
def test_fails_if_shape_mismatch():
'gravmag.prism fails if given computation points with different shapes'
inc, dec = 10, 0
model = [Prism(-6000, -2000, 2000, 4000, 0, 3000,
{'density': 1000,
'magnetization': utils.ang2vec(10, inc, dec)})]
area = [-5000, 5000, -10000, 10000]
x, y, z = gridder.regular(area, (101, 51), z=-1)
raises(ValueError, prism.potential, x[:-2], y, z, model)
raises(ValueError, prism.potential, x, y[:-2], z, model)
raises(ValueError, prism.potential, x, y, z[:-2], model)
raises(ValueError, prism.potential, x[:-5], y, z[:-2], model)
raises(ValueError, prism.gx, x[:-2], y, z, model)
raises(ValueError, prism.gx, x, y[:-2], z, model)
raises(ValueError, prism.gx, x, y, z[:-2], model)
raises(ValueError, prism.gx, x[:-5], y, z[:-2], model)
raises(ValueError, prism.gy, x[:-2], y, z, model)
raises(ValueError, prism.gy, x, y[:-2], z, model)
raises(ValueError, prism.gy, x, y, z[:-2], model)
raises(ValueError, prism.gy, x[:-5], y, z[:-2], model)
raises(ValueError, prism.gz, x[:-2], y, z, model)
raises(ValueError, prism.gz, x, y[:-2], z, model)
raises(ValueError, prism.gz, x, y, z[:-2], model)
raises(ValueError, prism.gz, x[:-5], y, z[:-2], model)
raises(ValueError, prism.gxx, x[:-2], y, z, model)
raises(ValueError, prism.gxx, x, y[:-2], z, model)
raises(ValueError, prism.gxx, x, y, z[:-2], model)
raises(ValueError, prism.gxx, x[:-5], y, z[:-2], model)
raises(ValueError, prism.gxy, x[:-2], y, z, model)
raises(ValueError, prism.gxy, x, y[:-2], z, model)
raises(ValueError, prism.gxy, x, y, z[:-2], model)
raises(ValueError, prism.gxy, x[:-5], y, z[:-2], model)
raises(ValueError, prism.gxz, x[:-2], y, z, model)
raises(ValueError, prism.gxz, x, y[:-2], z, model)
raises(ValueError, prism.gxz, x, y, z[:-2], model)
raises(ValueError, prism.gxz, x[:-5], y, z[:-2], model)
raises(ValueError, prism.gyy, x[:-2], y, z, model)
raises(ValueError, prism.gyy, x, y[:-2], z, model)
raises(ValueError, prism.gyy, x, y, z[:-2], model)
raises(ValueError, prism.gyy, x[:-5], y, z[:-2], model)
raises(ValueError, prism.gyz, x[:-2], y, z, model)
raises(ValueError, prism.gyz, x, y[:-2], z, model)
raises(ValueError, prism.gyz, x, y, z[:-2], model)
raises(ValueError, prism.gyz, x[:-5], y, z[:-2], model)
raises(ValueError, prism.gzz, x[:-2], y, z, model)
raises(ValueError, prism.gzz, x, y[:-2], z, model)
raises(ValueError, prism.gzz, x, y, z[:-2], model)
raises(ValueError, prism.gzz, x[:-5], y, z[:-2], model)
raises(ValueError, prism.bx, x[:-2], y, z, model)
raises(ValueError, prism.bx, x, y[:-2], z, model)
raises(ValueError, prism.bx, x, y, z[:-2], model)
raises(ValueError, prism.bx, x[:-5], y, z[:-2], model)
raises(ValueError, prism.by, x[:-2], y, z, model)
raises(ValueError, prism.by, x, y[:-2], z, model)
raises(ValueError, prism.by, x, y, z[:-2], model)
raises(ValueError, prism.by, x[:-5], y, z[:-2], model)
raises(ValueError, prism.bz, x[:-2], y, z, model)
raises(ValueError, prism.bz, x, y[:-2], z, model)
raises(ValueError, prism.bz, x, y, z[:-2], model)
raises(ValueError, prism.bz, x[:-5], y, z[:-2], model)
raises(ValueError, prism.tf, x[:-2], y, z, model, inc, dec)
raises(ValueError, prism.tf, x, y[:-2], z, model, inc, dec)
raises(ValueError, prism.tf, x, y, z[:-2], model, inc, dec)
raises(ValueError, prism.tf, x[:-5], y, z[:-2], model, inc, dec)
raises(ValueError, prism.kernelxx, x[:-2], y, z, model[0])
raises(ValueError, prism.kernelxx, x, y[:-2], z, model[0])
raises(ValueError, prism.kernelxx, x, y, z[:-2], model[0])
raises(ValueError, prism.kernelxx, x[:-5], y, z[:-2], model[0])
raises(ValueError, prism.kernelxy, x[:-2], y, z, model[0])
raises(ValueError, prism.kernelxy, x, y[:-2], z, model[0])
raises(ValueError, prism.kernelxy, x, y, z[:-2], model[0])
raises(ValueError, prism.kernelxy, x[:-5], y, z[:-2], model[0])
raises(ValueError, prism.kernelxz, x[:-2], y, z, model[0])
raises(ValueError, prism.kernelxz, x, y[:-2], z, model[0])
raises(ValueError, prism.kernelxz, x, y, z[:-2], model[0])
raises(ValueError, prism.kernelxz, x[:-5], y, z[:-2], model[0])
raises(ValueError, prism.kernelyy, x[:-2], y, z, model[0])
raises(ValueError, prism.kernelyy, x, y[:-2], z, model[0])
raises(ValueError, prism.kernelyy, x, y, z[:-2], model[0])
raises(ValueError, prism.kernelyy, x[:-5], y, z[:-2], model[0])
raises(ValueError, prism.kernelyz, x[:-2], y, z, model[0])
raises(ValueError, prism.kernelyz, x, y[:-2], z, model[0])
raises(ValueError, prism.kernelyz, x, y, z[:-2], model[0])
raises(ValueError, prism.kernelyz, x[:-5], y, z[:-2], model[0])
raises(ValueError, prism.kernelzz, x[:-2], y, z, model[0])
raises(ValueError, prism.kernelzz, x, y[:-2], z, model[0])
raises(ValueError, prism.kernelzz, x, y, z[:-2], model[0])
raises(ValueError, prism.kernelzz, x[:-5], y, z[:-2], model[0])
def test_force_physical_property():
'gravmag.prism gives correct results when passed a property value as arg'
inc, dec = 10, 0
model = [Prism(-6000, -2000, 2000, 4000, 0, 3000,
{'density': 1000,
'magnetization': utils.ang2vec(10, inc, dec)}),
Prism(2000, 6000, 2000, 4000, 0, 1000,
{'density': -1000,
'magnetization': utils.ang2vec(15, inc, dec)})]
density = -500
mag = utils.ang2vec(-5, -30, 15)
reference = [
Prism(-6000, -2000, 2000, 4000, 0, 3000,
{'density': density, 'magnetization': mag}),
Prism(2000, 6000, 2000, 4000, 0, 1000,
{'density': density, 'magnetization': mag})]
area = [-10000, 10000, -5000, 5000]
x, y, z = gridder.regular(area, (51, 101), z=-1)
for mod in [prism, _prism_numpy]:
# Test gravity functions
funcs = ['potential', 'gx', 'gy', 'gz',
'gxx', 'gxy', 'gxz', 'gyy', 'gyz', 'gzz']
for f in funcs:
forced = getattr(mod, f)(x, y, z, model, dens=density)
ref = getattr(mod, f)(x, y, z, reference)
precision = 10
assert_almost(forced, ref, precision, 'Field = %s' % (f))
# Test magnetic functions
funcs = ['tf', 'bx', 'by', 'bz']
for f in funcs:
if f == 'tf':
forced = getattr(mod, f)(x, y, z, model, inc, dec, pmag=mag)
ref = getattr(mod, f)(x, y, z, reference, inc, dec)
else:
forced = getattr(mod, f)(x, y, z, model, pmag=mag)
ref = getattr(mod, f)(x, y, z, reference)
precision = 10
assert_almost(forced, ref, precision, 'Field = %s' % (f))
def test_ignore_none_and_missing_properties():
'gravmag.prism ignores None and prisms without the required property'
inc, dec = 50, -30
model = [None,
Prism(-6000, -2000, 2000, 4000, 0, 3000,
{'density': 1000,
'magnetization': utils.ang2vec(10, inc, dec)}),
Prism(2000, 6000, 2000, 4000, 0, 1000,
{'magnetization': utils.ang2vec(15, inc, dec)}),
None,
Prism(-6000, -2000, -4000, -2000, 500, 2000,
{'density': -1000})]
area = [-10000, 10000, -5000, 5000]
x, y, z = gridder.regular(area, (101, 51), z=-1)
for mod in [prism, _prism_numpy]:
# Test gravity functions
funcs = ['potential', 'gx', 'gy', 'gz',
'gxx', 'gxy', 'gxz', 'gyy', 'gyz', 'gzz']
for f in funcs:
combined = getattr(mod, f)(x, y, z, model)
separate = getattr(mod, f)(x, y, z, [model[1], model[4]])
precision = 10
assert_almost(separate, combined, precision, 'Field = %s' % (f))
# Test magnetic functions
funcs = ['tf', 'bx', 'by', 'bz']
for f in funcs:
mag_only = [model[1], model[2]]
if f == 'tf':
combined = getattr(mod, f)(x, y, z, model, inc, dec)
separate = getattr(mod, f)(x, y, z, mag_only, inc, dec)
else:
combined = getattr(mod, f)(x, y, z, model)
separate = getattr(mod, f)(x, y, z, mag_only)
precision = 10
assert_almost(separate, combined, precision, 'Field = %s' % (f))
def test_cython_agains_numpy():
"gravmag.prism numpy and cython implementations give same result"
inc, dec = -30, 50
model = [
Prism(100, 300, -100, 100, 0, 400,
{'density': -1000,
'magnetization': utils.ang2vec(-2, inc, dec)}),
Prism(-300, -100, -100, 100, 0, 200,
{'density': 2000, 'magnetization': utils.ang2vec(5, 25, -10)})]
tmp = np.linspace(-500, 500, 101)
xp, yp = [i.ravel() for i in np.meshgrid(tmp, tmp)]
zp = -1 * np.ones_like(xp)
kernels = ['xx', 'xy', 'xz', 'yy', 'yz', 'zz']
for comp in kernels:
for p in model:
py = getattr(_prism_numpy, 'kernel' + comp)(xp, yp, zp, p)
cy = getattr(prism, 'kernel' + comp)(xp, yp, zp, p)
assert_almost(py, cy, 10,
'Kernel = %s, max field %.15g max diff %.15g'
% (comp, np.abs(cy).max(), np.abs(py - cy).max()))
funcs = ['potential', 'gx', 'gy', 'gz',
'gxx', 'gxy', 'gxz', 'gyy', 'gyz', 'gzz',
'bx', 'by', 'bz', 'tf']
for f in funcs:
if f == 'tf':
py = getattr(_prism_numpy, f)(xp, yp, zp, model, inc, dec)
cy = getattr(prism, f)(xp, yp, zp, model, inc, dec)
else:
py = getattr(_prism_numpy, f)(xp, yp, zp, model)
cy = getattr(prism, f)(xp, yp, zp, model)
if f in ['bx', 'by', 'bz', 'tf']:
precision = 8
else:
precision = 10
assert_almost(py, cy, precision,
'Field = %s, max field %.15g max diff %.15g'
% (f, np.abs(cy).max(), np.abs(py - cy).max()))
def test_around():
"gravmag.prism gravitational results are consistent around the prism"
model = [Prism(-300, 300, -300, 300, -300, 300, {'density': 1000})]
# Make the computation points surround the prism
shape = (101, 101)
area = [-600, 600, -600, 600]
distance = 310
grids = [gridder.regular(area, shape, z=-distance),
gridder.regular(area, shape, z=distance),
gridder.regular(area, shape, z=distance)[::-1],
gridder.regular(area, shape, z=-distance)[::-1],
np.array(gridder.regular(area, shape, z=distance))[[0, 2, 1]],
np.array(gridder.regular(area, shape, z=-distance))[[0, 2, 1]]]
xp, yp, zp = grids[0]
# Test if each component is consistent
# POTENTIAL
face = [prism.potential(x, y, z, model) for x, y, z in grids]
for i in range(6):
for j in range(i + 1, 6):
assert_almost(face[i], face[j], 10,
'Failed potential, faces %d and %d' % (i, j))
# GX
top, bottom, north, south, east, west = [prism.gx(x, y, z, model)
for x, y, z in grids]
assert_almost(top, bottom, 10, 'Failed gx, top and bottom')
assert_almost(north, -south, 10, 'Failed gx, north and south')
assert_almost(east, west, 10, 'Failed gx, east and west')
assert_almost(east, top, 10, 'Failed gx, east and top')
assert_almost(north, -prism.gz(xp, yp, zp, model), 10,
'Failed gx, north and gz')
assert_almost(south, prism.gz(xp, yp, zp, model), 10,
'Failed gx, south and gz')
# GY
top, bottom, north, south, east, west = [prism.gy(x, y, z, model)
for x, y, z in grids]
assert_almost(top, bottom, 10, 'Failed gy, top and bottom')
assert_almost(north, south, 10, 'Failed gy, north and south')
assert_almost(east, -west, 10, 'Failed gy, east and west')
assert_almost(north, top, 10, 'Failed gy, north and top')
assert_almost(east, -prism.gz(xp, yp, zp, model), 10,
'Failed gy, east and gz')
assert_almost(west, prism.gz(xp, yp, zp, model), 10,
'Failed gy, west and gz')
# GZ
top, bottom, north, south, east, west = [prism.gz(x, y, z, model)
for x, y, z in grids]
assert_almost(top, -bottom, 10, 'Failed gz, top and bottom')
assert_almost(north, south, 10, 'Failed gz, north and south')
assert_almost(east, west, 10, 'Failed gz, east and west')
assert_almost(north, prism.gx(xp, yp, zp, model), 10,
'Failed gz, north and gx')
assert_almost(south, prism.gx(xp, yp, zp, model), 10,
'Failed gz, south and gx')
assert_almost(east, prism.gy(xp, yp, zp, model), 10,
'Failed gz, east and gy')
assert_almost(west, prism.gy(xp, yp, zp, model), 10,
'Failed gz, west and gy')
# GXX
top, bottom, north, south, east, west = [prism.gxx(x, y, z, model)
for x, y, z in grids]
assert_almost(top, bottom, 10, 'Failed gxx, top and bottom')
assert_almost(north, south, 10, 'Failed gxx, north and south')
assert_almost(east, west, 10, 'Failed gxx, east and west')
assert_almost(east, top, 10, 'Failed gxx, east and top')
assert_almost(north, prism.gzz(xp, yp, zp, model), 10,
'Failed gxx, north and gzz')
assert_almost(south, prism.gzz(xp, yp, zp, model), 10,
'Failed gxx, south and gzz')
# GXY
top, bottom, north, south, east, west = [prism.gxy(x, y, z, model)
for x, y, z in grids]
assert_almost(top, bottom, 4, 'Failed gxy, top and bottom')
assert_almost(north, -south, 10, 'Failed gxy, north and south')
assert_almost(east, -west, 10, 'Failed gxy, east and west')
assert_almost(north, -prism.gyz(xp, yp, zp, model), 10,
'Failed gxy, north and gyz')
assert_almost(south, prism.gyz(xp, yp, zp, model), 10,
'Failed gxy, south and gyz')
# GXZ
top, bottom, north, south, east, west = [prism.gxz(x, y, z, model)
for x, y, z in grids]
assert_almost(top, -bottom, 10, 'Failed gxz, top and bottom')
assert_almost(north, -south, 10, 'Failed gxz, north and south')
assert_almost(east, west, 4, 'Failed gxz, east and west')
assert_almost(bottom, north, 10, 'Failed gxz, bottom and north')
assert_almost(top, south, 10, 'Failed gxz, top and south')
assert_almost(east, prism.gxy(xp, yp, zp, model), 4,
'Failed gxz, east and gxy')
assert_almost(west, prism.gxy(xp, yp, zp, model), 10,
'Failed gxz, west and gxy')
# GYY
top, bottom, north, south, east, west = [prism.gyy(x, y, z, model)
for x, y, z in grids]
assert_almost(top, bottom, 10, 'Failed gyy, top and bottom')
assert_almost(north, south, 10, 'Failed gyy, north and south')
assert_almost(east, west, 10, 'Failed gyy, east and west')
assert_almost(top, north, 10, 'Failed gyy, top and north')
assert_almost(east, prism.gzz(xp, yp, zp, model), 10,
'Failed gyy, east and gzz')
assert_almost(west, prism.gzz(xp, yp, zp, model), 10,
'Failed gyy, west and gzz')
# GYZ
top, bottom, north, south, east, west = [prism.gyz(x, y, z, model)
for x, y, z in grids]
assert_almost(top, -bottom, 10, 'Failed gyz, top and bottom')
assert_almost(north, south, 4, 'Failed gyz, north and south')
assert_almost(east, -west, 10, 'Failed gyz, east and west')
assert_almost(top, west, 10, 'Failed gyz, top and west')
assert_almost(bottom, east, 10, 'Failed gyz, bottom and east')
assert_almost(north, prism.gxy(xp, yp, zp, model), 4,
'Failed gyz, north and gxy')
assert_almost(south, prism.gxy(xp, yp, zp, model), 10,
'Failed gyz, south and gxy')
# GZZ
top, bottom, north, south, east, west = [prism.gzz(x, y, z, model)
for x, y, z in grids]
assert_almost(top, bottom, 10, 'Failed gzz, top and bottom')
assert_almost(north, south, 10, 'Failed gzz, north and south')
assert_almost(east, west, 10, 'Failed gzz, east and west')
assert_almost(north, prism.gxx(xp, yp, zp, model), 10,
'Failed gzz, north and gxx')
assert_almost(south, prism.gxx(xp, yp, zp, model), 10,
'Failed gzz, south and gxx')
assert_almost(east, prism.gyy(xp, yp, zp, model), 10,
'Failed gzz, east and gyy')
assert_almost(west, prism.gyy(xp, yp, zp, model), 10,
'Failed gzz, west and gyy')
|
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import time
import argparse
import os
import six
import sys
sys.path.append("..")
import subprocess
import traceback
import functools
import pickle
from contextlib import closing
import paddle.fluid as fluid
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
from six import string_types
import paddle
from paddle.fluid.tests.unittests.op_test import OpTest, _set_use_system_allocator
from paddle.fluid.tests.unittests.test_sync_batch_norm_op import create_or_get_tensor
_set_use_system_allocator(False)
paddle.enable_static()
SEED = 10
class TestSyncBatchNormRunnerBase(object):
def get_model(self,
main,
startup,
place,
layout,
seed,
sync_bn=False,
only_forward=False):
raise NotImplementedError(
"get model should be implemented by child class.")
def wait_server_ready(self, endpoints):
assert not isinstance(endpoints, string_types)
while True:
all_ok = True
not_ready_endpoints = []
for ep in endpoints:
ip_port = ep.split(":")
with closing(
socket.socket(socket.AF_INET,
socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, 'SO_REUSEPORT'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT,
1)
result = sock.connect_ex((ip_port[0], int(ip_port[1])))
if result != 0:
all_ok = False
not_ready_endpoints.append(ep)
if not all_ok:
sys.stderr.write("server not ready, wait 3 sec to retry...\n")
sys.stderr.write("not ready endpoints:" + str(
not_ready_endpoints) + "\n")
sys.stderr.flush()
time.sleep(3)
else:
break
#endpoints should be ["ip1:port1","ip2:port2"]
def initCommunicator(self, program, rank, nranks, wait_port,
current_endpoint, endpoints):
other_endpoints = endpoints[:]
other_endpoints.remove(current_endpoint)
if rank == 0 and wait_port:
self.wait_server_ready(other_endpoints)
block = program.global_block()
hccl_id_var = block.create_var(
name=nameGen.generate('hccl_id'),
persistable=True,
type=core.VarDesc.VarType.RAW)
block.append_op(
type='c_gen_hccl_id',
inputs={},
outputs={'Out': hccl_id_var},
attrs={
'rank': rank,
'endpoint': current_endpoint,
'other_endpoints': other_endpoints
})
block.append_op(
type='c_comm_init_hccl',
inputs={'X': hccl_id_var},
outputs={},
attrs={
'rank': rank,
'ring_id': self.global_ring_id,
'device_id': int(os.getenv("FLAGS_selected_npus")),
'rank_ids': nranks
})
def run_trainer(self, args):
device_id = int(os.getenv("FLAGS_selected_npus", "0"))
place = fluid.NPUPlace(device_id)
places = [place]
# Test training
for place in places:
for layout in ["NCHW", "NHWC"]:
self._compare(args, place, layout, False)
# Test inference
for place in places:
for layout in ["NCHW", "NHWC"]:
self._compare(args, place, layout, True)
# Test FP16 - @TODO
# self.dtype = np.float16
# self.atol = 1e-2
# Test training
# for place in places:
# for layout in ["NCHW", "NHWC"]:
# self._compare(args, place, layout, False)
# Test inference
# for place in places:
# for layout in ["NCHW", "NHWC"]:
# self._compare(args, place, layout, True)
sys.stdout.buffer.write(
pickle.dumps(
'training, inference, fp32, fp16, NCHW, NHWC all passed'))
def _compare(self, args, place, layout, only_forward):
scope = core.Scope()
np.random.seed(SEED)
data = np.random.random(size=self.dshape).astype(self.dtype) * 4. - 2
sys.stderr.write("data: " + str(data) + "\n")
data = create_or_get_tensor(scope, "input",
OpTest.np_dtype_to_fluid_dtype(data), place)
bn_fetches = self._cal_single_card(args, data, place, layout,
only_forward)
fetch_names, sync_bn_fetches = self._cal_multiple_cards(
args, data, place, layout, only_forward)
sys.stderr.write("len(sync_bn_fetches): " + str(len(sync_bn_fetches)) +
"\n")
for i in six.moves.xrange(0, len(sync_bn_fetches)):
sys.stderr.write("i: " + str(i) + "\n")
sys.stderr.write("fetch_names[i]): " + fetch_names[i] + "\n")
bn_val = bn_fetches[i]
sync_bn_val = sync_bn_fetches[i]
if sync_bn_val.shape != bn_val.shape:
sync_bn_val = sync_bn_val[:bn_val.shape[0]]
# i = 0
if fetch_names[i] == 'reduce_sum_0.tmp_0':
# sys.stderr.write("skip reduce_sum_0.tmp_0 (Out of reduce_sum op)" + "\n")
sys.stderr.write("reduce_sum_0.tmp_0 (Out of reduce_sum op)" +
"\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
# continue
# i = 1
if fetch_names[i] == 'conv2d_0.tmp_0':
# sys.stderr.write("skip conv2d_0.tmp_0 (X)" + "\n")
sys.stderr.write("conv2d_0.tmp_0 (X)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
# continue
# i = 2
if fetch_names[i] == 'batch_norm_0.tmp_3':
# sys.stderr.write("skip batch_norm_0.tmp_3 (Y)" + "\n")
sys.stderr.write("batch_norm_0.tmp_3 (Y)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
# continue
# i = 2
if fetch_names[i] == 'batch_norm_0.tmp_2':
# sys.stderr.write("skip batch_norm_0.tmp_2 (ReserveSpace of batch_norm)" + "\n")
sys.stderr.write(
"batch_norm_0.tmp_2 (ReserveSpace of batch_norm)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
# continue
# i = 3
if fetch_names[i] == 'bn_moving_mean':
sys.stderr.write("skip bn_moving_mean (MeanOut)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
continue
# i = 4
if fetch_names[i] == 'bn_moving_variance':
sys.stderr.write("skip bn_moving_variance (VarianceOut)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
continue
# i = 7
if fetch_names[i] == 'batch_norm_0.tmp_0':
# sys.stderr.write("skip batch_norm_0.tmp_0 (SavedMean)" + "\n")
sys.stderr.write("batch_norm_0.tmp_0 (SavedMean)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
# continue
# i = 8
if fetch_names[i] == 'batch_norm_0.tmp_1':
sys.stderr.write("skip batch_norm_0.tmp_1 (SavedVariance)" +
"\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
continue
# i = 9
if fetch_names[i] == 'bn_scale@GRAD':
# sys.stderr.write("skip bn_scale@GRAD (Scale@GRAD)" + "\n")
sys.stderr.write("bn_scale@GRAD (Scale@GRAD)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
# continue
# i = 10
if fetch_names[i] == 'bn_bias@GRAD':
# sys.stderr.write("skip bn_bias@GRAD (Bias@GRAD)" + "\n")
sys.stderr.write("bn_bias@GRAD (Bias@GRAD)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
# continue
# i = 11
if fetch_names[i] == 'batch_norm_0.tmp_3@GRAD':
# sys.stderr.write("skip batch_norm_0.tmp_3@GRAD (Y@GRAD)" + "\n")
sys.stderr.write("batch_norm_0.tmp_3@GRAD (Y@GRAD)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
# continue
# i = 12
if fetch_names[i] == 'conv2d_0.tmp_0@GRAD':
# sys.stderr.write("skip conv2d_0.tmp_0@GRAD (X@GRAD)" + "\n")
sys.stderr.write("conv2d_0.tmp_0@GRAD (X@GRAD)" + "\n")
sys.stderr.write("bn_val: " + str(bn_val) + "\n")
sys.stderr.write("sync_bn_val: " + str(sync_bn_val) + "\n")
# continue
atol = self.atol
if fetch_names[i] == 'conv2d_0.tmp_0@GRAD':
atol = 1e-2
assert np.allclose(
bn_val, sync_bn_val, atol=atol), "Output (" + fetch_names[
i] + ") has diff. \n" + "\nBN " + str(
bn_val) + "\n" + "Sync BN " + str(sync_bn_val)
def _cal_single_card(self, args, data, place, layout, only_forward):
# Single-NPU, N = 32 per NPU
train_prog = fluid.Program()
startup_prog = fluid.Program()
train_prog.global_seed(SEED)
startup_prog.global_seed(SEED)
paddle.seed(SEED)
outs = self.get_model(train_prog, startup_prog, place, layout, SEED,
False, only_forward)
exe = fluid.Executor(place)
exe.run(startup_prog)
fetch_names = [v.name for v in outs] + [
'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias'
]
if not only_forward:
others = [
'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD',
'bn_bias@GRAD', 'batch_norm_0.tmp_3@GRAD', 'conv2d_0.tmp_0@GRAD'
]
fetch_names += others
bn_fetches = exe.run(program=train_prog,
feed={'input': data},
fetch_list=fetch_names)
return bn_fetches
def _cal_multiple_cards(self, args, data, place, layout, only_forward):
# Multi-NPUs, self.N per NPU
# return
assert core.get_npu_device_count() > 1
train_prog = fluid.Program()
startup_prog = fluid.Program()
train_prog.global_seed(SEED)
startup_prog.global_seed(SEED)
paddle.seed(SEED)
sys.stderr.write("train_prog: " + train_prog.to_string(True) + "\n")
sys.stderr.write("startup_prog: " + startup_prog.to_string(True) + "\n")
endpoints = args["endpoints"].split(",")
rank = args["trainerid"]
current_endpoint = args["currentendpoint"]
nranks = 2
self.initCommunicator(startup_prog, rank, nranks, True,
current_endpoint, endpoints)
sys.stderr.write("after init, startup_prog: " + startup_prog.to_string(
True) + "\n")
train_prog.global_seed(SEED)
train_prog._sync_with_cpp()
startup_prog.global_seed(SEED)
startup_prog._sync_with_cpp()
paddle.seed(SEED)
self.rank = rank
outs = self.get_model(train_prog, startup_prog, place, layout, SEED,
True, only_forward)
sys.stderr.write("after get_model, train_prog: " + train_prog.to_string(
True) + "\n")
sys.stderr.write("after get_model, startup_prog: " +
startup_prog.to_string(True) + "\n")
ops = train_prog.blocks[0].ops
for i, op in enumerate(ops):
if op.type == 'batch_norm':
sys.stderr.write("i: " + str(i) + "\n")
sys.stderr.write("op type: " + op.type + "\n")
op.desc.set_type('sync_batch_norm')
if op.type == 'batch_norm_grad':
sys.stderr.write("i: " + str(i) + "\n")
sys.stderr.write("op type: " + op.type + "\n")
op.desc.set_type('sync_batch_norm_grad')
sys.stderr.write("after update sync_batch_norm, train_prog: " +
train_prog.to_string(True) + "\n")
exe = fluid.Executor(place)
exe.run(startup_prog)
fetch_names = [v.name for v in outs] + [
'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias'
]
if not only_forward:
others = [
'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD',
'bn_bias@GRAD', 'batch_norm_0.tmp_3@GRAD', 'conv2d_0.tmp_0@GRAD'
]
fetch_names += others
sync_bn_fetches = exe.run(program=train_prog,
feed={'input': data},
fetch_list=fetch_names)
return fetch_names, sync_bn_fetches
def runtime_main(test_class, col_type, sub_type):
args = {}
model = test_class()
args["deviceid"] = os.getenv("FLAGS_selected_npus")
args["trainerid"] = int(os.getenv("PADDLE_TRAINER_ID"))
args["trainernum"] = int(os.getenv("PADDLE_TRAINERS_NUM"))
args["endpoints"] = os.getenv('PADDLE_TRAINER_ENDPOINTS')
args["currentendpoint"] = os.getenv("PADDLE_CURRENT_ENDPOINT")
args["col_type"] = col_type
model.run_trainer(args)
import paddle.compat as cpt
import socket
from contextlib import closing
class TestDistBase(unittest.TestCase):
def setUp(self):
self._port_set = set()
self._trainers = 2
self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
self._find_free_port(), self._find_free_port())
self._python_interp = sys.executable
def _find_free_port(self):
def __free_port():
with closing(socket.socket(socket.AF_INET,
socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
while True:
port = __free_port()
if port not in self._port_set:
self._port_set.add(port)
return port
def _run_cluster(self, model_file, envs):
worker_endpoints = self._ps_endpoints.split(",")
w0_ep, w1_ep = worker_endpoints
# print("w0_ep:", w0_ep, " w1_ep:", w1_ep)
env0 = {
"FLAGS_selected_npus": "0",
"PADDLE_TRAINER_ID": "0",
"PADDLE_TRAINERS_NUM": "2",
"PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
"PADDLE_CURRENT_ENDPOINT": w0_ep,
}
env1 = {
"FLAGS_selected_npus": "1",
"PADDLE_TRAINER_ID": "1",
"PADDLE_TRAINERS_NUM": "2",
"PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
"PADDLE_CURRENT_ENDPOINT": w1_ep,
}
#update environment
env0.update(envs)
env1.update(envs)
tr_cmd = "%s %s"
tr0_cmd = tr_cmd % (self._python_interp, model_file)
tr1_cmd = tr_cmd % (self._python_interp, model_file)
tr0_pipe = open("/tmp/tr0_err.log", "wb")
tr1_pipe = open("/tmp/tr1_err.log", "wb")
# print(tr0_cmd)
# print(tr1_cmd)
tr0_proc = subprocess.Popen(
tr0_cmd.strip().split(),
stdout=subprocess.PIPE,
stderr=tr0_pipe,
env=env0)
tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(),
stdout=subprocess.PIPE,
stderr=tr1_pipe,
env=env1)
tr0_out, tr0_err = tr0_proc.communicate()
tr1_out, tr1_err = tr1_proc.communicate()
sys.stderr.write('trainer 0 stderr: %s\n' % tr0_err)
sys.stderr.write('trainer 1 stderr: %s\n' % tr1_err)
# close trainer file
tr0_pipe.close()
tr1_pipe.close()
return pickle.loads(tr0_out), pickle.loads(
tr1_out), tr0_proc.pid, tr1_proc.pid
def check_with_place(self, model_file, col_type, need_envs={}):
tr0_out, tr1_out, pid0, pid1 = self._run_cluster(model_file, need_envs)
self.assertEqual(
tr0_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed')
self.assertEqual(
tr1_out, 'training, inference, fp32, fp16, NCHW, NHWC all passed')
|
|
# Shikha Chaganti
# Kunal Nabar
# Vanderbilt University
# Medical-image Analysis and Statistical Interpretation Lab
# newphewas
# v2.0
import numpy as np
# import matplotlib.pyplot as plt
# from matplotlib.backends.backend_pdf import PdfPages
# from matplotlib import rcParams
# import matplotlib.lines as mlines
from collections import Counter
import time, math, scipy.stats
import pandas as pd
import os
import statsmodels.formula.api as smf
import statsmodels.discrete.discrete_model as sm
"""
I/O Reading Input From Files
"""
def get_codes(): # same
"""
    Gets the PheWAS codes from a local csv file and loads them into a pandas DataFrame.
:returns: All of the codes from the resource file.
:rtype: pandas DataFrame
"""
sep = os.sep
path = os.path.dirname(os.path.abspath(__file__))
filename = os.sep.join([path, 'resources', 'codes.csv'])
return pd.read_csv(filename)
def get_input(path, filename,reg_type): # diff -done - add duration
"""
Read all of the phenotype data from the given file and load it into a pandas DataFrame.
:param path: The path to the file that contains the phenotype data
:param filename: The name of the file that contains the phenotype data.
:type path: string
:type filename: string
:returns: The data from the phenotype file.
:rtype: pandas DataFrame
"""
wholefname = path + filename
icdfile = pd.read_csv(wholefname)
icdfile['icd9'] = icdfile['icd9'].str.strip()
# if reg_type == 0:
# # g=icdfile.groupby(['id','icd9'])
# phenotypes = pd.merge(icdfile, codes, on='icd9')
# phenotypes['MaxAgeAtICD'] = 0
# phenotypes['MaxAgeAtICD'] = phenotypes.groupby(['id', 'phewas_code'])['AgeAtICD9'].transform('max')
# else:
"""
    This needs to be changed: the code should handle a variety of different naming
    conventions in the phenotype file, not just 'AgeAtICD', 'id', 'icd9', etc.
    Either the code should accept alternative column names (see the illustrative
    helper sketched after this function), or the documentation should state
    explicitly which column names are required.
"""
phenotypes = pd.merge(icdfile, codes, on='icd9')
phenotypes['count'] = 0
phenotypes['count'] = phenotypes.groupby(['id', 'phewas_code'])['count'].transform('count')
phenotypes['MaxAgeAtICD'] = 0
phenotypes['MaxAgeAtICD'] = phenotypes.groupby(['id', 'icd9'])['AgeAtICD9'].transform('max')
phenotypes['MaxAgeAtPhe'] = 0
phenotypes['MaxAgeAtPhe'] = phenotypes.groupby(['id', 'phewas_code'])['AgeAtICD9'].transform('max')
phenotypes['duration'] = phenotypes.groupby(['id', 'phewas_code'])['AgeAtICD9'].transform('max') - \
phenotypes.groupby(['id', 'phewas_code'])['AgeAtICD9'].transform('min') + 1
phenotypes['lor'] = phenotypes.groupby('id')['AgeAtICD9'].transform('max') - \
phenotypes.groupby('id')['AgeAtICD9'].transform('min') + 1
return phenotypes
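# Illustrative only: get_input() assumes the phenotype CSV has at least the columns
# 'id', 'icd9', and 'AgeAtICD9' (the ICD-9 strings shown here are just examples).
# A minimal file with that layout could be built like this:
def _demo_phenotype_file(path):
    demo = pd.DataFrame({'id': [1, 1, 2],
                         'icd9': ['250.00', '401.1', '250.00'],
                         'AgeAtICD9': [54.2, 55.0, 61.3]})
    demo.to_csv(path + 'demo_phenotypes.csv', index=False)
    return demo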
def get_phewas_info(p_index): # same
"""
Returns all of the info of the phewas code at the given index.
:param p_index: The index of the desired phewas code
:type p_index: int
:returns: A list including the code, the name, and the rollup of the phewas code. The rollup is a list of all of the ICD-9 codes that are grouped into this phewas code.
:rtype: list of strings
"""
p_code = phewas_codes.loc[p_index].phewas_code
corresponding = codes[codes.phewas_code == p_code]
p_name = corresponding.iloc[0].phewas_string
p_rollup = ','.join(codes[codes.phewas_code == p_code].icd9.tolist())
return [p_code, p_name, p_rollup]
def get_icd_info(i_index): # same
"""
Returns all of the info of the ICD-9 code at the given index.
:param i_index: The index of the desired ICD-9 code
:type i_index: int
:returns: A list including the code, the name, and the rollup of the ICD-9 code.
:rtype: list of strings
"""
p_code = icd_codes.loc[i_index].icd9
corresponding = codes[codes.icd9 == p_code]
p_name = corresponding.iloc[0].icd9_string
p_rollup = ','.join(codes[codes.icd9 == p_code].icd9.tolist())
return [p_code, p_name, p_rollup]
def get_group_file(path, filename): # same
"""
Read all of the genotype data from the given file and load it into a pandas DataFrame.
:param path: The path to the file that contains the phenotype data
:param filename: The name of the file that contains the phenotype data.
:type path: string
:type filename: string
:returns: The data from the genotype file.
:rtype: pandas DataFrame
"""
wholefname = path + filename
genotypes = pd.read_csv(wholefname)
return genotypes
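# Illustrative only: get_group_file() simply reads a CSV, but the rest of the pipeline
# expects at least 'id', a binary 'genotype' column, 'MaxAgeBeforeDx' (used to fill
# missing ages), and any covariate columns named in the regression (e.g. 'sex').
def _demo_genotype_file(path):
    demo = pd.DataFrame({'id': [1, 2],
                         'genotype': [1, 0],
                         'sex': [1, 0],
                         'MaxAgeBeforeDx': [60.0, 65.0]})
    demo.to_csv(path + 'demo_genotypes.csv', index=False)
    return demo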
def get_imbalances(regressions):
"""
Generates a numpy array of the imbalances.
For a value *x* where *x* is the beta of a regression:
========= ====== =======================================================
*x* < 0 **-1** The regression had a negative beta value
*x* = nan **0** The regression had a nan beta value (and a nan p-value)
*x* > 0 **+1** The regression had a positive beta value
========= ====== =======================================================
These values are then used to get the correct colors using the imbalance_colors.
:param regressions: DataFrame containing a variety of different output values from the regression performed. The only one used for this function are the 'beta' values.
:type regressions: pandas DataFrame
:returns: A list that is the length of the number of regressions performed. Each element in the list is either a -1, 0, or +1. These are used as explained above.
:rtype: numpy array
"""
imbalance = np.array(regressions['beta'])
imbalance[np.isnan(imbalance)] = 0
imbalance[imbalance > 0] = 1
imbalance[imbalance < 0] = -1
return imbalance
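# Quick sanity sketch (illustrative): the -1/0/+1 mapping documented above, applied to
# a tiny made-up regressions frame with a positive, a negative, and a nan beta.
def _demo_get_imbalances():
    demo = pd.DataFrame({'beta': [0.7, -1.2, np.nan]})
    return get_imbalances(demo)  # expected: array([ 1., -1.,  0.])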
def generate_feature_matrix(genotypes, phenotypes, reg_type,phewas_cov=''): # diff - done
"""
Generates the feature matrix that will be used to run the regressions.
:param genotypes: A pandas DataFrame of the genotype file (one row per subject 'id').
:param phenotypes: The phenotype DataFrame returned by get_input().
:param reg_type: The type of regression (0 = binary, 1 = count, 2 = duration).
:type genotypes: pandas DataFrame
:type phenotypes: pandas DataFrame
:type reg_type: int
:returns: A 3 x (number of subjects) x (number of PheWAS codes) feature matrix (phenotype signal, age, optional phewas_cov flag).
:rtype: numpy array
"""
feature_matrix = np.zeros((3, genotypes.shape[0], phewas_codes.shape[0]), dtype=float)
count = 0
for i in genotypes['id']:
if reg_type == 0:
temp = pd.DataFrame(phenotypes[phenotypes['id'] == i][['phewas_code', 'MaxAgeAtPhe','count']]).drop_duplicates()
match = phewas_codes['phewas_code'].isin(list(phenotypes[phenotypes['id'] == i]['phewas_code']))
cts = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['count']
cts[np.isnan(cts)] = 0
match = (match)&(cts>0)
feature_matrix[0][count, match[match == True].index] = 1
age = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['MaxAgeAtPhe']
#assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled"
age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx']
feature_matrix[1][count, :] = age
if phewas_cov:
feature_matrix[2][count, :] = int(phewas_cov in list(phenotypes[phenotypes['id'] == i]['phewas_code']))
else:
if reg_type == 1:
temp = pd.DataFrame(
phenotypes[phenotypes['id'] == i][['phewas_code', 'MaxAgeAtPhe', 'count','lor']]).drop_duplicates()
cts = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['count']
cts[np.isnan(cts)] = 0
# if temp.empty!=1:
# cts=cts/temp['lor'].iloc[0]
feature_matrix[0][count, :] = cts
age = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['MaxAgeAtPhe']
#assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled"
age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx']
feature_matrix[1][count, :] = age
if phewas_cov:
feature_matrix[2][count, :] = int(
phewas_cov in list(phenotypes[phenotypes['id'] == i]['phewas_code']))
elif reg_type == 2:
temp = pd.DataFrame(
phenotypes[phenotypes['id'] == i][['phewas_code', 'MaxAgeAtPhe', 'duration','lor']]).drop_duplicates()
dura = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['duration']
dura[np.isnan(dura)] = 0
# if temp.empty!=1:
# dura=dura/temp['lor'].iloc[0]
feature_matrix[0][count, :] = dura
age = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['MaxAgeAtPhe']
#assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled"
age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx']
feature_matrix[1][count, :] = age
if phewas_cov:
feature_matrix[2][count, :] = int(
phewas_cov in list(phenotypes[phenotypes['id'] == i]['phewas_code']))
count += 1
return feature_matrix
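# Shape/usage sketch (illustrative): the matrix is indexed as fm[layer][subject, code],
# where layer 0 holds the phenotype signal (binary, count, or duration depending on
# reg_type), layer 1 holds the age covariate, and layer 2 the optional phewas_cov flag.
def _demo_feature_matrix_shape(genotypes, phenotypes):
    fm = generate_feature_matrix(genotypes, phenotypes, reg_type=0)
    assert fm.shape == (3, genotypes.shape[0], phewas_codes.shape[0])
    return fm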
def generate_icdfeature_matrix(genotypes, phenotypes, reg_type,phewas_cov=''): # diff - done
"""
Generates the feature matrix that will be used to run the regressions.
:param genotypes: A pandas DataFrame of the genotype file (one row per subject 'id').
:param phenotypes: The phenotype DataFrame returned by get_input().
:param reg_type: The type of regression (0 = binary, 1 = count, 2 = duration).
:type genotypes: pandas DataFrame
:type phenotypes: pandas DataFrame
:type reg_type: int
:returns: A 3 x (number of subjects) x (number of ICD-9 codes) feature matrix (phenotype signal, age, optional phewas_cov flag).
:rtype: numpy array
"""
feature_matrix = np.zeros((3, genotypes.shape[0], icd_codes.shape[0]), dtype=float)
count = 0
for i in genotypes['id']:
if reg_type == 0:
temp = pd.DataFrame(phenotypes[phenotypes['id'] == i][['icd9', 'MaxAgeAtICD','count']]).drop_duplicates()
match = icd_codes['icd9'].isin(list(phenotypes[phenotypes['id'] == i]['icd9']))
cts = pd.merge(icd_codes, temp, on='icd9', how='left')['count']
cts[np.isnan(cts)] = 0
match = (match)&(cts>0)
feature_matrix[0][count, match[match == True].index] = 1
age = pd.merge(icd_codes, temp, on='icd9', how='left')['MaxAgeAtICD']
#assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled"
age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx']
feature_matrix[1][count, :] = age
if phewas_cov:
feature_matrix[2][count, :] = int(phewas_cov in list(phenotypes[phenotypes['id'] == i]['icd9']))
else:
if reg_type == 1:
temp = pd.DataFrame(
phenotypes[phenotypes['id'] == i][['icd9', 'MaxAgeAtICD', 'count','lor']]).drop_duplicates()
cts = pd.merge(icd_codes, temp, on='icd9', how='left')['count']
cts[np.isnan(cts)] = 0
# if temp.empty!=1:
# cts=cts/temp['lor'].iloc[0]
feature_matrix[0][count, :] = cts
age = pd.merge(icd_codes, temp, on='icd9', how='left')['MaxAgeAtICD']
#assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled"
age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx']
feature_matrix[1][count, :] = age
if phewas_cov:
feature_matrix[2][count, :] = int(
phewas_cov in list(phenotypes[phenotypes['id'] == i]['icd9']))
elif reg_type == 2:
temp = pd.DataFrame(
phenotypes[phenotypes['id'] == i][['icd9', 'MaxAgeAtICD', 'duration','lor']]).drop_duplicates()
dura = pd.merge(icd_codes, temp, on='icd9', how='left')['duration']
dura[np.isnan(dura)] = 0
# if temp.empty!=1:
# dura=dura/temp['lor'].iloc[0]
feature_matrix[0][count, :] = dura
age = pd.merge(icd_codes, temp, on='icd9', how='left')['MaxAgeAtICD']
#assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled"
age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx']
feature_matrix[1][count, :] = age
if phewas_cov:
feature_matrix[2][count, :] = int(
phewas_cov in list(phenotypes[phenotypes['id'] == i]['icd9']))
count += 1
return feature_matrix
def get_bon_thresh(normalized, power): # same
"""
Calculate the bonferroni correction threshold.
Divide the power by the sum of all finite values (all non-nan values).
:param normalized: an array of all normalized p-values. Normalized p-values are -log10(p) where p is the p-value.
:param power: the threshold power being used (usually 0.05)
:type normalized: numpy array
:type power: float
:returns: The bonferroni correction
:rtype: float
"""
return power / sum(np.isfinite(normalized))
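# Worked example (illustrative): with 2000 finite -log10(p) values and power = 0.05,
# the Bonferroni cutoff is 0.05 / 2000 = 2.5e-05.
def _demo_bon_thresh():
    normalized = np.ones(2000)  # all finite; the values themselves do not matter
    return get_bon_thresh(normalized, 0.05)  # -> 2.5e-05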
def get_fdr_thresh(p_values, power):
"""
Calculate the false discovery rate threshold.
:param p_values: a list of p-values obtained by executing the regression
:param power: the threshold power being used (usually 0.05)
:type p_values: numpy array
:type power: float
:returns: the false discovery rate
:rtype: float
"""
sn = np.sort(p_values)
sn = sn[np.isfinite(sn)]
sn = sn[::-1]
for i in range(len(sn)):
thresh = power * i / len(sn)
if sn[i] <= thresh:
break
return sn[i]
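# Usage note (illustrative): the FDR cutoff is computed on the raw p-values
# (results[1] from run_phewas), not on the -log10-normalized values; phewas() below
# then treats a code as significant when -log10(p) exceeds -math.log10(thresh).
def _demo_fdr_thresh(p_values):
    return get_fdr_thresh(np.array(p_values), 0.05)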
def get_bhy_thresh(p_values, power):
"""
Calculate the Benjamini-Hochberg-Yekutieli false discovery rate threshold.
:param p_values: a list of p-values obtained by executing the regression
:param power: the threshold power being used (usually 0.05)
:type p_values: numpy array
:type power: float
:returns: the false discovery rate
:rtype: float
"""
sn = np.sort(p_values)
sn = sn[np.isfinite(sn)]
sn = sn[::-1]
for i in range(len(sn)):
thresh = power * i / (8.1*len(sn))
if sn[i] <= thresh:
break
return sn[i]
def run_icd_phewas(fm, genotypes, covariates, reg_type, response='', phewas_cov=''): # same
"""
For each phewas code in the feature matrix, run the specified type of regression and save all of the resulting p-values.
:param fm: The phewas feature matrix.
:param genotypes: A pandas DataFrame of the genotype file.
:param covariates: The covariates that the function is to be run on.
:returns: A tuple containing indices, p-values, and all the regression data.
"""
m = len(fm[0, 0])
p_values = np.zeros(m, dtype=float)
icodes = []
# store all of the pertinent data from the regressions
regressions = pd.DataFrame(columns=output_columns)
control = fm[0][genotypes.genotype == 0, :]
disease = fm[0][genotypes.genotype == 1, :]
inds = np.where((control.any(axis=0) & ~disease.any(axis=0)) | (~control.any(axis=0) & disease.any(axis=0)))[0]
# genotypes.loc[genotypes['sex'] == 'M', 'sex'] = 1
# genotypes.loc[genotypes['sex'] == 'F', 'sex'] = 0
for index in range(m):
phen_vector1 = fm[0][:, index]
phen_vector2 = fm[1][:, index]
phen_vector3 = fm[2][:, index]
if np.where(phen_vector1>0)[0].shape[0]>5:
# if index in inds:
# # print index
# res = calculate_odds_ratio(genotypes, phen_vector1, phen_vector2, reg_type, covariates, lr=1, response=response,
# phen_vector3=phen_vector3)
# else:
res = calculate_odds_ratio(genotypes, phen_vector1, phen_vector2, reg_type, covariates, lr=0,
response=response,
phen_vector3=phen_vector3)
else:
odds = 0
p = 1
od = [-0.0, 1.0, 0.0, np.nan]
res = (odds, p, od)
# save all of the regression data
phewas_info = get_icd_info(index)
stat_info = res[2]
info = phewas_info[0:2] +stat_info + [phewas_info[2]]
regressions.loc[index] = info
p_values[index] = res[1]
return (np.array(range(m)), p_values, regressions)
def run_phewas(fm, genotypes, covariates, reg_type, response='', phewas_cov=''): # same
"""
For each phewas code in the feature matrix, run the specified type of regression and save all of the resulting p-values.
:param fm: The phewas feature matrix.
:param genotypes: A pandas DataFrame of the genotype file.
:param covariates: The covariates that the function is to be run on.
:returns: A tuple containing indices, p-values, and all the regression data.
"""
m = len(fm[0, 0])
p_values = np.zeros(m, dtype=float)
icodes = []
# store all of the pertinent data from the regressions
regressions = pd.DataFrame(columns=output_columns)
control = fm[0][genotypes.genotype == 0, :]
disease = fm[0][genotypes.genotype == 1, :]
inds = np.where((control.any(axis=0) & ~disease.any(axis=0)) | (~control.any(axis=0) & disease.any(axis=0)))[0]
for index in range(m):
phen_vector1 = fm[0][:, index]
phen_vector2 = fm[1][:, index]
phen_vector3 = fm[2][:, index]
if np.where(phen_vector1>0)[0].shape[0]>5:
if index in inds:
# print index
res = calculate_odds_ratio(genotypes, phen_vector1, phen_vector2, reg_type, covariates, lr=1, response=response,
phen_vector3=phen_vector3)
else:
res = calculate_odds_ratio(genotypes, phen_vector1, phen_vector2, reg_type, covariates, lr=0,
response=response,
phen_vector3=phen_vector3)
else:
odds = 0
p = 1
od = [-0.0, 1.0, 0.0, np.nan]
res = (odds, p, od)
# save all of the regression data
phewas_info = get_phewas_info(index)
stat_info = res[2]
info = phewas_info[0:2] +stat_info + [phewas_info[2]]
regressions.loc[index] = info
p_values[index] = res[1]
return (np.array(range(m)), p_values, regressions)
"""
Plotting
"""
def get_x_label_positions(categories, lines=True): # same
"""
This method is used get the position of the x-labels and the lines between the columns
:param categories: list of the categories
:param lines: a boolean which determines the locations returned (either the center of each category or the end)
:type categories:
:type lines: bool
:returns: A list of positions
:rtype: list of ints
"""
tt = Counter(categories)
s = 0
label_positions = []
for _, v in tt.items():
if lines:
inc = v // 2
else:
inc = v
label_positions.append(s + inc)
s += v
return label_positions
def plot_data_points(x, y, thresh0, thresh1, thresh2, thresh_type, save='', path='', imbalances=np.array([])): # same
"""
Plots the data with a variety of different options.
This function is the primary plotting function for pyPhewas.
:param x: an array of indices
:param y: an array of p-values
:param thresh: the threshold power
:param save: the output file to save to (if empty, display the plot)
:param imbalances: a list of imbalances
:type x: numpy array
:type y: numpy array
:type thresh: float
:type save: str
:type imbalances: numpy array
"""
# Determine whether or not to show the imbalance.
fig = plt.figure()
ax = plt.subplot(111)
show_imbalance = imbalances.size != 0
# Sort the phewas codes by category.
c = codes.loc[phewas_codes['index']]
c = c.reset_index()
idx = c.sort_values(by='category').index
# Get the position of the lines and of the labels
# linepos = get_x_label_positions(c['category'].tolist(), False)
# x_label_positions = get_x_label_positions(c['category'].tolist(), True)
# x_labels = c.sort_values('category').category_string.drop_duplicates().tolist()
# Plot each of the points, if necessary, label the points.
e = 1
artists = []
frame1 = plt.gca()
# ax.axhline(y=-math.log10(0.05), color='yellow', ls='dotted')
ax.axhline(y=thresh0, color='red', ls='dotted')
ax.axhline(y=thresh1, color='yellow', ls='dotted')
ax.axhline(y=thresh2, color='orange', ls='dotted')
# ax.xticks(x_label_positions, x_labels, rotation=70, fontsize=10)
# ax.xlim(xmin=0, xmax=len(c))
plt.ylabel('-log10(p)')
if thresh_type == 0:
thresh = thresh0
elif thresh_type == 1:
thresh = thresh1
else:
thresh = thresh2
y_label_positions = [thresh0, thresh1,thresh2]
plt.yticks(y_label_positions, ['Bonf p = ' + '{:.2e}'.format(np.power(10, -thresh0)),
'Benj-Hoch p = ' + str(round(np.power(10, -thresh1), 3)),
'Benj-Hoch-Yek p = ' + str(round(np.power(10, -thresh2), 3))], rotation=10, fontsize=10)
for i in idx:
if y[i] > thresh:
e += 15
if show_imbalance: # and imbalances[i]>0:
# if imbalances[i]>0:
artists.append(ax.text(e, y[i], c['phewas_string'][i], rotation=89, va='bottom', fontsize=8))
# else:
# artists.append(ax.text(e, -y[i], c['phewas_string'][i], rotation=271, va='top',fontsize=8))
elif not show_imbalance:
artists.append(ax.text(e, y[i], c['phewas_string'][i], rotation=40, va='bottom'))
else:
e += 0
if show_imbalance:
if y[i] > thresh:
if imbalances[i] > 0:
ax.plot(e, y[i], '+', color=plot_colors[c[i:i + 1].category_string.values[0]], fillstyle='full',
markeredgewidth=1.5)
else:
# ax.plot(e,y[i],'o', color=plot_colors[c[i:i+1].category_string.values[0]], fillstyle='full', markeredgewidth=0.0)
ax.plot(e, y[i], '_', color=plot_colors[c[i:i + 1].category_string.values[0]], fillstyle='full',
markeredgewidth=1.5)
else:
ax.plot(e, y[i], 'o', color=plot_colors[c[i:i + 1].category_string.values[0]], fillstyle='full',
markeredgewidth=0.0)
line1 = []
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height*0.05, box.width, box.height*0.95])
for lab in plot_colors.keys():
line1.append(
mlines.Line2D(range(1), range(1), color="white", marker='o', markerfacecolor=plot_colors[lab], label=lab))
artists.append(ax.legend(handles=line1, bbox_to_anchor=(0.5, 0), loc='upper center', fancybox=True, ncol=4, prop={'size': 6}))
ax.axhline(y=0, color='black')
frame1.axes.get_xaxis().set_visible(False)
# If the imbalance is to be shown, draw lines to show the categories.
# if show_imbalance:
# for pos in linepos:
# ax.axvline(x=pos, color='black', ls='dotted')
# Determine the type of output desired (saved to a plot or displayed on the screen)
if save:
pdf = PdfPages(path + save)
pdf.savefig(bbox_extra_artists=artists, bbox_inches='tight')
pdf.close()
else:
plt.subplots_adjust(left=0.05, right=0.85)
plt.show()
# Clear the plot in case another plot is to be made.
plt.clf()
def plot_odds_ratio(y, p, thresh0, thresh1, thresh2, thresh_type, save='', path='', imbalances=np.array([])): # same
"""
Plots the data with a variety of different options.
This function is the primary plotting function for pyPhewas.
:param x: an array of indices
:param y: an array of p-values
:param thresh: the threshold power
:param save: the output file to save to (if empty, display the plot)
:param imbalances: a list of imbalances
:type x: numpy array
:type y: numpy array
:type thresh: float
:type save: str
:type imbalances: numpy array
"""
# Determine whether or not to show the imbalance.
fig = plt.figure()
ax = plt.subplot(111)
show_imbalance = imbalances.size != 0
# Sort the phewas codes by category.
c = codes.loc[phewas_codes['index']]
c = c.reset_index()
idx = c.sort_values(by='category').index
# Get the position of the lines and of the labels
# linepos = get_x_label_positions(c['category'].tolist(), False)
# x_label_positions = get_x_label_positions(c['category'].tolist(), True)
# x_labels = c.sort_values('category').category_string.drop_duplicates().tolist()
# Plot each of the points, if necessary, label the points.
e = 1
artists = []
frame1 = plt.gca()
# ax.xticks(x_label_positions, x_labels, rotation=70, fontsize=10)
plt.xlabel('Log odds ratio')
if thresh_type == 0:
thresh = thresh0
elif thresh_type == 1:
thresh = thresh1
else:
thresh = thresh2
# plt.xlim(xmin=min(y[p>thresh,1]), xmax=max(y[p>thresh,2]))
for i in idx:
if p[i] > thresh:
e += 15
if show_imbalance: # and imbalances[i]>0:
if imbalances[i] > 0:
artists.append(ax.text(y[i][0], e, c['phewas_string'][i], rotation=0, ha='left', fontsize=6))
else:
artists.append(ax.text(y[i][0], e, c['phewas_string'][i], rotation=0, ha='right', fontsize=6))
elif not show_imbalance:
artists.append(ax.text(e, y[i][0], c['phewas_string'][i], rotation=40, va='bottom'))
else:
e += 0
if show_imbalance:
if p[i] > thresh:
ax.plot(y[i][0], e, 'o', color=plot_colors[c[i:i + 1].category_string.values[0]], fillstyle='full',
markeredgewidth=0.0)
ax.plot([y[i, 1], y[i, 2]], [e, e], color=plot_colors[c[i:i + 1].category_string.values[0]])
# else:
# ax.plot(e,y[i],'o', color=plot_colors[c[i:i+1].category_string.values[0]], fillstyle='full', markeredgewidth=0.0)
# ax.plot(e,-y[i],'o', color=plot_colors[c[i:i+1].category_string.values[0]], fillstyle='full', markeredgewidth=0.0)
else:
ax.plot(e, y[i], 'o', color=plot_colors[c[i:i + 1].category_string.values[0]], fillstyle='full',
markeredgewidth=0.0)
line1 = []
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height*0.05, box.width, box.height*0.95])
for lab in plot_colors.keys():
line1.append(
mlines.Line2D(range(1), range(1), color="white", marker='o', markerfacecolor=plot_colors[lab], label=lab))
artists.append(ax.legend(handles=line1, bbox_to_anchor=(0.5, -0.15), loc='upper center', fancybox=True, ncol=4, prop={'size': 6}))
ax.axvline(x=0, color='black')
frame1.axes.get_yaxis().set_visible(False)
# If the imbalance is to be shown, draw lines to show the categories.
# if show_imbalance:
# for pos in linepos:
# ax.axvline(x=pos, color='black', ls='dotted')
# Determine the type of output desired (saved to a plot or displayed on the screen)
if save:
pdf = PdfPages(path + save)
pdf.savefig(bbox_extra_artists=artists, bbox_inches='tight')
pdf.close()
else:
plt.subplots_adjust(left=0.05, right=0.85)
plt.show()
# Clear the plot in case another plot is to be made.
plt.clf()
def calculate_odds_ratio(genotypes, phen_vector1, phen_vector2, reg_type, covariates, lr=0, response='',
phen_vector3=''): # diff - done
"""
Runs the regression for a specific phenotype vector relative to the genotype data and covariates.
:param genotypes: a DataFrame containing the genotype information
:param phen_vector: an array containing the phenotype vector
:param covariates: a string containing all desired covariates
:type genotypes: pandas DataFrame
:type phen_vector: numpy array
:type covariates: string
.. note::
The covariates must be a string that is delimited by '+', not a list.
If you are using a list of covariates and would like to convert it to the pyPhewas format, use the following::
l = ['genotype', 'age'] # a list of your covariates
covariates = '+'.join(l) # pyPhewas format
The covariates that are listed here *must* be headers to your genotype CSV file.
"""
data = genotypes
data['y'] = phen_vector1
data['MaxAgeAtICD'] = phen_vector2
# data[data['SEX']>0]=0
if response:
f = response + '~ genotype + ' + covariates
if phen_vector3.any():
data['phe'] = phen_vector3
f = response + '~ genotype + phe +' + covariates
else:
f = 'genotype~y+' + covariates
if phen_vector3.any():
data['phe'] = phen_vector3
f = 'genotype ~ phe +' + covariates
try:
#if reg_type == 0:
if lr == 0:
logreg = smf.logit(f, data).fit(disp=False)
# logit = sm.Logit(data['genotype'], data[['y', 'MaxAgeAtVisit', 'sex']])
# lf = logit.fit_regularized(method='l1', alpha=0.9,disp=0,trim_mode='size',qc_verbose=0)
p = logreg.pvalues.y
# p = lf.pvalues.y
odds = 0 # logreg.deviance
conf = logreg.conf_int()
# conf = lf.conf_int()
od = [-math.log10(p), p, logreg.params.y, '[%s,%s]' % (conf[0]['y'], conf[1]['y'])]
# od = [-math.log10(p), p, lf.params.y, '[%s,%s]' % (conf[0]['y'], conf[1]['y'])]
# od=[np.nan,np.nan,np.nan]
#elif reg_type > 3:
elif lr == 1:
# linreg = smf.logit(f, data).fit(disp=False)
logit = sm.Logit(data['genotype'], data[['y', 'MaxAgeAtICD', 'sex']])
lf = logit.fit_regularized(method='l1', alpha=1, disp=0,trim_mode='size',qc_verbose=0)
# p = linreg.pvalues.y
p = lf.pvalues.y
odds = 0
# conf = linreg.conf_int()
conf = lf.conf_int()
# od = [-math.log10(p), p, linreg.params.y, '[%s,%s]' % (conf[0]['y'], conf[1]['y'])]
od = [-math.log10(p), p, lf.params.y, '[%s,%s]' % (conf[0]['y'], conf[1]['y'])]
else:
linreg = smf.logit(f, data).fit(method='bfgs', disp=False)
# logit = sm.Logit(data['genotype'], data[['y', 'MaxAgeAtVisit', 'sex']])
# lf = logit.fit_regularized(method='l1', alpha=0.7, disp=0,trim_mode='size',qc_verbose=0)
p = linreg.pvalues.y
# p = lf.pvalues.y
odds = 0
conf = linreg.conf_int()
# conf = lf.conf_int()
od = [-math.log10(p), p, linreg.params.y, '[%s,%s]' % (conf[0]['y'], conf[1]['y'])]
# od = [-math.log10(p), p, lf.params.y, '[%s,%s]' % (conf[0]['y'], conf[1]['y'])]
except:
odds = 0
p = np.nan
od = [np.nan, np.nan, np.nan, np.nan]
return (odds, p, od)
"""
Begin init code
"""
test = 1
codes = get_codes()
phewas_codes = pd.DataFrame(codes['phewas_code'].drop_duplicates());
phewas_codes = phewas_codes.reset_index()
icd_codes = pd.DataFrame(codes['icd9'].drop_duplicates());
icd_codes = icd_codes.reset_index()
output_columns = ['PheWAS Code',
'PheWAS Name',
'\"-log(p)\"',
'p-val',
'beta',
'Conf-interval beta',
'ICD-9']
plot_colors = {'-': 'gold',
'circulatory system': 'red',
'congenital anomalies': 'mediumspringgreen',
'dermatologic': 'seagreen',
'digestive': 'yellowgreen',
'endocrine/metabolic': 'darkred',
'genitourinary': 'darkkhaki',
'hematopoietic': 'orange',
'infectious diseases': 'blue',
'injuries & poisonings': 'slategray',
'mental disorders': 'fuchsia',
'musculoskeletal': 'darkgreen',
'neoplasms': 'teal',
'neurological': 'olive',
'pregnancy complications': 'peachpuff',
'respiratory': 'brown',
'sense organs': 'darkviolet',
'symptoms': 'aqua'}
imbalance_colors = {
0: 'white',
1: 'deepskyblue',
-1: 'red'
}
gen_ftype = 0
neglogp = np.vectorize(lambda x: -math.log10(x) if x != 0 else 0)
def phewas(path, filename, groupfile, covariates, response='', phewas_cov='', reg_type=0, thresh_type=0, control_age=0,
save='', saveb='', output='', show_imbalance=False): # same
"""
The main phewas method. Takes a path, filename, groupfile, and a variety of different options.
:param path: the path to the file that contains the phenotype data
:param filename: the name of the phenotype file.
:param groupfile: the name of the genotype file.
:param covariates: a list of covariates.
:param reg_type: the type of regression to be used
:param thresh_type: the type of threshold to be used
:param save: the desired filename to save the phewas plot
:param output: the desired filename to save the regression output
:param show_imbalance: determines whether or not to show the imbalance
:type path: str
:type filename: str
:type groupfile: str
:type covariates: str
:type reg_type: int
:type thresh_type: int
:type save: str
:type output: str
:type show_imbalance: bool
"""
start_time = time.time()
global codes, phewas_codes, icd_codes, gen_ftype, neglogp
print("reading in data")
gen_ftype = reg_type
phenotypes = get_input(path, filename,reg_type)
genotypes = get_group_file(path, groupfile)
fm = generate_feature_matrix(genotypes, phenotypes, reg_type, phewas_cov)
# print(len(fm))
if response:
results = run_phewas(fm, genotypes, covariates, reg_type, response=response, phewas_cov=phewas_cov)
else:
results = run_phewas(fm, genotypes, covariates, reg_type, phewas_cov=phewas_cov)
regressions = results[2]
normalized = neglogp(results[1])
# if thresh_type==0:
thresh0 = get_bon_thresh(normalized, 0.05)
# elif thresh_type==1:
thresh1 = get_fdr_thresh(results[1], 0.05)
thresh2 = get_bhy_thresh(results[1], 0.05)
imbalances = np.array([])
if show_imbalance:
imbalances = get_imbalances(regressions)
plot_data_points(results[0], normalized, -math.log10(thresh0), -math.log10(thresh1),-math.log10(thresh2), thresh_type, save, path,
imbalances)
try:
regressions[['lowlim', 'uplim']] = regressions['Conf-interval beta'].str.split(',', expand=True)
regressions.uplim = regressions.uplim.str.replace(']', '')
regressions.lowlim = regressions.lowlim.str.replace('[', '')
y = regressions[['beta', 'lowlim', 'uplim']].as_matrix()
y = y.astype(float)
plot_odds_ratio(y, normalized, -math.log10(thresh0), -math.log10(thresh1), -math.log10(thresh2), thresh_type,
saveb, path, imbalances)
except:
print('no corr')
sig_regressions = regressions.dropna(subset=['"-log(p)"']).sort_values('"-log(p)"', ascending=False)
if thresh_type == 0:
sig_regressions = sig_regressions[sig_regressions['"-log(p)"'] > -math.log10(thresh0)]
elif thresh_type==1:
sig_regressions = sig_regressions[sig_regressions['"-log(p)"'] > -math.log10(thresh1)]
else:
sig_regressions = sig_regressions[sig_regressions['"-log(p)"'] > -math.log10(thresh2)]
if output:
sig_regressions.to_csv(path + output, index=False)
return (results[0], results[1], regressions)
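# End-to-end usage sketch (illustrative; the directory and file names below are
# placeholders, not files shipped with this module):
def _demo_phewas_run():
    return phewas(path='/data/study/',          # hypothetical data directory
                  filename='phenotypes.csv',    # hypothetical phenotype file
                  groupfile='genotypes.csv',    # hypothetical genotype/group file
                  covariates='sex',             # '+'-delimited string of genotype-file columns
                  reg_type=1,                   # 0 = binary, 1 = count, 2 = duration features
                  thresh_type=0,                # 0 = Bonferroni, 1 = Benjamini-Hochberg, 2 = BHY
                  save='manhattan.pdf',
                  saveb='odds_ratio.pdf',
                  output='regressions.csv')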
|
|
"""Solvers for the linear and quadratic programs in active subspaces."""
import numpy as np
import logging
from scipy.optimize import linprog, minimize
# checking to see if system has gurobi
try:
HAS_GUROBI = True
import gurobipy as gpy
except ImportError:
HAS_GUROBI = False
pass
# string constants for QP solver names
solver_SCIPY = 'SCIPY'
solver_GUROBI = 'GUROBI'
class QPSolver():
"""
A class for solving linear and quadratic programs.
:cvar str solver: Identifies which linear program software to use.
**Notes**
The class checks to see if Gurobi is present. If it is, it uses Gurobi to
solve the linear and quadratic programs. Otherwise, it uses scipy
implementations to solve the linear and quadratic programs.
"""
solver = None
def __init__(self, solver='GUROBI'):
"""
Initialize a QPSolver.
:param str solver: Identifies which linear program software to use.
Options are 'GUROBI' and 'SCIPY'.
"""
if solver==solver_GUROBI and HAS_GUROBI:
self.solver = solver_GUROBI
elif solver=='SCIPY':
self.solver = solver_SCIPY
else:
logging.getLogger(__name__).debug('QP solver {} is not available. Using scipy optimization package.'.format(solver))
self.solver = solver_SCIPY
def linear_program_eq(self, c, A, b, lb, ub):
"""
Solves an equality constrained linear program with variable bounds.
:param ndarray c: m-by-1 matrix for the linear objective function.
:param ndarray A: M-by-m matrix that contains the coefficients of the
linear equality constraints.
:param ndarray b: M-by-1 matrix that is the right hand side of the
equality constraints.
:param ndarray lb: m-by-1 matrix that contains the lower bounds on the
variables.
:param ndarray ub: m-by-1 matrix that contains the upper bounds on the
variables.
:return: x, m-by-1 matrix that is the minimizer of the linear program.
:rtype: ndarray
**Notes**
This method returns the minimizer of the following linear program.
minimize c^T x
subject to A x = b
lb <= x <= ub
"""
logging.getLogger(__name__).debug('Linear program with {:d} variables and {:d} equality constraints using {}'.format(A.shape[1], A.shape[0], self.solver))
if self.solver == solver_SCIPY:
return _scipy_linear_program_eq(c, A, b, lb, ub)
elif self.solver == solver_GUROBI:
return _gurobi_linear_program_eq(c, A, b, lb, ub)
else:
raise ValueError('QP solver {} not available'.format(self.solver))
def linear_program_ineq(self, c, A, b):
"""
Solves an inequality constrained linear program.
:param ndarray c: m-by-1 matrix for the linear objective function.
:param ndarray A: M-by-m matrix that contains the coefficients of the
linear inequality constraints.
:param ndarray b: M-by-1 matrix that is the right hand side of the
inequality constraints.
:return: x, m-by-1 matrix that is the minimizer of the linear program.
:rtype: ndarray
**Notes**
This method returns the minimizer of the following linear program.
minimize c^T x
subject to A x >= b
"""
logging.getLogger(__name__).debug('Linear program with {:d} variables and {:d} inequality constraints using {}'.format(A.shape[1], A.shape[0], self.solver))
if self.solver == solver_SCIPY:
return _scipy_linear_program_ineq(c, A, b)
elif self.solver == solver_GUROBI:
return _gurobi_linear_program_ineq(c, A, b)
else:
raise ValueError('QP solver {} not available'.format(self.solver))
def quadratic_program_bnd(self, c, Q, lb, ub):
"""
Solves a quadratic program with variable bounds.
:param ndarray c: m-by-1 matrix that contains the coefficients of the
linear term in the objective function.
:param ndarray Q: m-by-m matrix that contains the coefficients of the
quadratic term in the objective function.
:param ndarray lb: m-by-1 matrix that contains the lower bounds on the
variables.
:param ndarray ub: m-by-1 matrix that contains the upper bounds on the
variables.
:return: x, m-by-1 matrix that is the minimizer of the quadratic program.
:rtype: ndarray
**Notes**
This method returns the minimizer of the following quadratic program.
minimize c^T x + x^T Q x
subject to lb <= x <= ub
"""
logging.getLogger(__name__).debug('Quadratic program with {:d} variables using {}'.format(Q.shape[0], self.solver))
if self.solver == solver_SCIPY:
return _scipy_quadratic_program_bnd(c, Q, lb, ub)
elif self.solver == solver_GUROBI:
return _gurobi_quadratic_program_bnd(c, Q, lb, ub)
else:
raise ValueError('QP solver {} not available'.format(self.solver))
def quadratic_program_ineq(self, c, Q, A, b):
"""
Solves an inequality constrained quadratic program with variable bounds.
:param ndarray c: m-by-1 matrix that contains the coefficients of the
linear term in the objective function.
:param ndarray Q: m-by-m matrix that contains the coefficients of the
quadratic term in the objective function.
:param ndarray A: M-by-m matrix that contains the coefficients of the
linear inequality constraints.
:param ndarray b: M-by-1 matrix that is the right hand side of the
inequality constraints.
:return: x, m-by-1 matrix that is the minimizer of the quadratic program.
:rtype: ndarray
**Notes**
This method returns the minimizer of the following quadratic program.
minimize c^T x + x^T Q x
subject to A x >= b
"""
logging.getLogger(__name__).debug('Quadratic program with {:d} variables and {:d} inequality constraints using {}'.format(A.shape[1], A.shape[0], self.solver))
if self.solver == solver_SCIPY:
return _scipy_quadratic_program_ineq(c, Q, A, b)
elif self.solver == solver_GUROBI:
return _gurobi_quadratic_program_ineq(c, Q, A, b)
else:
raise ValueError('QP solver {} not available'.format(self.solver))
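# Minimal usage sketch (illustrative): minimize c^T x subject to A x = b and
# 0 <= x <= 1 with the scipy backend; all vectors are column-shaped (m-by-1)
# as required by the docstrings above.
def _demo_qpsolver():
    c = np.array([[1.0], [2.0]])
    A = np.array([[1.0, 1.0]])
    b = np.array([[1.0]])
    lb = np.zeros((2, 1))
    ub = np.ones((2, 1))
    return QPSolver(solver='SCIPY').linear_program_eq(c, A, b, lb, ub)  # expected x = [[1.], [0.]]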
def _scipy_linear_program_eq(c, A, b, lb, ub):
c = c.reshape((c.size,))
b = b.reshape((b.size,))
# make bounds
bounds = []
for i in range(lb.size):
bounds.append((lb[i,0], ub[i,0]))
res = linprog(c, A_eq=A, b_eq=b, bounds=bounds, options={"disp": False})
if res.success:
return res.x.reshape((c.size,1))
else:
np.savez('bad_scipy_lp_eq_{:010d}'.format(np.random.randint(int(1e9))),
c=c, A=A, b=b, lb=lb, ub=ub, res=res)
raise Exception('Scipy did not solve the LP. Blame Scipy.')
return None
def _scipy_linear_program_ineq(c, A, b):
c = c.reshape((c.size,))
b = b.reshape((b.size,))
# make unbounded bounds
bounds = []
for i in range(c.size):
bounds.append((None, None))
A_ub, b_ub = -A, -b
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, options={"disp": False})
if res.success:
return res.x.reshape((c.size,1))
else:
np.savez('bad_scipy_lp_ineq_{:010d}'.format(np.random.randint(int(1e9))),
c=c, A=A, b=b, res=res)
raise Exception('Scipy did not solve the LP. Blame Scipy.')
return None
def _scipy_quadratic_program_bnd(c, Q, lb, ub):
# define the objective and gradient
def fun(x):
f = np.dot(x, c) + np.dot(x, np.dot(Q, x.T))
return f[0]
def jac(x):
j = c.T + 2.0*np.dot(x, Q)
return j[0]
# make bounds
bounds = []
for i in range(lb.size):
bounds.append((lb[i,0],ub[i,0]))
x0 = np.zeros((c.size,))
res = minimize(fun, x0, method='L-BFGS-B', jac=jac,
bounds=bounds, options={"disp": False})
if res.success:
xstar = res.x
if isinstance(xstar, float):
xstar = np.array([[xstar]])
return xstar.reshape((c.size,1))
else:
np.savez('bad_scipy_qp_bnd_{:010d}'.format(np.random.randint(int(1e9))),
c=c, Q=Q, lb=lb, ub=ub, res=res)
raise Exception('Scipy did not solve the QP. Blame Scipy.')
return None
def _scipy_quadratic_program_ineq(c, Q, A, b):
b = b.reshape((b.size,))
# define the objective and gradient
def fun(x):
f = np.dot(x, c) + np.dot(x, np.dot(Q, x.T))
return f[0]
def jac(x):
j = c.T + 2.0*np.dot(x, Q)
return j[0]
# inequality constraints
cons = ({'type':'ineq',
'fun' : lambda x: np.dot(A, x) - b,
'jac' : lambda x: A})
x0 = np.zeros((c.size,))
res = minimize(fun, x0, method='SLSQP', jac=jac,
constraints=cons, options={"disp": False})
if res.success:
xstar = res.x
if isinstance(xstar, float):
xstar = np.array([[xstar]])
return xstar.reshape((c.size,1))
else:
np.savez('bad_scipy_qp_ineq_{:010d}'.format(np.random.randint(int(1e9))),
c=c, Q=Q, A=A, b=b, res=res)
raise Exception('Scipy did not solve the QP. Blame Scipy.')
return None
def _gurobi_linear_program_eq(c, A, b, lb, ub):
m,n = A.shape
model = gpy.Model()
model.setParam('OutputFlag', 0)
# Add variables to model
vars = []
for j in range(n):
vars.append(model.addVar(lb=lb[j,0], ub=ub[j,0], vtype=gpy.GRB.CONTINUOUS))
model.update()
# Populate linear constraints
for i in range(m):
expr = gpy.LinExpr()
for j in range(n):
expr += A[i,j]*vars[j]
model.addConstr(expr, gpy.GRB.EQUAL, b[i,0])
# Populate objective
obj = gpy.LinExpr()
for j in range(n):
obj += c[j,0]*vars[j]
model.setObjective(obj)
model.update()
# Solve
model.optimize()
if model.status == gpy.GRB.OPTIMAL:
return np.array(model.getAttr('x', vars)).reshape((n,1))
else:
np.savez('bad_gurobi_lp_eq_{:010d}'.format(np.random.randint(int(1e9))),
c=c, A=A, b=b, lb=lb, ub=ub, model=model)
raise Exception('Gurobi did not solve the LP. Blame Gurobi.')
return None
def _gurobi_linear_program_ineq(c, A, b):
m,n = A.shape
model = gpy.Model()
model.setParam('OutputFlag', 0)
# Add variables to model
vars = []
for j in range(n):
vars.append(model.addVar(lb=-gpy.GRB.INFINITY,
ub=gpy.GRB.INFINITY, vtype=gpy.GRB.CONTINUOUS))
model.update()
# Populate linear constraints
for i in range(m):
expr = gpy.LinExpr()
for j in range(n):
expr += A[i,j]*vars[j]
model.addConstr(expr, gpy.GRB.GREATER_EQUAL, b[i,0])
# Populate objective
obj = gpy.LinExpr()
for j in range(n):
obj += c[j,0]*vars[j]
model.setObjective(obj)
model.update()
# Solve
model.optimize()
if model.status == gpy.GRB.OPTIMAL:
return np.array(model.getAttr('x', vars)).reshape((n,1))
else:
np.savez('bad_gurobi_lp_ineq_{:010d}'.format(np.random.randint(int(1e9))),
c=c, A=A, b=b, model=model)
raise Exception('Gurobi did not solve the LP. Blame Gurobi.')
return None
def _gurobi_quadratic_program_bnd(c, Q, lb, ub):
n = Q.shape[0]
model = gpy.Model()
model.setParam('OutputFlag', 0)
# Add variables to model
vars = []
for j in range(n):
vars.append(model.addVar(lb=lb[j,0], ub=ub[j,0], vtype=gpy.GRB.CONTINUOUS))
model.update()
# Populate objective
obj = gpy.QuadExpr()
for i in range(n):
for j in range(n):
obj += Q[i,j]*vars[i]*vars[j]
for j in range(n):
obj += c[j,0]*vars[j]
model.setObjective(obj)
model.update()
# Solve
model.optimize()
if model.status == gpy.GRB.OPTIMAL:
return np.array(model.getAttr('x', vars)).reshape((n,1))
else:
np.savez('bad_gurobi_qp_bnd_{:010d}'.format(np.random.randint(int(1e9))),
c=c, Q=Q, lb=lb, ub=ub, model=model)
raise Exception('Gurobi did not solve the QP. Blame Gurobi.')
return None
def _gurobi_quadratic_program_ineq(c, Q, A, b):
m,n = A.shape
model = gpy.Model()
model.setParam('OutputFlag', 0)
# Add variables to model
vars = []
for j in range(n):
vars.append(model.addVar(lb=-gpy.GRB.INFINITY,
ub=gpy.GRB.INFINITY, vtype=gpy.GRB.CONTINUOUS))
model.update()
# Populate linear constraints
for i in range(m):
expr = gpy.LinExpr()
for j in range(n):
expr += A[i,j]*vars[j]
model.addConstr(expr, gpy.GRB.GREATER_EQUAL, b[i,0])
# Populate objective
obj = gpy.QuadExpr()
for i in range(n):
for j in range(n):
obj += Q[i,j]*vars[i]*vars[j]
for j in range(n):
obj += c[j,0]*vars[j]
model.setObjective(obj)
model.update()
# Solve
model.optimize()
if model.status == gpy.GRB.OPTIMAL:
return np.array(model.getAttr('x', vars)).reshape((n,1))
else:
np.savez('bad_gurobi_qp_ineq_{:010d}'.format(np.random.randint(int(1e9))),
c=c, Q=Q, A=A, b=b, model=model)
raise Exception('Gurobi did not solve the QP. Blame Gurobi.')
return None
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import sys
import unittest
from iptest import IronPythonTestCase, is_cli, run_test
def ifilter(iterable):
def predicate(x):
return x % 3
for x in iterable:
if predicate(x):
yield x
def ifilterfalse(iterable):
def predicate(x):
return x % 3
for x in iterable:
if not predicate(x):
yield x
class GeneratorTest(IronPythonTestCase):
def test_simple_generators(self):
ll = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
x = ifilter(ll)
l = []
for i in x: l.append(i)
x = ifilterfalse(ll)
self.assertTrue(l == [1,2,4,5,7,8,10,11,13,14,16,17,19,20])
l = []
for i in x: l.append(i)
self.assertTrue(l == [3,6,9,12,15,18])
def test_generator_expressions(self):
self.assertEqual(sum(i+i for i in range(100) if i < 50), 2450)
self.assertEqual(list((i,j) for i in range(2) for j in range(3)), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)])
self.assertEqual(list((i,j) for i in range(2) for j in range(i+1)), [(0, 0), (1, 0), (1, 1)])
self.assertEqual([x for x, in [(1,)]], [1])
i = 10
self.assertEqual(sum(i+i for i in range(1000) if i < 50), 2450)
self.assertEqual(i, 10)
g = (i+i for i in range(10))
self.assertEqual(list(g), [0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
g = (i+i for i in range(3))
self.assertEqual(next(g), 0)
self.assertEqual(next(g), 2)
self.assertEqual(next(g), 4)
self.assertRaises(StopIteration, g.__next__)
self.assertRaises(StopIteration, g.__next__)
self.assertRaises(StopIteration, g.__next__)
self.assertEqual(list(g), [])
def f(n):
return (i+i for i in range(n) if i < 50)
self.assertEqual(sum(f(100)), 2450)
self.assertEqual(list(f(10)), [0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
self.assertEqual(sum(f(10)), 90)
def f(n):
return ((i,j) for i in range(n) for j in range(i))
self.assertEqual(list(f(3)), [(1, 0), (2, 0), (2, 1)])
def test_nested_generators(self):
def outergen():
def innergen():
yield i
for j in range(i):
yield j
for i in range(10):
yield (i, innergen())
for a,b in outergen():
self.assertEqual(a, next(b))
self.assertEqual(list(range(a)), list(b))
def f():
yield "Import inside generator"
self.assertEqual(next(f()), "Import inside generator")
def xgen():
try:
yield 1
except:
pass
else:
yield 2
self.assertEqual([ i for i in xgen()], [1,2])
def xgen2(x):
yield "first"
try:
yield "try"
if x > 3:
raise AssertionError("x > 3")
100 / x
yield "try 2"
except AssertionError:
yield "error"
yield "error 2"
except:
yield "exc"
yield "exc 2"
else:
yield "else"
yield "else 2"
yield "last"
def testxgen2(x, r):
self.assertEqual(list(xgen2(x)), r)
testxgen2(0, ['first', 'try', 'exc', 'exc 2', 'last'])
testxgen2(1, ['first', 'try', 'try 2', 'else', 'else 2', 'last'])
testxgen2(2, ['first', 'try', 'try 2', 'else', 'else 2', 'last'])
testxgen2(3, ['first', 'try', 'try 2', 'else', 'else 2', 'last'])
testxgen2(4, ['first', 'try', 'error', 'error 2', 'last'])
def xgen3():
yield "first"
try:
pass
finally:
yield "fin"
yield "fin 2"
yield "last"
self.assertEqual(list(xgen3()), ['first', 'fin', 'fin 2', 'last'])
self.assertEqual(type(xgen), type(xgen2))
self.assertEqual(type(ifilter), type(xgen3))
def test_more_nested_generators(self):
def f():
def g():
def xx():
return x
def yy():
return y
def zz():
return z
def ii():
return i
yield xx()
yield yy()
yield zz()
for i in [11, 12, 13]:
yield ii()
x = 1
y = 2
z = 3
return g()
self.assertEqual(list(f()), [1, 2, 3, 11, 12, 13])
def test_generator_finally(self):
def yield_in_finally_w_exception():
try:
1/0
finally:
yield 1
yield 2
yield 3
n = yield_in_finally_w_exception()
self.assertEqual(next(n), 1)
self.assertEqual(next(n), 2)
self.assertEqual(next(n), 3)
self.assertRaises(ZeroDivisionError, n.__next__)
def yield_in_finally_w_exception_2():
try:
1/0
finally:
yield 1
yield 2
raise AssertionError()
yield 3
n = yield_in_finally_w_exception_2()
self.assertEqual(next(n), 1)
self.assertEqual(next(n), 2)
self.assertRaises(AssertionError, n.__next__)
def test_generator_exp():
l = ((1,2),(3, 4))
return (x for x,y in l)
self.assertEqual(list(test_generator_exp()), [1, 3])
def test_generator_exceptions(self):
def nested_yield_1():
try:
yield 1
try:
yield 2
yield 3
except:
raise AssertionError()
else:
yield 4
yield 5
finally:
yield 6
yield 7
1/0
except:
yield 8
yield 9
try:
yield 10
yield 11
except:
raise AssertionError()
else:
yield 12
yield 13
finally:
yield 14
yield 15
yield 32
else:
raise AssertionError()
finally:
yield 30
try:
yield 23
yield 24
except:
raise AssertionError()
else:
yield 25
yield 26
finally:
yield 27
yield 28
yield 29
yield 33
self.assertEqual(list(nested_yield_1()), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32, 30, 23, 24, 25, 26, 27, 28, 29, 33])
def nested_yield_2():
try:
pass
except:
raise AssertionError()
else:
yield 1
try:
yield 2
yield 3
except:
raise AssertionError()
else:
yield 4
yield 5
finally:
yield 6
yield 7
finally:
yield 8
yield 9
yield 10
self.assertEqual(list(nested_yield_2()), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
def nested_yield_3():
yield 1
try:
yield 2
try:
yield 3
try:
yield 4
try:
yield 5
except:
pass
yield 6
except:
pass
yield 7
except:
pass
yield 8
except:
pass
yield 9
self.assertEqual(list(nested_yield_3()), [1, 2, 3, 4, 5, 6, 7, 8, 9])
def nested_yield_4():
yield 1
try:
1/0
except:
yield 2
try:
yield 3
try:
yield 4
except:
pass
except:
pass
else:
raise AssertionError()
yield 5
self.assertEqual(list(nested_yield_4()), [1, 2, 3, 4, 5])
def test_generator_arg_counts(self):
# Generator methods with varying amounts of local state
def lstate(size):
args = ''
for i in range(size-1):
args = args+('a%i, ' % i)
args = args+('a%i' % (size-1))
func = """
def fetest(%s):
ret = 0
for i in range(%i):
exec('a%%i = a%%i*a%%i' %% (i,i,i))
exec('ret = a%%i' %% i)
yield ret
""" % (args, size)
#print func
d = {'assertEqual':self.assertEqual}
exec(func, d, d)
args = list(range(size)) if is_cli else [0]*size
exec("assertEqual(list(fetest(%s)),%s)" % (str(args)[1:-1], str([x*x for x in args])), d, d)
lstate(1)
lstate(2)
lstate(4)
lstate(8)
lstate(16)
lstate(32)
lstate(64)
lstate(122)
#lstate(123) # CLR bug, can't handle 127 arguments in DynamicMethod
#lstate(124)
#lstate(125) # CLR bug, can't handle 127 arguments in DynamicMethod
lstate(128)
if is_cli:
# CPython doesn't support more than 255 arguments
lstate(256)
#
lstate(512)
def test_iterate_closed(self):
#
# Test that calling on a closed generator throws a StopIteration exception and does not
# do any further execution of the generator. (See codeplex workitem 1402)
#
#
# 1) Test exiting by normal return
l=[0, 0]
def f(l):
l[0] += 1 # side effect
yield 'a'
l[1] += 1 # side effect statement
g=f(l)
self.assertTrue(next(g) == 'a')
self.assertTrue(l == [1,0]) # should not have executed past yield
self.assertRaises(StopIteration, g.__next__)
self.assertTrue(l == [1,1]) # now should have executed
# Generator is now closed, future calls should just keep throwing
self.assertRaises(StopIteration, g.__next__)
self.assertTrue(l == [1,1]) # verify that we didn't execute any more statements
# 2) Now test with exiting via StopIteration exception
l = [0,0]
def f(l):
yield 'c'
l[0] += 1
raise StopIteration
l[1] += 1
g=f(l)
self.assertTrue(next(g) == 'c')
self.assertTrue(l == [0,0])
self.assertRaises(StopIteration, g.__next__)
self.assertTrue(l == [1,0])
# generator is now closed from unhandled exception. Future calls should throw StopIteration
self.assertRaises(StopIteration, g.__next__)
self.assertTrue(l == [1,0]) # verify that we didn't execute any more statements
# repeat enumeration in a comprehension.
# This tests that StopIteration is properly caught and gracefully terminates the generator.
l=[0,0]
self.assertEqual([x for x in f(l)], ['c'])
self.assertEqual(l,[1,0])
# 3) Now test with exiting via throwing an unhandled exception
class MyError(Exception):
pass
l=[0, 0]
def f(l):
l[0] += 1 # side effect
yield 'b'
l[1] += 1 # side effect statement
raise MyError
g=f(l)
self.assertTrue(next(g) == 'b')
self.assertTrue(l == [1,0])
self.assertRaises(MyError, g.__next__)
self.assertTrue(l == [1,1])
# generator is now closed from unhandled exception. Future calls should throw StopIteration
self.assertRaises(StopIteration, g.__next__)
self.assertTrue(l == [1,1]) # verify that we didn't execute any more statements
# repeat enumeration in a comprehension. Unlike case 2, this now fails since the exception
# is MyError instead of StopIteration
l=[0,0]
def g():
return [x for x in f(l)]
self.assertRaises(MyError, g)
self.assertEqual(l,[1,1])
def test_generator_empty_tuple(self):
def f():
yield ()
self.assertEqual(list(f()), [()])
def test_generator_reentrancy(self):
# Test that generator can't be called re-entrantly. This is explicitly called out in Pep 255.
# Any operation should throw a ValueError if called.
def f():
try:
i = next(me) # error: reentrant call! Should throw ValueError, which we can catch.
except ValueError:
yield 7
yield 10
# try again, should still throw
me.send(None)
self.assertTrue(False) # unreachable!
me = f()
self.assertEqual(next(me), 7)
# Generator should still be alive
self.assertEqual(next(me), 10)
self.assertRaises(ValueError, me.__next__)
# since the last call went unhandled, the generator is now closed.
self.assertRaises(StopIteration, me.__next__)
def test_generator_expr_in(self):
self.assertEqual('abc' in (x for x in ('abc', )), True)
def f(): yield 2
self.assertEqual(2 in f(), True)
def test_generator_attrs(self):
expectedAttributes = ['gi_running', 'send', '__next__', '__iter__', '__name__', 'close', 'throw', 'gi_frame', 'gi_code']
if not is_cli: expectedAttributes += ['__del__']
expectedAttributes.sort()
def f(): yield 2
got = set(dir(f())) - set(dir(object))
got = list(got)
got.sort()
self.assertEqual(got, expectedAttributes)
temp_gen = f()
self.assertEqual(f.__code__, temp_gen.gi_code)
def test_cp24031(self):
def f(*args):
return args
self.assertEqual(f(*(x for x in range(2))),
(0, 1))
class KNew(object):
pass
self.assertRaisesMessage(TypeError, "object() takes no parameters",
lambda: KNew(*(i for i in range(10))) != None)
def test_ipy3_gh260(self):
"""https://github.com/IronLanguages/ironpython3/issues/260"""
def gen():
yield 1
x = gen()
self.assertEqual(next(x), 1)
with self.assertRaises(StopIteration) as cm:
next(x)
self.assertIsNone(cm.exception.value)
run_test(__name__)
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
analyze assortativity of the graphs in terms of sentiment
'''
from igraph import *
import networkx as nx
import os
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os
import matplotlib.cm as cm
from collections import defaultdict
import matplotlib
import pandas as pd
from scipy.stats.stats import pearsonr
import seaborn as sns
sns.set(color_codes=True, font_scale=2)
f_in_user_labels = "usr_num_CVs.tab"
##################
f_in_user_taxons = "user_taxons.tab"
f_in_user_concepts = "user_concepts.tab"
f_in_user_entities = "user_entities.tab"
#########################
#
f_in_user_sentiment = "user_sentiment.tab"
#
# mention graph
#########################
f_in_graph_weights = "mention_graph_weights.dat"
########################
IN_DIR = "../../../DATA/CAPITAL/"
os.chdir(IN_DIR)
def read_sem_capital(f_name, tname):
f = open(f_name, "r")
cap = defaultdict(int)
cnt = 0
for line in f:
if tname == 'sentiment':
(vid, vn, val) = line.split('\t')
val = float(val)
else:
(vid, val) = line.split('\t')
val = int(val)
cap[vid] = val
cnt += 1
return cap
# one time call
def save_edge_popularity_and_semantic_capital_diff():
sc = read_sem_capital('entities','entities')
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
fo = open('sem_capital_edge_src_dest_INdeg.dat', 'w')
fow = open('sem_capital_edge_src_dest_weighted_INdeg.dat', 'w')
for e in G.es:
src_id = e.source
dest_id = e.target
src = G.vs[src_id]['name']
dest = G.vs[dest_id]['name']
sc_src = sc[src]
sc_dest = sc[dest]
deg_src = G.degree(src_id, mode=IN)
deg_dest = G.degree(dest_id, mode=IN)
w_deg_src = G.strength(src_id, mode=IN, weights='weight')
w_deg_dest = G.strength(dest_id, mode=IN, weights='weight')
fo.write(str(sc_src) + '\t' + str(sc_dest) + '\t'+ str(deg_src) + '\t' + str(deg_dest) + '\n')
fow.write(str(sc_src) + '\t' + str(sc_dest) + '\t'+ str(w_deg_src) + '\t' + str(w_deg_dest) + '\n')
def read_edge_popularity_and_semantic_capital_diff(f_in):
f = open(f_in, 'r')
res = []
for line in f:
(src_sc,dest_sc,src_IN,dest_IN) = line.split('\t')
res.append((float(src_sc)-float(dest_sc),(float(src_IN)-float(dest_IN))))
return res
def read_edge_semantic_capital_diff(f_in):
f = open(f_in, 'r')
res = []
for line in f:
(src_sc,dest_sc,src_IN,dest_IN) = line.split('\t')
res.append(float(src_sc)-float(dest_sc))
return res
# one time call
def save_edge_popularity_diff():
G = Graph.Read_Ncol(f_in_graph_weights,names=True, directed=True, weights=True)
summary(G)
fo = open('edge_src_dest_INdeg.dat', 'w')
fow = open('edge_src_dest_weighted_INdeg.dat', 'w')
for e in G.es:
src_id = e.source
dest_id = e.target
#src = G.vs[src_id]
#dest = G.vs[dest_id]
deg_src = G.degree(src_id, mode=IN)
deg_dest = G.degree(dest_id, mode=IN)
w_deg_src = G.strength(src_id, mode=IN, weights='weight')
w_deg_dest = G.strength(dest_id, mode=IN, weights='weight')
fo.write(str(deg_src) + '\t' + str(deg_dest) + '\n')
fow.write(str(w_deg_src) + '\t' + str(w_deg_dest) + '\n')
def read_edge_popularity_diff(f_in):
f = open(f_in, 'r')
res = []
for line in f:
(src,dest) = line.split('\t')
res.append(float(src)-float(dest))
return res
def plot_edge_popularity_diff_distr(x, tname):
x = np.array(x)
mu = np.mean(x)
sigma = np.std(x)
med = np.median(x)
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
num_bins = 150
# the histogram of the data
n, bins, patches = plt.hist(x, normed=1, bins=num_bins, histtype='step',color='darkorchid')
#plt.clf() # Get rid of this histogram since not the one we want.
#nx_frac = n/float(len(n)) # Each bin divided by total number of objects.
#width = bins[1] - bins[0] # Width of each bin.
#x = np.ravel(zip(bins[:-1], bins[:-1]+width))
#y = np.ravel(zip(nx_frac,nx_frac))
plt.title(lab)
plt.tight_layout()
#plt.scatter(x,y,color='darkorchid',label=lab)
plt.xlabel('' + tname)
plt.ylabel('p('+ tname +')')
plt.yscale('log')
#plt.xscale('log')
#xint = range(int(min(x)), int(math.ceil(max(x))+1))
#plt.xticks(xint)
plt.xlim(-1000,1000)
#plt.grid(True)
plt.savefig(tname + 'weighted.png')
def plot_edge_popularity_diff_distr_seaborn(x, tname):
x = np.array(x)
mu = np.mean(x)
sigma = np.std(x)
med = np.median(x)
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
xlabel = 'relative social status'
ylabel = 'kde'
fig7s = plt.gcf()
plt.rcParams['figure.figsize']=(8,6)
fig7s.set_size_inches((8,6))
plt.figure(figsize=(8, 6))
print max(x)
z = [xel if xel == 0 else np.log10(abs(xel))*np.sign(xel) for xel in x]
print max(z)
z = np.array(z)
with sns.axes_style("whitegrid"):
"""
g = sns.distplot(z, hist=0, \
#hist_kws={"histtype": "step", "linewidth": 1, "alpha": 0.3, "color": "g"}, \
color="r")
"""
g = sns.kdeplot(z, kernel='cos', c='r')
plt.title(lab)
labels = [r'$ -10^{4} $', r'$ -10^{3} $', r'$ -10^{2} $', \
r'$ -10^{1} $', r'$ 0 $', r'$ 10^1 $', r'$ 10^2 $', \
r'$ 10^3 $', r'$ 10^4 $']
g.set(xticklabels=labels)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tight_layout()
plt.savefig(tname + 'weighted7log.eps', bbox_inches='tight', dpi=550)
def plot_edge_semantic_capital_diff_distr(x, tname):
x = np.array(x)
mu = np.mean(x)
sigma = np.std(x)
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
num_bins = 60
# the histogram of the data
n, bins, patches = plt.hist(x, normed=1, bins=num_bins, histtype='step',color='darkorchid')
plt.title(lab)
plt.tight_layout()
#plt.scatter(x,y,color='darkorchid',label=lab)
plt.xlabel('' + tname)
plt.ylabel('p('+ tname +')')
plt.yscale('log')
#plt.xscale('log')
#xint = range(int(min(x)), int(math.ceil(max(x))+1))
#plt.xticks(xint)
#plt.ylim(0.000001,0.01)
#plt.grid(True)
plt.savefig(tname + 'weighted.png')
def plot_edge_semantic_capital_diff_distr_seaborn(x, tname):
x = np.array(x)
mu = np.mean(x)
sigma = np.std(x)
fig7s = plt.gcf()
plt.rcParams['figure.figsize']=(8,6)
fig7s.set_size_inches((8,6))
plt.figure(figsize=(8, 6))
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
xlabel = 'relative semantic status'
ylabel = 'kde'
print max(x)
z = [xel if xel == 0 else np.log10(abs(xel))*np.sign(xel) for xel in x]
print max(z)
z = np.array(x)
with sns.axes_style("whitegrid"):
"""
g = sns.distplot(z, hist=0, \
#hist_kws={"histtype": "step", "linewidth": 1, "alpha": 0.3, "color": "g"}, \
color="r")
"""
g = sns.kdeplot(z, kernel='cos', c='r')
plt.title(lab)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tight_layout()
plt.savefig(tname + 'weighted77.eps', bbox_inches='tight', dpi=550)
def plot_edge_popularity_distr_2(x, tname):
x = np.array(x)
mu = np.mean(x)
sigma = np.std(x)
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
num_bins = 1500
# the histogram of the data
n, bins, patches = plt.hist(x, normed=1, bins=num_bins, histtype='step',color='darkorchid')
plt.clf() # Get rid of this histogram since not the one we want.
nx_frac = n/float(len(n)) # Each bin divided by total number of objects.
width = bins[1] - bins[0] # Width of each bin.
x = np.ravel(zip(bins[:-1], bins[:-1]+width))
y = np.ravel(zip(nx_frac,nx_frac))
plt.title(lab)
plt.scatter(x,y,color='darkorchid',label=lab)
plt.xlabel('' + tname)
plt.ylabel('p('+ tname +')')
plt.yscale('log')
#plt.xscale('log')
#xint = range(int(min(x)), int(math.ceil(max(x))+1))
#plt.xticks(xint)
plt.ylim(0.0000000001,0.01)
#plt.grid(True)
plt.savefig(tname + 'weighted_v2.png')
def plot_edge_sc_vs_pop_diff(diff, tname):
x = []
y = []
for el in diff:
x.append(el[0])
y.append(el[1])
lab = ''
plt.scatter(x,y,color='darkorchid',label=lab)
plt.xlabel('semantic capital status diff')
plt.ylabel('popularity status diff')
#plt.yscale('log')
#plt.xscale('log')
#xint = range(int(min(x)), int(math.ceil(max(x))+1))
#plt.xticks(xint)
plt.ylim(-1000,1000)
#plt.grid(True)
plt.savefig(tname + '.png')
def plot_edge_sc_vs_pop_diff_2(diff, tname):
coef_soc = 100
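    # Popularity (social) status diffs are binned into buckets of width coef_soc before counting pairs.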
soc = []
sem = []
d = defaultdict(int)
for el in diff:
# sc
s1 = el[0]
sem.append(s1)
# pop
s2 = int(el[1]/coef_soc)
soc.append(s2)
if s1 in d:
d[s1][s2] += 1
else:
d[s1] = defaultdict(int)
d[s1][s2] += 1
soc=np.array(soc)
sem=np.array(sem)
print np.corrcoef(soc,sem)
print pearsonr(soc, sem)
x = []
y = []
v = []
for i in d:
for j in d[i]:
if d[i][j] > 0:
x.append(j*coef_soc)
y.append(i)
v.append(d[i][j])
plt.clf()
plt.scatter(x,y,s=v, c='darkorchid', edgecolors='none',alpha=0.4)
plt.ylabel('sem cap diff (source - receiver)')
plt.xlabel('pop status diff (source - receiver)')
plt.tight_layout()
plt.xlim(-1000,1000)
#plt.grid(True)
plt.savefig(tname + 'pretty.png')
def plot_edge_sc_vs_pop_diff_2_seaborn(diff, tname):
coef_soc = 100
soc = []
sem = []
d = defaultdict(int)
for el in diff:
# sc
s1 = el[0]
sem.append(s1)
# pop
s2 = int(el[1]/coef_soc)
soc.append(el[1])
if s1 in d:
d[s1][s2] += 1
else:
d[s1] = defaultdict(int)
d[s1][s2] += 1
x=np.array(soc)
y=np.array(sem)
ylabel = 'relative semantic status'
xlabel = 'relative social status'
print len(x)
print len(y)
print max(x)
z = [xel if xel == 0 else np.log10(abs(xel))*np.sign(xel) for xel in x]
print max(z)
z = np.array(z)
labels = [r'$ 10^{-4} $', r'$ 10^{-3} $', r'$ 10^{-2} $', \
r'$ 10^{-1} $', r'$ 10^0 $', r'$ 10^1 $', r'$ 10^2 $', \
r'$ 10^3 $', r'$ 10^4 $']
with sns.axes_style("white"):
g = sns.jointplot(x=z, y=y, kind="scatter", color="darkorchid").set_axis_labels(xlabel, ylabel)
#g.set(xticklabels=labels)
g.ax_joint.set_xticklabels(labels)
#plt.tight_layout()
plt.savefig(tname + 'scatter.eps', bbox_inches='tight')
def find_BI_diff():
bi = pd.read_csv('BI_indexR_full.txt',\
encoding='utf-8', delim_whitespace=1)
print max(bi['bi']), min(bi['bi'])
bidict = bi.set_index('id')['bi'].to_dict()
cnt = 0
for el in bidict:
if bidict[el] > 1:
bidict[el] = 1
cnt += 1
res = []
f = open(f_in_graph_weights, 'r')
for line in f:
(n1, n2, w) = line.split()
res.append(bidict[int(n1)] - bidict[int(n2)])
print res
return res
def plot_edge_BI_diff_distr_seaborn(x, tname):
x = np.array(x)
mu = np.mean(x)
sigma = np.std(x)
med = np.median(x)
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
xlabel = 'relative status: Burt\'s index'
ylabel = 'kde'
fig7s = plt.gcf()
plt.rcParams['figure.figsize']=(8,6)
fig7s.set_size_inches((8,6))
plt.figure(figsize=(8, 6))
print max(x)
#z = [xel if xel == 0 else np.log10(abs(xel))*np.sign(xel) for xel in x]
#print max(z)
#z = np.array(z)
z = x
with sns.axes_style("whitegrid"):
"""
g = sns.distplot(z, hist=0, \
#hist_kws={"histtype": "step", "linewidth": 1, "alpha": 0.3, "color": "g"}, \
color="r")
"""
g = sns.kdeplot(z, c='r')
plt.title(lab)
#labels = [r'$ -10^{4} $', r'$ -10^{3} $', r'$ -10^{2} $', \
#r'$ -10^{1} $', r'$ 0 $', r'$ 10^1 $', r'$ 10^2 $', \
#r'$ 10^3 $', r'$ 10^4 $']
#g.set(xticklabels=labels)
plt.xlim(-1.1,1.1)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tight_layout()
plt.savefig(tname + 'BI7log.eps', bbox_inches='tight', dpi=550)
#pop_diff = read_edge_popularity_diff('edge_src_dest_weighted_INdeg.dat')
#plot_edge_popularity_diff_distr_seaborn(pop_diff, 'popularity diff (source - receiver)')
#sc_diff = read_edge_semantic_capital_diff('sem_capital_edge_src_dest_weighted_INdeg.dat')
#plot_edge_semantic_capital_diff_distr_seaborn(sc_diff,'sem cap diff (source - receiver)')
#pop_sc_diff = read_edge_popularity_and_semantic_capital_diff('sem_capital_edge_src_dest_weighted_INdeg.dat')
#plot_edge_sc_vs_pop_diff_2(pop_sc_diff, 'pop vs. sem cap status diff')
#pop_sc_diff = read_edge_popularity_and_semantic_capital_diff('sem_capital_edge_src_dest_weighted_INdeg.dat')
#plot_edge_sc_vs_pop_diff_2_seaborn(pop_sc_diff, 'pop vs. sem cap status diff')
x = find_BI_diff()
plot_edge_BI_diff_distr_seaborn(x, 'BI')
|
|
"""Google Sheets for translating pod content."""
import datetime
import random
from babel.messages import catalog
from babel.messages import pofile
from googleapiclient import errors
from grow.preprocessors import google_drive
from grow.translators import errors as translator_errors
try:
import cStringIO as StringIO
except ImportError: # pragma: no cover
try:
import StringIO
except ImportError:
from io import StringIO
from . import base
class AccessLevel(object):
COMMENTER = 'commenter'
OWNER = 'owner'
READER = 'reader'
WRITER = 'writer'
DEFAULT_ACCESS_LEVEL = AccessLevel.WRITER
OAUTH_SCOPE = 'https://www.googleapis.com/auth/spreadsheets'
class GoogleSheetsTranslator(base.Translator):
COLOR_DEEP_PURPLE_50 = {
'red': 0.929,
'green': 0.905,
'blue': 0.964,
}
COLOR_GREY_200 = {
'red': .933,
'blue': .933,
'green': .933,
}
COLOR_GREY_500 = {
'red': .6196,
'blue': .6196,
'green': .6196,
}
KIND = 'google_sheets'
HEADER_ROW_COUNT = 1
# Source locale, translation locale, message location.
HEADER_LABELS = [None, None, 'Extracted comments', 'Reference']
SHEETS_BASE_URL = 'https://docs.google.com/spreadsheets/d/'
has_immutable_translation_resources = False
has_multiple_langs_in_one_resource = True
def __init__(self, pod, config=None, project_title=None,
instructions=None, inject=False):
super(GoogleSheetsTranslator, self).__init__(
pod, config=config, project_title=project_title,
instructions=instructions, inject=inject)
self.update_meta_after_upload = False
def needs_meta_update(self):
"""Allow to be flagged for additional meta update after uploading."""
return self.update_meta_after_upload
def _catalog_has_comments(self, catalog):
for message in catalog:
if not message.id:
continue
if message.auto_comments:
return True
return False
def _content_hash(self, location, locale):
        return hash((location, locale)) % (10 ** 8) # 8 digits of the hash.
def _create_service(self): # pragma: no cover
return google_drive.BaseGooglePreprocessor.create_service(
'sheets', 'v4')
def _download_sheet(self, spreadsheet_id, locale):
service = self._create_service()
rangeName = "'{}'!A:D".format(locale)
try:
# pylint: disable=no-member
resp = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range=rangeName).execute()
except errors.HttpError as e:
if e.resp['status'] == '400':
raise translator_errors.NotFoundError(
'Translation for {} not found.'.format(locale))
raise
# Check for spreadsheets that are missing columns.
column_count = len(self.HEADER_LABELS)
if len(resp['values'][0]) < column_count:
missing_columns = [None] * (column_count - len(resp['values'][0]))
resp['values'][:] = [i + missing_columns for i in resp['values']]
return resp['values']
def _download_content(self, stat):
spreadsheet_id = stat.ident
values = self._download_sheet(spreadsheet_id, stat.lang)
values.pop(0)
babel_catalog = catalog.Catalog(stat.lang)
for row in values:
# Skip empty rows.
if not row or self._is_empty_row(row):
continue
source = row[0]
translation = row[1] if len(row) > 1 else None
babel_catalog.add(source, translation, auto_comments=[],
context=None, flags=[])
updated_stat = base.TranslatorStat(
url=stat.url,
lang=stat.lang,
downloaded=datetime.datetime.now(),
source_lang=stat.source_lang,
ident=stat.ident)
fp = StringIO.StringIO()
pofile.write_po(fp, babel_catalog)
fp.seek(0)
content = fp.read()
return updated_stat, content
def _is_empty_row(self, row):
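        # A row counts as empty when every cell is None or an empty string.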
return bool(set(row) - set((None, ''))) is False
def _upload_catalogs(self, catalogs, source_lang, prune=False):
if not catalogs:
raise translator_errors.NoCatalogsError('Missing catalogs to upload.')
project_title = self.project_title
source_lang = str(source_lang)
locales_to_sheet_ids = {}
# Get existing sheet ID (if it exists) from one stat.
spreadsheet_id = None
stats_to_download = self._get_stats_to_download([])
if stats_to_download:
stat = stats_to_download.values()[0]
spreadsheet_id = stat.ident if stat else None
        # NOTE: Managing locales across multiple spreadsheets is unsupported.
service = self._create_service()
if spreadsheet_id:
# pylint: disable=no-member
resp = service.spreadsheets().get(
spreadsheetId=spreadsheet_id).execute()
for sheet in resp['sheets']:
locales_to_sheet_ids[sheet['properties']['title']] = \
sheet['properties']['sheetId']
catalogs_to_create = []
sheet_ids_to_catalogs = {}
for catalog in catalogs:
existing_sheet_id = locales_to_sheet_ids.get(
str(catalog.locale))
if existing_sheet_id:
sheet_ids_to_catalogs[existing_sheet_id] = catalog
elif source_lang != str(catalog.locale):
catalogs_to_create.append(catalog)
requests = []
if catalogs_to_create:
requests += self._generate_create_sheets_requests(
catalogs_to_create, source_lang)
if sheet_ids_to_catalogs:
requests += self._generate_update_sheets_requests(
sheet_ids_to_catalogs, source_lang, spreadsheet_id,
prune=prune)
self._perform_batch_update(spreadsheet_id, requests)
else:
# Create a new spreadsheet and use the id.
service = self._create_service()
# pylint: disable=no-member
resp = service.spreadsheets().create(body={
'properties': {
'title': project_title,
},
}).execute()
spreadsheet_id = resp['spreadsheetId']
requests = []
requests += self._generate_create_sheets_requests(
catalogs, source_lang)
self._perform_batch_update(spreadsheet_id, requests)
if 'acl' in self.config:
self._do_update_acl(spreadsheet_id, self.config['acl'])
stats = []
for catalog in catalogs:
if str(catalog.locale) == source_lang:
continue
url = '{}{}'.format(self.SHEETS_BASE_URL, spreadsheet_id)
lang = str(catalog.locale)
if lang in locales_to_sheet_ids:
url += '#gid={}'.format(locales_to_sheet_ids[lang])
stat = base.TranslatorStat(
url=url,
lang=lang,
source_lang=source_lang,
uploaded=datetime.datetime.now(),
ident=spreadsheet_id)
stats.append(stat)
return stats
def _create_header_row_data(self, source_lang, lang):
return {
'values': [
{'userEnteredValue': {'stringValue': source_lang}},
{'userEnteredValue': {'stringValue': lang}},
{'userEnteredValue': {'stringValue': self.HEADER_LABELS[2]}},
{'userEnteredValue': {'stringValue': self.HEADER_LABELS[3]}},
]
}
def _create_catalog_row(self, id, value, comments, locations):
comments = [] if comments is None else comments
return {
'values': [
{'userEnteredValue': {'stringValue': id}},
{'userEnteredValue': {'stringValue': value}},
{'userEnteredValue': {'stringValue': '\n'.join(comments)}},
{'userEnteredValue': {
'stringValue': ', '.join(t[0] for t in locations)}},
],
}
def _create_catalog_rows(self, catalog, prune=False):
rows = []
for message in catalog:
if not message.id:
continue
if prune and not message.locations:
continue
rows.append(self._create_catalog_row(
message.id, message.string, message.auto_comments, message.locations))
return rows
def _diff_data(self, existing_values, catalog):
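        # Returns (existing_rows, new_rows, removed_rows): rows matched against the
        # catalog, rows to append, and indexes of rows no longer present in the catalog.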
existing_rows = []
new_rows = []
removed_rows = []
for value in existing_values:
if not value or self._is_empty_row(value): # Skip empty rows.
continue
existing_rows.append({
'source': value[0],
'translation': value[1] if len(value) > 1 else None,
'comments': value[2] if len(value) > 2 else [],
'locations': value[3] if len(value) > 3 else [],
'updated': False, # Has changed from the downloaded value.
'matched': False, # Has been matched to the downloaded values.
})
for message in catalog:
if not message.id:
continue
found = False
# Update for any existing values.
for value in existing_rows:
if value['source'] == message.id:
value['updated'] = (
value['translation'] != message.string or
value['locations'] != message.locations)
value['translation'] = message.string
value['locations'] = message.locations
value['comments'] = message.auto_comments
value['matched'] = True
found = True
break
if found == True:
continue
new_rows.append({
'source': message.id,
'translation': message.string,
'locations': message.locations,
'comments': message.auto_comments,
})
for index, value in enumerate(existing_rows):
if not value['matched'] or len(value['locations']) == 0:
removed_rows.append(index)
# Reset the locations when not found in catalog.
if not value['matched']:
value['locations'] = []
return (existing_rows, new_rows, removed_rows)
def _generate_comments_column_requests(self, sheet_id, catalog):
requests = []
sheet_range = {
'sheetId': sheet_id,
'dimension': 'COLUMNS',
'startIndex': 2,
'endIndex': 3,
}
if self._catalog_has_comments(catalog):
requests.append({
'updateDimensionProperties': {
'range': sheet_range,
'properties': {
'hiddenByUser': False,
},
'fields': 'hiddenByUser',
},
})
else:
requests.append({
'updateDimensionProperties': {
'range': sheet_range,
'properties': {
'hiddenByUser': True,
},
'fields': 'hiddenByUser',
},
})
return requests
def _generate_create_sheets_requests(self, catalogs, source_lang):
# Create sheets.
requests = []
for catalog in catalogs:
sheet_id = self._generate_new_sheet_id()
lang = str(catalog.locale)
# Create a new sheet.
requests.append({
'addSheet': {
'properties': {
'sheetId': sheet_id,
'title': lang,
'gridProperties': {
'columnCount': 4,
'rowCount': self.HEADER_ROW_COUNT + 1,
'frozenRowCount': 1,
'frozenColumnCount': 1,
},
},
},
})
# Add the data to the new sheet.
_, new_rows, _ = self._diff_data([], catalog)
row_data = []
row_data.append(self._create_header_row_data(source_lang, lang))
if len(new_rows):
for value in new_rows:
row_data.append(self._create_catalog_row(
value['source'], value['translation'],
value['comments'], value['locations']))
requests.append({
'appendCells': {
'sheetId': sheet_id,
'fields': 'userEnteredValue',
'rows': row_data,
},
})
# Format the new sheet.
requests += self._generate_style_requests(
sheet_id, catalog=catalog)
# Size the initial sheet columns.
# Not part of the style request to respect user's choice to resize.
requests.append({
'updateDimensionProperties': {
'range': {
'sheetId': sheet_id,
'dimension': 'COLUMNS',
'startIndex': 0,
'endIndex': 3,
},
'properties': {
'pixelSize': 400, # Source and Translation Columns
},
'fields': 'pixelSize',
},
})
requests.append({
'updateDimensionProperties': {
'range': {
'sheetId': sheet_id,
'dimension': 'COLUMNS',
'startIndex': 3,
'endIndex': 4,
},
'properties': {
'pixelSize': 200, # Location Column
},
'fields': 'pixelSize',
},
})
requests += self._generate_comments_column_requests(
sheet_id, catalog)
return requests
def _generate_filter_view_requests(self, sheet_id, sheet, filter_view):
requests = []
is_filtered = False
if sheet and 'filterViews' in sheet:
for existing_range in sheet['filterViews']:
if existing_range['filterViewId'] == filter_view['filterViewId']:
is_filtered = True
requests.append({
'updateFilterView': {
'filter': filter_view,
'fields': 'range,title,criteria',
},
})
break
if not is_filtered:
requests.append({
'addFilterView': {
'filter': filter_view,
},
})
return requests
def _generate_filter_view_resource_requests(self, sheet_id, sheet, catalog=None):
requests = []
if not catalog:
return requests
lang = str(catalog.locale)
location_to_filter_id = {}
filter_ids = set()
# Find all the unique resource locations.
for message in catalog:
if not message.id:
continue
for raw_location in message.locations:
location = raw_location[0]
if not location in location_to_filter_id:
# Try to match up with the document hash value.
location_to_filter_id[
location] = self._content_hash(location, lang)
for location, filter_id in location_to_filter_id.iteritems():
requests += self._generate_filter_view_requests(sheet_id, sheet, {
'filterViewId': filter_id,
'range': {
'sheetId': sheet_id,
'startColumnIndex': 0,
'endColumnIndex': 4,
'startRowIndex': 0,
},
'title': location,
'criteria': {
'3': {
'condition': {
'type': 'TEXT_CONTAINS',
'values': [
{'userEnteredValue': location},
],
},
},
},
})
return requests
def _generate_new_sheet_id(self):
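        # A random id is picked; collisions with existing sheet ids are not checked here.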
return random.randrange(100, 9999999)
def _generate_style_requests(self, sheet_id, sheet=None, catalog=None):
formats = {}
formats['header_cell'] = {
'backgroundColor': self.COLOR_GREY_200,
'textFormat': {
'bold': True
},
}
formats['info_cell'] = {
'wrapStrategy': 'WRAP',
'textFormat': {
'foregroundColor': self.COLOR_GREY_500,
},
}
formats['missing_cell'] = {
'backgroundColor': self.COLOR_DEEP_PURPLE_50,
}
formats['wrap'] = {'wrapStrategy': 'WRAP'}
requests = []
# TODO Figure out how to be smarter about matching conditional formatting.
# Remove all existing conditional formatting. :(
if sheet and 'conditionalFormats' in sheet:
for _ in sheet['conditionalFormats']:
requests.append({
'deleteConditionalFormatRule': {
'sheetId': sheet_id,
'index': 0
}
})
# Style header cells.
requests.append({
'repeatCell': {
'fields': 'userEnteredFormat',
'range': {
'sheetId': sheet_id,
'startColumnIndex': 0,
'startRowIndex': 0,
'endRowIndex': self.HEADER_ROW_COUNT,
},
'cell': {
'userEnteredFormat': formats['header_cell'],
},
},
})
# Allow the translations and comments to wrap.
requests.append({
'repeatCell': {
'fields': 'userEnteredFormat',
'range': {
'sheetId': sheet_id,
'startColumnIndex': 0,
'endColumnIndex': 3,
'startRowIndex': self.HEADER_ROW_COUNT,
},
'cell': {
'userEnteredFormat': formats['wrap'],
},
},
})
# Comment and source cells are muted in styling.
requests.append({
'repeatCell': {
'fields': 'userEnteredFormat',
'range': {
'sheetId': sheet_id,
'startColumnIndex': 2,
'endColumnIndex': 4,
'startRowIndex': self.HEADER_ROW_COUNT,
},
'cell': {
'userEnteredFormat': formats['info_cell'],
},
},
})
# Highlight missing translations.
requests.append({
'addConditionalFormatRule': {
'rule': {
'ranges': [{
'sheetId': sheet_id,
'startColumnIndex': 1,
'endColumnIndex': 2,
'startRowIndex': self.HEADER_ROW_COUNT,
}],
'booleanRule': {
'condition': {'type': 'BLANK'},
'format': formats['missing_cell']
}
},
'index': 0
}
})
# Protect the original values.
requests += self._generate_style_protected_requests(sheet_id, sheet, {
            'protectedRangeId': sheet_id + 1000001, # Keep it predictable.
'range': {
'sheetId': sheet_id,
'startColumnIndex': 0,
'endColumnIndex': 1,
'startRowIndex': self.HEADER_ROW_COUNT,
},
'description': 'Original strings can only be edited in the source files.',
'warningOnly': True,
})
# Protect the comment values.
requests += self._generate_style_protected_requests(sheet_id, sheet, {
            'protectedRangeId': sheet_id + 1000002, # Keep it predictable.
'range': {
'sheetId': sheet_id,
'startColumnIndex': 2,
'endColumnIndex': 3,
'startRowIndex': self.HEADER_ROW_COUNT,
},
'description': 'Comment strings can only be edited in the source files.',
'warningOnly': True,
})
# Protect the location values.
requests += self._generate_style_protected_requests(sheet_id, sheet, {
            'protectedRangeId': sheet_id + 1000003, # Keep it predictable.
'range': {
'sheetId': sheet_id,
'startColumnIndex': 3,
'endColumnIndex': 4,
'startRowIndex': self.HEADER_ROW_COUNT,
},
'description': 'Source strings can only be edited in the source files.',
'warningOnly': True,
})
# Filter view for untranslated strings.
requests += self._generate_filter_view_requests(sheet_id, sheet, {
            'filterViewId': sheet_id + 3300001, # Keep it predictable.
'range': {
'sheetId': sheet_id,
'startColumnIndex': 0,
'endColumnIndex': 4,
'startRowIndex': 0,
},
'title': 'Untranslated Strings',
'criteria': {
'1': {
'condition': {'type': 'BLANK'}
},
},
})
# Filter view for each content path.
requests += self._generate_filter_view_resource_requests(
sheet_id, sheet, catalog)
return requests
def _generate_style_protected_requests(self, sheet_id, sheet, protected_range):
requests = []
is_protected = False
if sheet and 'protectedRanges' in sheet:
for existing_range in sheet['protectedRanges']:
if existing_range['protectedRangeId'] == protected_range['protectedRangeId']:
is_protected = True
requests.append({
'updateProtectedRange': {
'protectedRange': protected_range,
'fields': 'range,description,warningOnly',
},
})
break
if not is_protected:
requests.append({
'addProtectedRange': {
'protectedRange': protected_range,
},
})
return requests
def _generate_update_sheets_requests(self, sheet_ids_to_catalogs,
source_lang, spreadsheet_id, prune=False):
requests = []
for sheet_id, catalog in sheet_ids_to_catalogs.iteritems():
lang = str(catalog.locale)
existing_values = self._download_sheet(spreadsheet_id, lang)
for x in range(self.HEADER_ROW_COUNT):
existing_values.pop(0) # Remove header rows.
for value in existing_values:
if not value or self._is_empty_row(value): # Skip empty rows.
continue
if value not in catalog:
source = value[0]
translation = value[1] if len(value) > 1 else None
catalog.add(source, translation, auto_comments=[],
context=None, flags=[])
# Check for missing columns.
num_missing_columns = 0
for column in existing_values[0]:
if column == None:
num_missing_columns += 1
if num_missing_columns:
requests.append({
'appendDimension': {
'sheetId': sheet_id,
'dimension': 'COLUMNS',
'length': num_missing_columns,
},
})
# Update the column headers.
requests.append({
'updateCells': {
'fields': 'userEnteredValue',
'start': {
'sheetId': sheet_id,
'rowIndex': 0,
'columnIndex': 0,
},
'rows': [
self._create_header_row_data(source_lang, lang)
],
},
})
# Perform a diff of the existing data to what the catalog provides
# to make targeted changes to the spreadsheet and preserve meta
# information--such as comments.
existing_rows, new_rows, removed_rows = self._diff_data(
existing_values, catalog)
# Update the existing values in place.
if len(existing_rows):
row_data = []
for value in existing_rows:
if not value: # Skip empty rows.
continue
row_data.append(self._create_catalog_row(
value['source'], value['translation'],
value['comments'], value['locations']))
# NOTE This is not (yet) smart enough to only update small sections
# with the updated information. Hint: Use value['updated'].
requests.append({
'updateCells': {
'fields': 'userEnteredValue',
'start': {
'sheetId': sheet_id,
# Skip header row.
'rowIndex': self.HEADER_ROW_COUNT,
'columnIndex': 0,
},
'rows': row_data,
},
})
# Append new values to end of sheet.
if len(new_rows):
# Mark to update the sheet metadata after done.
self.update_meta_after_upload = True
row_data = []
for value in new_rows:
row_data.append(self._create_catalog_row(
value['source'], value['translation'],
value['comments'], value['locations']))
requests.append({
'appendCells': {
'sheetId': sheet_id,
'fields': 'userEnteredValue',
'rows': row_data,
},
})
# Remove obsolete rows if not included.
if prune and len(removed_rows):
for value in reversed(removed_rows): # Start from the bottom.
                    # NOTE this is inefficient since it does not combine ranges.
# ex: 1, 2, 3 are three requests instead of one request 1-3
requests.append({
'deleteDimension': {
'range': {
'sheetId': sheet_id,
'dimension': 'ROWS',
'startIndex': self.HEADER_ROW_COUNT + value,
'endIndex': self.HEADER_ROW_COUNT + value + 1,
},
},
})
# Sort all rows.
requests.append({
'sortRange': {
'range': {
'sheetId': sheet_id,
'startColumnIndex': 0,
'startRowIndex': self.HEADER_ROW_COUNT,
},
'sortSpecs': [
{
'dimensionIndex': 0,
'sortOrder': 'ASCENDING',
}
],
},
})
requests += self._generate_comments_column_requests(
sheet_id, catalog)
return requests
def _do_update_acl(self, spreadsheet_id, acl):
service = google_drive.BaseGooglePreprocessor.create_service(
'drive', 'v3')
for item in acl:
permission = {
'role': item.get('role', DEFAULT_ACCESS_LEVEL).lower(),
}
if 'domain' in item:
permission['type'] = 'domain'
permission['domain'] = item['domain']
elif 'user' in item:
permission['type'] = 'user'
permission['emailAddress'] = item['user']
elif 'group' in item:
permission['type'] = 'group'
permission['emailAddress'] = item['group']
# pylint: disable=no-member
resp = service.permissions().create(
fileId=spreadsheet_id,
body=permission).execute()
def _perform_batch_update(self, spreadsheet_id, requests):
service = self._create_service()
body = {'requests': requests}
# pylint: disable=no-member
return service.spreadsheets().batchUpdate(
spreadsheetId=spreadsheet_id, body=body).execute()
def _update_acls(self, stats, locales):
if 'acl' not in self.config:
return
stat = stats.values()[0]
spreadsheet_id = stat.ident
acl = self.config['acl']
if not acl:
return
self._do_update_acl(spreadsheet_id, acl)
def _update_meta(self, stat, locale, catalog):
spreadsheet_id = stat.ident
service = self._create_service()
# pylint: disable=no-member
resp = service.spreadsheets().get(
spreadsheetId=stat.ident).execute()
requests = []
for sheet in resp['sheets']:
if sheet['properties']['title'] != locale:
continue
sheet_id = sheet['properties']['sheetId']
requests += self._generate_style_requests(
sheet_id, sheet=sheet, catalog=catalog)
self._perform_batch_update(spreadsheet_id, requests)
def get_edit_url(self, doc):
if not doc.locale:
return
stats = self._get_stats_to_download([doc.locale])
if doc.locale not in stats:
return
stat = stats[doc.locale]
return '{}&fvid={}'.format(stat.url, self._content_hash(doc.pod_path, doc.locale))
|
|
# Neural Network python module
# Written by William Ganucheau 2014
import sys, random, math
# Utility function to create an empty (zero) array of length n
def zeroArray(n):
return [0 for i in range(0, n)]
# A neural network class capable of saving/loading from a file,
# Training based on user-provided input-output data,
# and evaluating based on user input
class NeuralNetwork:
# Create a neural network. If weights aren't provided, they are
# initialized to random values
def __init__(self, neurons, weights=None, biases=None):
self.numLayers = len(neurons)
self.neurons = neurons
self.numNeurons = sum(neurons)
self.numInput = neurons[0];
self.numOutput = neurons[len(neurons)-1]
self.weights = weights
self.biases = biases
self.errorGradients = zeroArray(self.numNeurons)
self.outputs = zeroArray(self.numNeurons)
self.inputs = []
for layer in range(0, self.numLayers-1):
for neuron in range(0, self.neurons[layer+1]):
self.inputs.append(zeroArray(self.neurons[layer]))
# Default random values
if weights == None:
self.__initWeights()
        self.tempWeights = self.weights  # stage training updates against the (possibly freshly initialized) weights
if biases == None:
self.__initBiases()
# Initialize random weights for the neural network
def __initWeights(self):
i = 0
self.weights = []
# Initialize the weights for every non-input neuron
for layer in range(1, self.numLayers):
numWeights = self.neurons[layer-1]
for neuron in range(0, self.neurons[layer]):
self.weights.append(
[random.uniform(-0.5, 0.5) for j in range(0, numWeights)]
)
# Initialize random biases for the neural network
def __initBiases(self):
numBiased = self.numNeurons-self.numInput
self.biases = [random.uniform(-0.5, 0.5) for j in range(0, numBiased)]
# Save the neural network to a file
def save(self, path):
data = ''
# First line is # of layers
data += str(self.numLayers) + '\n'
# Second line is # of neurons in each layer
for c in range(0, len(self.neurons)):
data += str(self.neurons[c]) + ' '
data += '\n'
# Third line is biases for all the neurons
for b in range(0, len(self.biases)):
data += str(self.biases[b]) + ' '
data += '\n'
# Following lines are the weights of each neuron
i = 0
for l in range(1, self.numLayers):
for n in range(0, self.neurons[l]):
for w in range (0, len(self.weights[i])):
data += str(self.weights[i][w]) + ' '
data += '\n'
i += 1
f = open(path, 'w')
f.write(data)
f.flush()
f.close()
# Load a network from a file
@classmethod
def load(self, path):
f = open(path, 'r')
numLayers = int(f.readline())
charNeurons = f.readline().split()
charBiases = f.readline().split()
neurons = [int(charNeurons[i]) for i in range(0, len(charNeurons))]
biases = [float(charBiases[i]) for i in range(0, len(charBiases))]
weights = zeroArray(sum(neurons))
for neuron in range(0, sum(neurons)):
charWeights = f.readline().split()
weights[neuron] = (
[float(charWeights[i]) for i in range(0, len(charWeights))]
)
# Instantiate network
return self(neurons, weights, biases)
# Evaluate an input array with the neural network
def eval(self, input):
if len(input) != self.numInput:
sys.exit ('Error: Invalid input size.')
output = []
neuronIndex = 0;
for layer in range(1, self.numLayers):
output = zeroArray(self.neurons[layer])
for neuron in range (0, self.neurons[layer]):
neuronIndex = self.__getIndex(layer) + neuron
numWeights = len(self.weights[neuronIndex])
for weight in range (0, numWeights):
val = self.weights[neuronIndex][weight] * input[weight]
output[neuron] += val
self.inputs[neuronIndex][weight] = input[weight]
output[neuron] += self.biases[neuronIndex]
output[neuron] = self.__sigmoid(output[neuron])
self.outputs[neuronIndex] = output[neuron]
neuronIndex += 1
input = output
return output
# Sigmoid function maps (-inf, inf) -> (0, 1)
def __sigmoid(self, val):
return 1.0 / (1.0 + math.exp(-1*val))
# Train the network on a single set of expected vs. actual output data
def train(self, expected, actual):
if len(expected) != len(actual):
sys.exit ('Provided output different size from network output.')
# Train the output layer
for neuron in range(0, self.numOutput):
error = expected[neuron] - actual[neuron]
neuronIndex = self.__getIndex(self.numLayers-1) + neuron
self.__trainNeuron (neuronIndex, error)
# Train the hidden layers
for layer in range(self.numLayers-2, 0, -1):
numNeurons = self.neurons[layer]
for neuron in range (0, numNeurons):
neuronIndex = neuron + self.__getIndex(layer)
error = 0
for nextNeuron in range (0, self.neurons[layer+1]):
nextNeuronIndex = nextNeuron + self.__getIndex(layer+1)
error += (
self.weights[nextNeuronIndex][neuron] *
self.errorGradients[nextNeuronIndex]
)
self.__trainNeuron(neuronIndex, error)
self.weights = self.tempWeights;
# Train a neuron
def __trainNeuron(self, index, error):
self.errorGradients[index] = self.outputs[index]
self.errorGradients[index] *= (1 - self.outputs[index]) * error
numWeights = len(self.weights[index])
for weight in range(0, numWeights):
self.tempWeights[index][weight] += (
self.inputs[index][weight] * self.errorGradients[index]
)
# Get the index of the first neuron in a layer
def __getIndex(self, layer):
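        # Offset into the weight/bias arrays, which hold entries only for non-input neurons.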
index = 0
for l in range(0, layer-1):
index += self.neurons[l+1]
return index
# Train a neural network until the error is below the threshold
def simulate (self, inputSet, outputSet, maxError):
iterations = 0
attempts = 1
maxIterations = 100000
maxAttempts = 5
# Arbitrary, initial error just has to be > maxError
error = maxError + 1
while error > maxError:
# Prevent the network from stalling in local mins
if iterations == maxIterations:
if attempts == maxAttempts:
return False
iterations = 0
attempts += 1
# Generate new weights and biases
self.__initWeights()
self.__initBiases()
print('Network failed to converge. Trying again with new vals')
error = 0
# Start at a random index to prevent getting stalled
startIndex = random.randrange(0, len(inputSet))
# Train on each of the input/output data sets
for i in range (0, len(inputSet)):
index = (startIndex + i) % len(inputSet)
output = self.eval(inputSet[index])
# Sum-squared error
error += math.pow(self.__maxDiff(outputSet[index], output), 2)
# Train the neural network
self.train(outputSet[index], output)
iterations += 1
# Network converged
return True
# Find the maximum difference between two numeric arrays
def __maxDiff (self, alist, blist):
if len(alist) != len(blist):
sys.exit('Lists must be of same size!')
max = None
for i in range (0, len(alist)):
dif = alist[i] - blist[i]
if max == None or max < dif:
max = dif
return max
# Convert a list of values to a 2D list
# Each line is one element of the list
def fileToList(path):
f = open(path, 'r')
string = None
list = []
for line in f:
strArr = line.split()
valArr = [float(strArr[i]) for i in range(0, len(strArr))]
list.append(valArr)
return list
def _initParsers():
import argparse
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
# Create a new neural network
parser_new = subparsers.add_parser(
'new',
help='Create a new neural net.'
)
parser_new.add_argument(
'filepath',
type=str,
nargs=1,
help='Filepath to which the net should be saved.'
)
parser_new.add_argument(
'neurons',
type=int,
nargs='+',
help='Number of neurons in each layer. (Must have at least 2 layers!)'
)
# Train a neural network
parser_train = subparsers.add_parser(
'train',
help='Train a neural net on a set of input/output data'
)
parser_train.add_argument(
'filepath',
type=str,
nargs=1,
help='Filepath to the neural network.'
)
parser_train.add_argument(
'input',
type=str,
nargs=1,
help='File path to the training input.')
parser_train.add_argument(
'output',
type=str,
nargs=1,
help='File path to the training output.'
)
parser_train.add_argument(
'-e', '--maxError',
type=float,
nargs=1,
help='The desired accuracy of the network'
)
# Evaluate input data on the network
parser_eval = subparsers.add_parser(
'eval',
help='Evaluate input/output data on the neural network'
)
parser_eval.add_argument(
'filepath',
type=str,
nargs=1,
help='Filepath to the neural network.'
)
group = parser_eval.add_mutually_exclusive_group(required=True)
group.add_argument(
'-i', '--input',
type=float,
nargs='+',
help='A single set of input values'
)
group.add_argument(
'-l', '--loadInput',
type=str,
nargs=1,
help='A file from which to load input data'
)
return parser
# Commandline usage
if __name__ == "__main__":
parser = _initParsers()
args = parser.parse_args()
command = args.command.split()[0]
# User wants to create a new neural net
if command == 'new':
numLayers = len(args.neurons)
if numLayers < 2:
sys.exit('Error: Must have at least 2 layers')
net = NeuralNetwork(args.neurons)
net.save(args.filepath[0])
print('Neural network created and saved to ' + args.filepath[0] + '.')
# User wants to train a neural net
elif command == 'train':
net = NeuralNetwork.load(args.filepath[0])
print('Neural network loaded. ' + str(net.numLayers) + ' layers.')
inputSet = fileToList(args.input[0])
outputSet = fileToList(args.output[0])
print('Beginning to train')
if args.maxError:
maxError = args.maxError[0]
else:
maxError = 0.01
net.simulate(inputSet, outputSet, maxError)
net.save(args.filepath[0])
# User wants to evaluate some input on the neural net
elif command == 'eval':
net = NeuralNetwork.load(args.filepath[0])
if args.input:
print(net.eval(args.input))
sys.exit()
input = fileToList(args.loadInput[0])
for i in input:
print(net.eval(i))
|
|
from contextlib import contextmanager
from unittest import mock
import django
from django.db.migrations import (
AddField,
AlterField,
CreateModel,
DeleteModel,
RemoveField,
RenameField,
)
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.operations.base import Operation
from psqlextra.models import (
PostgresMaterializedViewModel,
PostgresPartitionedModel,
PostgresViewModel,
)
from psqlextra.types import PostgresPartitioningMethod
from . import operations
from .state import (
PostgresMaterializedViewModelState,
PostgresPartitionedModelState,
PostgresViewModelState,
)
# original `MigrationAutodetector.add_operation`
# function, saved here so the patched version can
# call the original
add_operation = MigrationAutodetector.add_operation
class AddOperationHandler:
"""Handler for when operations are being added to a new migration.
This is where we intercept operations such as
:see:CreateModel to replace it with our own.
"""
def __init__(self, autodetector, app_label, args, kwargs):
self.autodetector = autodetector
self.app_label = app_label
self.args = args
self.kwargs = kwargs
def add(self, operation):
"""Adds the specified operation to the list of operations to execute in
the migration."""
return add_operation(
self.autodetector,
self.app_label,
operation,
*self.args,
**self.kwargs,
)
def add_field(self, operation: AddField):
"""Adds the specified :see:AddField operation to the list of operations
to execute in the migration."""
return self._transform_view_field_operations(operation)
def remove_field(self, operation: RemoveField):
"""Adds the specified :see:RemoveField operation to the list of
operations to execute in the migration."""
return self._transform_view_field_operations(operation)
def alter_field(self, operation: AlterField):
"""Adds the specified :see:AlterField operation to the list of
operations to execute in the migration."""
return self._transform_view_field_operations(operation)
def rename_field(self, operation: RenameField):
"""Adds the specified :see:RenameField operation to the list of
operations to execute in the migration."""
return self._transform_view_field_operations(operation)
def _transform_view_field_operations(self, operation: Operation):
"""Transforms operations on fields on a (materialized) view into state
only operations.
One cannot add/remove/delete fields on a (materialized) view,
however, we do want Django's migration system to keep track of
these kind of changes to the model. The :see:ApplyState
operation just tells Django the operation was applied without
actually applying it.
"""
if django.VERSION >= (4, 0):
model_identifier = (self.app_label, operation.model_name.lower())
model_state = (
self.autodetector.to_state.models.get(model_identifier)
or self.autodetector.from_state.models[model_identifier]
)
if isinstance(model_state, PostgresViewModelState):
return self.add(
operations.ApplyState(state_operation=operation)
)
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.model_name
)
if issubclass(model, PostgresViewModel):
return self.add(
operations.ApplyState(state_operation=operation)
)
return self.add(operation)
def add_create_model(self, operation: CreateModel):
"""Adds the specified :see:CreateModel operation to the list of
operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.to_state.models[
self.app_label, operation.name.lower()
]
if isinstance(model_state, PostgresPartitionedModelState):
return self.add_create_partitioned_model(operation)
elif isinstance(model_state, PostgresMaterializedViewModelState):
return self.add_create_materialized_view_model(operation)
elif isinstance(model_state, PostgresViewModelState):
return self.add_create_view_model(operation)
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.name
)
if issubclass(model, PostgresPartitionedModel):
return self.add_create_partitioned_model(operation)
elif issubclass(model, PostgresMaterializedViewModel):
return self.add_create_materialized_view_model(operation)
elif issubclass(model, PostgresViewModel):
return self.add_create_view_model(operation)
return self.add(operation)
def add_delete_model(self, operation: DeleteModel):
"""Adds the specified :see:Deletemodel operation to the list of
operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.from_state.models[
self.app_label, operation.name.lower()
]
if isinstance(model_state, PostgresPartitionedModelState):
return self.add_delete_partitioned_model(operation)
elif isinstance(model_state, PostgresMaterializedViewModelState):
return self.add_delete_materialized_view_model(operation)
elif isinstance(model_state, PostgresViewModelState):
return self.add_delete_view_model(operation)
else:
model = self.autodetector.old_apps.get_model(
self.app_label, operation.name
)
if issubclass(model, PostgresPartitionedModel):
return self.add_delete_partitioned_model(operation)
elif issubclass(model, PostgresMaterializedViewModel):
return self.add_delete_materialized_view_model(operation)
elif issubclass(model, PostgresViewModel):
return self.add_delete_view_model(operation)
return self.add(operation)
def add_create_partitioned_model(self, operation: CreateModel):
"""Adds a :see:PostgresCreatePartitionedModel operation to the list of
operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.to_state.models[
self.app_label, operation.name.lower()
]
partitioning_options = model_state.partitioning_options
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.name
)
partitioning_options = model._partitioning_meta.original_attrs
_, args, kwargs = operation.deconstruct()
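        # Hash-partitioned tables cannot have a DEFAULT partition, so one is added
        # only for range/list partitioning.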
if partitioning_options["method"] != PostgresPartitioningMethod.HASH:
self.add(
operations.PostgresAddDefaultPartition(
model_name=operation.name, name="default"
)
)
self.add(
operations.PostgresCreatePartitionedModel(
*args, **kwargs, partitioning_options=partitioning_options
)
)
def add_delete_partitioned_model(self, operation: DeleteModel):
"""Adds a :see:PostgresDeletePartitionedModel operation to the list of
operations to execute in the migration."""
_, args, kwargs = operation.deconstruct()
return self.add(
operations.PostgresDeletePartitionedModel(*args, **kwargs)
)
def add_create_view_model(self, operation: CreateModel):
"""Adds a :see:PostgresCreateViewModel operation to the list of
operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.to_state.models[
self.app_label, operation.name.lower()
]
view_options = model_state.view_options
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.name
)
view_options = model._view_meta.original_attrs
_, args, kwargs = operation.deconstruct()
self.add(
operations.PostgresCreateViewModel(
*args, **kwargs, view_options=view_options
)
)
def add_delete_view_model(self, operation: DeleteModel):
"""Adds a :see:PostgresDeleteViewModel operation to the list of
operations to execute in the migration."""
_, args, kwargs = operation.deconstruct()
return self.add(operations.PostgresDeleteViewModel(*args, **kwargs))
def add_create_materialized_view_model(self, operation: CreateModel):
"""Adds a :see:PostgresCreateMaterializedViewModel operation to the
list of operations to execute in the migration."""
if django.VERSION >= (4, 0):
model_state = self.autodetector.to_state.models[
self.app_label, operation.name.lower()
]
view_options = model_state.view_options
else:
model = self.autodetector.new_apps.get_model(
self.app_label, operation.name
)
view_options = model._view_meta.original_attrs
_, args, kwargs = operation.deconstruct()
self.add(
operations.PostgresCreateMaterializedViewModel(
*args, **kwargs, view_options=view_options
)
)
def add_delete_materialized_view_model(self, operation: DeleteModel):
"""Adds a :see:PostgresDeleteMaterializedViewModel operation to the
list of operations to execute in the migration."""
_, args, kwargs = operation.deconstruct()
return self.add(
operations.PostgresDeleteMaterializedViewModel(*args, **kwargs)
)
@contextmanager
def patched_autodetector():
"""Patches the standard Django :seee:MigrationAutodetector for the duration
of the context.
The patch intercepts the `add_operation` function to
customize how new operations are added.
We have to do this because there is no way in Django
to extend the auto detector otherwise.
"""
autodetector_module_path = "django.db.migrations.autodetector"
autodetector_class_path = (
f"{autodetector_module_path}.MigrationAutodetector"
)
add_operation_path = f"{autodetector_class_path}.add_operation"
def _patched(autodetector, app_label, operation, *args, **kwargs):
handler = AddOperationHandler(autodetector, app_label, args, kwargs)
if isinstance(operation, CreateModel):
return handler.add_create_model(operation)
if isinstance(operation, DeleteModel):
return handler.add_delete_model(operation)
if isinstance(operation, AddField):
return handler.add_field(operation)
if isinstance(operation, RemoveField):
return handler.remove_field(operation)
if isinstance(operation, AlterField):
return handler.alter_field(operation)
if isinstance(operation, RenameField):
return handler.rename_field(operation)
return handler.add(operation)
with mock.patch(add_operation_path, new=_patched):
yield
|
|
"""
Utility functions for
- building and importing modules on test time, using a temporary location
- detecting if compilers are present
"""
import os
import sys
import subprocess
import tempfile
import shutil
import atexit
import textwrap
import re
import pytest
from numpy.compat import asbytes, asstr
from numpy.testing import temppath
from importlib import import_module
#
# Maintaining a temporary module directory
#
_module_dir = None
_module_num = 5403
def _cleanup():
global _module_dir
if _module_dir is not None:
try:
sys.path.remove(_module_dir)
except ValueError:
pass
try:
shutil.rmtree(_module_dir)
except OSError:
pass
_module_dir = None
def get_module_dir():
global _module_dir
if _module_dir is None:
_module_dir = tempfile.mkdtemp()
atexit.register(_cleanup)
if _module_dir not in sys.path:
sys.path.insert(0, _module_dir)
return _module_dir
def get_temp_module_name():
# Assume single-threaded, and the module dir usable only by this thread
global _module_num
d = get_module_dir()
name = "_test_ext_module_%d" % _module_num
_module_num += 1
if name in sys.modules:
# this should not be possible, but check anyway
raise RuntimeError("Temporary module name already in use.")
return name
def _memoize(func):
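    # Memoize by the repr of the arguments; exceptions are cached too and re-raised on later calls.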
memo = {}
def wrapper(*a, **kw):
key = repr((a, kw))
if key not in memo:
try:
memo[key] = func(*a, **kw)
except Exception as e:
memo[key] = e
raise
ret = memo[key]
if isinstance(ret, Exception):
raise ret
return ret
wrapper.__name__ = func.__name__
return wrapper
#
# Building modules
#
@_memoize
def build_module(source_files, options=[], skip=[], only=[], module_name=None):
"""
Compile and import a f2py module, built from the given files.
"""
code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; "
"f2py2e.main()" % repr(sys.path))
d = get_module_dir()
# Copy files
dst_sources = []
f2py_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
dst = os.path.join(d, os.path.basename(fn))
shutil.copyfile(fn, dst)
dst_sources.append(dst)
base, ext = os.path.splitext(dst)
if ext in ('.f90', '.f', '.c', '.pyf'):
f2py_sources.append(dst)
# Prepare options
if module_name is None:
module_name = get_temp_module_name()
f2py_opts = ['-c', '-m', module_name] + options + f2py_sources
if skip:
f2py_opts += ['skip:'] + skip
if only:
f2py_opts += ['only:'] + only
# Build
cwd = os.getcwd()
try:
os.chdir(d)
cmd = [sys.executable, '-c', code] + f2py_opts
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("Running f2py failed: %s\n%s"
% (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
# Partial cleanup
for fn in dst_sources:
os.unlink(fn)
# Import
return import_module(module_name)
@_memoize
def build_code(source_code, options=[], skip=[], only=[], suffix=None,
module_name=None):
"""
Compile and import Fortran code using f2py.
"""
if suffix is None:
suffix = '.f'
with temppath(suffix=suffix) as path:
with open(path, 'w') as f:
f.write(source_code)
return build_module([path], options=options, skip=skip, only=only,
module_name=module_name)
#
# Check if compilers are available at all...
#
_compiler_status = None
def _get_compiler_status():
global _compiler_status
if _compiler_status is not None:
return _compiler_status
_compiler_status = (False, False, False)
# XXX: this is really ugly. But I don't know how to invoke Distutils
# in a safer way...
code = textwrap.dedent("""\
import os
import sys
sys.path = %(syspath)s
def configuration(parent_name='',top_path=None):
global config
from numpy.distutils.misc_util import Configuration
config = Configuration('', parent_name, top_path)
return config
from numpy.distutils.core import setup
setup(configuration=configuration)
config_cmd = config.get_config_cmd()
have_c = config_cmd.try_compile('void foo() {}')
print('COMPILERS:%%d,%%d,%%d' %% (have_c,
config.have_f77c(),
config.have_f90c()))
sys.exit(99)
""")
code = code % dict(syspath=repr(sys.path))
tmpdir = tempfile.mkdtemp()
try:
script = os.path.join(tmpdir, 'setup.py')
with open(script, 'w') as f:
f.write(code)
cmd = [sys.executable, 'setup.py', 'config']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmpdir)
out, err = p.communicate()
finally:
shutil.rmtree(tmpdir)
m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out)
if m:
_compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))),
bool(int(m.group(3))))
# Finished
return _compiler_status
def has_c_compiler():
return _get_compiler_status()[0]
def has_f77_compiler():
return _get_compiler_status()[1]
def has_f90_compiler():
return _get_compiler_status()[2]
#
# Building with distutils
#
@_memoize
def build_module_distutils(source_files, config_code, module_name, **kw):
"""
Build a module via distutils and import it.
"""
d = get_module_dir()
# Copy files
dst_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
dst = os.path.join(d, os.path.basename(fn))
shutil.copyfile(fn, dst)
dst_sources.append(dst)
# Build script
config_code = textwrap.dedent(config_code).replace("\n", "\n ")
code = textwrap.dedent("""\
import os
import sys
sys.path = %(syspath)s
def configuration(parent_name='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('', parent_name, top_path)
%(config_code)s
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
""") % dict(config_code=config_code, syspath=repr(sys.path))
script = os.path.join(d, get_temp_module_name() + '.py')
dst_sources.append(script)
with open(script, 'wb') as f:
f.write(asbytes(code))
# Build
cwd = os.getcwd()
try:
os.chdir(d)
cmd = [sys.executable, script, 'build_ext', '-i']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("Running distutils build failed: %s\n%s"
% (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
# Partial cleanup
for fn in dst_sources:
os.unlink(fn)
# Import
__import__(module_name)
return sys.modules[module_name]
#
# Unittest convenience
#
class F2PyTest:
code = None
sources = None
options = []
skip = []
only = []
suffix = '.f'
module = None
module_name = None
def setup(self):
if sys.platform == 'win32':
pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')
if self.module is not None:
return
# Check compiler availability first
if not has_c_compiler():
pytest.skip("No C compiler available")
codes = []
if self.sources:
codes.extend(self.sources)
if self.code is not None:
codes.append(self.suffix)
needs_f77 = False
needs_f90 = False
for fn in codes:
if fn.endswith('.f'):
needs_f77 = True
elif fn.endswith('.f90'):
needs_f90 = True
if needs_f77 and not has_f77_compiler():
pytest.skip("No Fortran 77 compiler available")
if needs_f90 and not has_f90_compiler():
pytest.skip("No Fortran 90 compiler available")
# Build the module
if self.code is not None:
self.module = build_code(self.code, options=self.options,
skip=self.skip, only=self.only,
suffix=self.suffix,
module_name=self.module_name)
if self.sources is not None:
self.module = build_module(self.sources, options=self.options,
skip=self.skip, only=self.only,
module_name=self.module_name)
|
|
import os, platform
import dynamixel
import time
import options
import math
import serial
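# Dynamixel positions are expressed in encoder ticks: 4096 ticks per full revolution (2*pi rad).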
ticks_per_rad = 4096.0/(math.pi*2)
############################################
# _______ __ ______ __
# /_ __(_)_ _ / // / __ \_ _____ ________/ /
# / / / / ' \ / _ / /_/ / |/|/ / _ `/ __/ _ /
# /_/ /_/_/_/_/ /_//_/\____/|__,__/\_,_/_/ \_,_/
############################################
myActuators = list()
def forwardKinematics(theta1, theta2, l1, l2):
return [l1*math.cos(theta1)+l2*(math.cos(theta1+theta2)),
l1*math.sin(theta1)+l2*(math.sin(theta1+theta2))]
#Given: xE,yE, l1, l2
#Return: theta1,theta2
def inverseKinematics(xIn, yIn, l1, l2):
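    # Two-link planar IK: the elbow angle comes from the law of cosines written in
    # atan2 form; the shoulder angle is the bearing to the target minus the elbow-induced offset.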
myTheta2 = 2*math.atan2(math.sqrt(((l1+l2)**2-(xIn**2+yIn**2))),math.sqrt((xIn**2+yIn**2.0)-(l1-l2)**2))
myTheta1 = math.atan2(yIn,xIn)-math.atan2(l2*math.sin(myTheta2),l1+l2*math.cos(myTheta2))
return (scaleToCircle(myTheta1), scaleToCircle(myTheta2))
def computeAltIK(x, y, theta1, theta2):
#theta1 and 2 are IK outputs
t2 = -theta2
angle_to_endpoint = scaleToCircle(math.atan2(y,x))
if angle_to_endpoint > theta1:
t1 = theta1 + 2*(angle_to_endpoint-theta1)
elif angle_to_endpoint < theta1:
t1 = theta1 + 2*(angle_to_endpoint-theta1)
else:
t1 = theta1
return (t1, t2)
def scaleToCircle(radianvalue):
return radianvalue % (2*math.pi)
def boundWithinGoal(value, upper, lower):
if value > upper:
bounded = upper
elif value < lower:
bounded = lower
else:
bounded = value
return bounded
def boundWithinRobotReach(x, y, radius):
if math.sqrt(math.pow(x,2)+math.pow(y,2)) > radius:
angle = math.atan2(y,x)
return (0.98*radius*math.cos(angle), 0.98*radius*math.sin(angle))
else:
return (x,y)
def withinThreshold(difference, thresh):
if abs(difference) <= thresh:
return True
elif abs(abs(difference)-2*math.pi) <= thresh:
return True
else:
return False
def actuatorsMoving(actuators):
for actuator in actuators:
if actuator.cache[dynamixel.defs.REGISTER['Moving']]:
return True
return False
if platform.dist()[0] == 'Ubuntu':
portName = options.ubuntu_port
elif os.name == "posix":
portName = options.unix_port
else:
portName = options.windows_port
serial = dynamixel.serial_stream.SerialStream( port=portName, baudrate=options.baudrate, timeout=1)
net = dynamixel.dynamixel_network.DynamixelNetwork( serial )
net.scan( 1, options.num_servos )
print "Scanning for Dynamixels...",
for dyn in net.get_dynamixels():
print dyn.id,
myActuators.append(net[dyn.id])
print "FOUND:" + str(myActuators)
for actuator in myActuators:
actuator.moving_speed = options.servo_speed
actuator.synchronized = True
actuator.torque_enable = True
actuator.torque_control_enable = False
actuator.torque_limit = 1024
actuator.max_torque = 1024
class Arm(object):
def __init__(self, shoulder, elbow, params):
self.params = params
self.shoulder = shoulder
self.elbow = elbow
self.elbow_angle = 0
self.shoulder_angle = 0
#motors
def update(self):
net.synchronize()
self.shoulder.read_all()
self.elbow.read_all()
def moveToXY(self,x,y):
theta1, theta2 = inverseKinematics(x,y, self.params.l1, self.params.l2)
(shoulderCurr, elbowCurr) = self.returnCurrentPositions()
(shoulderCurrNOMOD, elbowCurrNOMOD) = self.returnCurrentPositionsNOMOD()
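        # Pick the 2*pi-equivalent of each target angle closest to the current joint
        # angle so the servos take the short way around instead of unwinding a full turn.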
alpha = shoulderCurr - theta1
if abs(alpha) > abs(shoulderCurr - (theta1+2*math.pi)):
alpha = shoulderCurr - (theta1+2*math.pi)
if abs(alpha) > abs(shoulderCurr - (theta1-2*math.pi)):
alpha = shoulderCurr - (theta1-2*math.pi)
beta = elbowCurr - theta2
if abs(beta) > abs(elbowCurr - (theta2+2*math.pi)):
beta = elbowCurr - (theta2+2*math.pi)
if abs(beta) > abs(elbowCurr - (theta2-2*math.pi)):
beta = elbowCurr - (theta2-2*math.pi)
self.moveToTheta(shoulderCurrNOMOD-alpha, elbowCurrNOMOD-beta)
def moveToXYGoal(self, x, y):
x, y = Arm.transformGoaltoRobot(self,x,y)
x, y = boundWithinRobotReach(x,y, self.params.l1+self.params.l2)
x = boundWithinGoal(x, self.params.max_x, self.params.min_x)
y = boundWithinGoal(y, self.params.max_y, self.params.min_y)
self.moveToXY(x,y)
def transformGoaltoRobot(self,x,y):
return (x-self.params.horizontal_offset, y-self.params.vertical_offset)
def moveToTheta(self, t1, t2):
#print t1, t2
self.shoulder_angle = t1
self.elbow_angle = t2
self.shoulder.goal_position = int((self.shoulder_angle*ticks_per_rad)+self.params.shoulder_offset)
self.elbow.goal_position = int(((self.elbow_angle*ticks_per_rad) +self.params.elbow_offset)/2)
def isMoving(self):
for actuator in [self.shoulder, self.elbow]:
if actuator.cache[dynamixel.defs.REGISTER['Moving']]:
return True
return False
def returnCurrentPositions(self):
theta1 = (self.shoulder.cache[dynamixel.defs.REGISTER['CurrentPosition']]-self.params.shoulder_offset)/ticks_per_rad
theta2 = (self.elbow.cache[dynamixel.defs.REGISTER['CurrentPosition']]-self.params.elbow_offset)/ticks_per_rad*2
theta1 = scaleToCircle(theta1)
theta2 = scaleToCircle(theta2)
return (theta1, theta2)
def returnCurrentPositionsNOMOD(self):
theta1 = (self.shoulder.cache[dynamixel.defs.REGISTER['CurrentPosition']]-self.params.shoulder_offset)/ticks_per_rad
theta2 = (self.elbow.cache[dynamixel.defs.REGISTER['CurrentPosition']]-self.params.elbow_offset)/ticks_per_rad*2
return (theta1, theta2)
def nearGoalPosition(self):
shoulder, elbow = Arm.returnCurrentPositions(self)
if withinThreshold(scaleToCircle(shoulder-self.shoulder_angle),self.params.angle_threshold) and withinThreshold(scaleToCircle(elbow-self.elbow_angle),self.params.angle_threshold):
return True
else:
return False
a = Arm(myActuators[0], myActuators[1], options.left_arm)
b = Arm(myActuators[2], myActuators[3], options.right_arm)
a.update()
b.update()
points = [[55,0],[50,0],[45,0],[40,0],[35,0],[30,0],[25,0],[20,0],[15,0],[10,0],[5,0]]
goal = [60,0]
t = time.time()
raw_input("Press any key to start")
while True:
try:
(theta1_left, theta2_left) = a.returnCurrentPositions()
(theta1_right, theta2_right) = b.returnCurrentPositions()
currXY_left = forwardKinematics(theta1_left, theta2_left, options.left_arm.l1, options.left_arm.l2) #in robot coords
currXY_left_world = [currXY_left[0]+options.left_arm.horizontal_offset, currXY_left[1]+options.left_arm.vertical_offset]
gamma_left = math.atan2(goal[1]-currXY_left_world[1], goal[0]-currXY_left_world[0])
currXY_right = forwardKinematics(theta1_right, theta2_right, options.right_arm.l1, options.right_arm.l2) #in robot coords
currXY_right_world = [currXY_right[0]+options.right_arm.horizontal_offset, currXY_right[1]+options.right_arm.vertical_offset]
gamma_right = math.atan2(goal[1]-currXY_right_world[1], goal[0]-currXY_right_world[0])
l_left=4
l_right=4
if( ((goal[1]-currXY_left_world[1])**2 + (goal[0]-currXY_left_world[0])**2) < l_left**2):
l_left = math.sqrt((goal[1]-currXY_left_world[1])**2 + (goal[0]-currXY_left_world[0])**2)
if ( ((goal[1]-currXY_right_world[1])**2 + (goal[0]-currXY_right_world[0])**2) < l_right**2):
l_right = math.sqrt((goal[1]-currXY_right_world[1])**2 + (goal[0]-currXY_right_world[0])**2)
a.moveToXYGoal(currXY_left_world[0]+l_left*math.cos(gamma_left), currXY_left_world[1]+l_left*math.sin(gamma_left))
b.moveToXYGoal(currXY_right_world[0]+l_right*math.cos(gamma_right), currXY_right_world[1]+l_right*math.sin(gamma_right))
a.update()
b.update()
'''
if time.time() > 1+t:
goal = points.pop()
t = time.time()
'''
except KeyboardInterrupt:
break
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from generate import generate
class VariantType:
def __init__(self,
variantType,
managedType,
emitAccessors=True,
isPrimitiveType=True,
unmanagedRepresentationType=None,
includeInUnionTypes=True,
getStatements=None,
setStatements=None,
critical=False):
self.emitAccessors = emitAccessors
self.variantType = variantType
self.managedType = managedType
self.isPrimitiveType = isPrimitiveType
if unmanagedRepresentationType is None: self.unmanagedRepresentationType = managedType
else: self.unmanagedRepresentationType = unmanagedRepresentationType
self.includeInUnionTypes = includeInUnionTypes
self.getStatements = getStatements
self.setStatements = setStatements
self.managedFieldName = "_" + self.variantType.lower()
firstChar = self.variantType[0]
self.name = self.variantType.lower().replace(firstChar.lower(), firstChar, 1)
self.accessorName = "As" + self.name
self.critical = critical
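# Naming example (derived from the rules above): VariantType('BSTR', "String")
# yields managedFieldName '_bstr', name 'Bstr' and accessorName 'AsBstr', so the
# generator emits a union field named '_bstr' and a C# accessor property 'AsBstr'.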
def write_UnionTypes(self, cw):
if not self.includeInUnionTypes: return
if self.unmanagedRepresentationType == "IntPtr":
cw.write('[SuppressMessage("Microsoft.Reliability", "CA2006:UseSafeHandleToEncapsulateNativeResources")]')
if self.managedFieldName == '_bstr':
cw.write('[SuppressMessage("Microsoft.Performance", "CA1823:AvoidUnusedPrivateFields")]')
cw.write("[FieldOffset(0)] internal %s %s;" % (self.unmanagedRepresentationType, self.managedFieldName))
def write_ToObject(self, cw):
cw.write("case VarEnum.VT_%s: return %s;" % (self.variantType, self.accessorName))
def write_accessor(self, cw, transparent):
if not self.emitAccessors:
return
cw.write("// VT_%s" % self.variantType)
cw.enter_block('public %s %s' % (self.managedType, self.accessorName))
# Getter
if not transparent and self.critical: gen_exposed_code_security(cw)
cw.enter_block("get")
cw.write("Debug.Assert(VariantType == VarEnum.VT_%s);" % self.variantType)
if self.getStatements == None:
cw.write("return _typeUnion._unionTypes.%s;" % self.managedFieldName)
else:
for s in self.getStatements: cw.write(s)
cw.exit_block()
# Setter
if not transparent and self.critical: gen_exposed_code_security(cw)
cw.enter_block("set")
cw.write("Debug.Assert(IsEmpty); // The setter can only be called once as VariantClear might be needed otherwise")
cw.write("VariantType = VarEnum.VT_%s;" % self.variantType)
if self.setStatements == None:
cw.write("_typeUnion._unionTypes.%s = value;" % self.managedFieldName)
else:
for s in self.setStatements: cw.write(s)
cw.exit_block()
cw.exit_block()
# Byref Setter
cw.writeline()
if not transparent: gen_exposed_code_security(cw)
cw.enter_block("public void SetAsByref%s(ref %s value)" % (self.name, self.unmanagedRepresentationType))
cw.write("Debug.Assert(IsEmpty); // The setter can only be called once as VariantClear might be needed otherwise")
cw.write("VariantType = (VarEnum.VT_%s | VarEnum.VT_BYREF);" % self.variantType)
cw.write("_typeUnion._unionTypes._byref = UnsafeMethods.Convert%sByrefToPtr(ref value);" % self.unmanagedRepresentationType)
cw.exit_block()
cw.writeline()
def write_accessor_propertyinfo(self, cw):
if self.emitAccessors:
cw.write('case VarEnum.VT_%s: return typeof(Variant).GetProperty("%s");' % (self.variantType, self.accessorName))
def write_byref_setters(self, cw):
if self.emitAccessors:
cw.write('case VarEnum.VT_%s: return typeof(Variant).GetMethod("SetAsByref%s");' % (self.variantType, self.name))
def write_ComToManagedPrimitiveTypes(self, cw):
wrapper_types = ["CY", "DISPATCH", "UNKNOWN", "ERROR"]
if not self.isPrimitiveType or (self.variantType in wrapper_types) : return
cw.write("dict[VarEnum.VT_%s] = typeof(%s);" % (self.variantType, self.managedType))
def write_IsPrimitiveType(self, cw):
if not self.isPrimitiveType: return
cw.write("case VarEnum.VT_%s:" % self.variantType)
def write_ConvertByrefToPtr(self, cw, transparent):
if self.isPrimitiveType and self.unmanagedRepresentationType == self.managedType and self.variantType != "ERROR":
if not transparent: gen_exposed_code_security(cw)
cw.write('[SuppressMessage("Microsoft.Design", "CA1045:DoNotPassTypesByReference")]')
if self.unmanagedRepresentationType == 'Int32':
cw.enter_block("public static unsafe IntPtr Convert%sByrefToPtr(ref %s value)" % (self.unmanagedRepresentationType, self.unmanagedRepresentationType))
else:
cw.enter_block("internal static unsafe IntPtr Convert%sByrefToPtr(ref %s value)" % (self.unmanagedRepresentationType, self.unmanagedRepresentationType))
cw.enter_block('fixed (%s *x = &value)' % self.unmanagedRepresentationType)
cw.write('AssertByrefPointsToStack(new IntPtr(x));')
cw.write('return new IntPtr(x);')
cw.exit_block()
cw.exit_block()
cw.write('')
def write_ConvertByrefToPtr_Outer(self, cw, transparent):
if self.isPrimitiveType and self.unmanagedRepresentationType == self.managedType and self.variantType != "ERROR":
if not transparent: gen_exposed_code_security(cw)
cw.write('[SuppressMessage("Microsoft.Design", "CA1045:DoNotPassTypesByReference")]')
cw.write("public static IntPtr Convert%sByrefToPtr(ref %s value) { return _Convert%sByrefToPtr(ref value); }" % (self.unmanagedRepresentationType, self.unmanagedRepresentationType, self.unmanagedRepresentationType))
def write_ConvertByrefToPtrDelegates(self, cw):
if self.isPrimitiveType and self.unmanagedRepresentationType == self.managedType and self.variantType != "ERROR":
cw.write("private static readonly ConvertByrefToPtrDelegate<%s> _Convert%sByrefToPtr = (ConvertByrefToPtrDelegate<%s>)Delegate.CreateDelegate(typeof(ConvertByrefToPtrDelegate<%s>), _ConvertByrefToPtr.MakeGenericMethod(typeof(%s)));" % (5 * (self.unmanagedRepresentationType,)))
def gen_exposed_code_security(cw):
cw.write("#if CLR2")
cw.write("[PermissionSet(SecurityAction.LinkDemand, Unrestricted = true)]")
cw.write("#endif")
cw.write("[SecurityCritical]")
variantTypes = [
# VariantType('varEnum', 'managed_type')
VariantType('I1', "SByte"),
VariantType('I2', "Int16"),
VariantType('I4', "Int32"),
VariantType('I8', "Int64"),
VariantType('UI1', "Byte"),
VariantType('UI2', "UInt16"),
VariantType('UI4', "UInt32"),
VariantType('UI8', "UInt64"),
VariantType('INT', "IntPtr"),
VariantType('UINT', "UIntPtr"),
VariantType('BOOL', "Boolean",
unmanagedRepresentationType="Int16",
getStatements=["return _typeUnion._unionTypes._bool != 0;"],
setStatements=["_typeUnion._unionTypes._bool = value ? (Int16)(-1) : (Int16)0;"]),
VariantType("ERROR", "Int32"),
VariantType('R4', "Single"),
VariantType('R8', "Double"),
VariantType('DECIMAL', "Decimal",
includeInUnionTypes=False,
getStatements=["// The first byte of Decimal is unused, but usually set to 0",
"Variant v = this;",
"v._typeUnion._vt = 0;",
"return v._decimal;"],
setStatements=["_decimal = value;",
"// _vt overlaps with _decimal, and should be set after setting _decimal",
"_typeUnion._vt = (ushort)VarEnum.VT_DECIMAL;"]),
VariantType("CY", "Decimal",
unmanagedRepresentationType="Int64",
getStatements=["return Decimal.FromOACurrency(_typeUnion._unionTypes._cy);"],
setStatements=["_typeUnion._unionTypes._cy = Decimal.ToOACurrency(value);"]),
VariantType('DATE', "DateTime",
unmanagedRepresentationType="Double",
getStatements=["return DateTime.FromOADate(_typeUnion._unionTypes._date);"],
setStatements=["_typeUnion._unionTypes._date = value.ToOADate();"]),
VariantType('BSTR', "String",
unmanagedRepresentationType="IntPtr",
getStatements=[
"if (_typeUnion._unionTypes._bstr != IntPtr.Zero) {",
" return Marshal.PtrToStringBSTR(_typeUnion._unionTypes._bstr);",
"}",
"return null;"
],
setStatements=[
"if (value != null) {",
" Marshal.GetNativeVariantForObject(value, UnsafeMethods.ConvertVariantByrefToPtr(ref this));",
"}"
],
critical=True),
VariantType("UNKNOWN", "Object",
isPrimitiveType=False,
unmanagedRepresentationType="IntPtr",
getStatements=[
"if (_typeUnion._unionTypes._dispatch != IntPtr.Zero) {",
" return Marshal.GetObjectForIUnknown(_typeUnion._unionTypes._unknown);",
"}",
"return null;"
],
setStatements=[
"if (value != null) {",
" _typeUnion._unionTypes._unknown = Marshal.GetIUnknownForObject(value);",
"}"
],
critical=True),
VariantType("DISPATCH", "Object",
isPrimitiveType=False,
unmanagedRepresentationType="IntPtr",
getStatements=[
"if (_typeUnion._unionTypes._dispatch != IntPtr.Zero) {",
" return Marshal.GetObjectForIUnknown(_typeUnion._unionTypes._dispatch);",
"}",
"return null;"
],
setStatements=[
"if (value != null) {",
" _typeUnion._unionTypes._unknown = GetIDispatchForObject(value);",
"}"
],
critical=True),
VariantType("VARIANT", "Object",
emitAccessors=False,
isPrimitiveType=False,
unmanagedRepresentationType="Variant",
includeInUnionTypes=False, # will use "this"
getStatements=["return Marshal.GetObjectForNativeVariant(UnsafeMethods.ConvertVariantByrefToPtr(ref this));"],
setStatements=["UnsafeMethods.InitVariantForObject(value, ref this);"],
critical=True)
]
managed_types_to_variant_types_add = [
("Char", "UI2"),
("CurrencyWrapper", "CY"),
("ErrorWrapper", "ERROR"),
]
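# For orientation, the union-type and accessor writers above produce C# along
# these lines for a simple entry such as VariantType('I4', "Int32") (an
# approximate sketch, not copied from any generated file):
#
#   [FieldOffset(0)] internal Int32 _i4;
#   // VT_I4
#   public Int32 AsI4 { get { ... } set { ... } }
#   public void SetAsByrefI4(ref Int32 value) { ... }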
def gen_UnionTypes(cw):
for variantType in variantTypes:
variantType.write_UnionTypes(cw)
def gen_ToObject(cw):
for variantType in variantTypes:
variantType.write_ToObject(cw)
def gen_accessors(transparent):
def gen_accessors(cw):
for variantType in variantTypes:
variantType.write_accessor(cw, transparent)
return gen_accessors
def gen_accessor_propertyinfo(cw):
for variantType in variantTypes:
variantType.write_accessor_propertyinfo(cw)
def gen_byref_setters(cw):
for variantType in variantTypes:
variantType.write_byref_setters(cw)
def gen_ComToManagedPrimitiveTypes(cw):
for variantType in variantTypes:
variantType.write_ComToManagedPrimitiveTypes(cw)
def gen_ManagedToComPrimitiveTypes(cw):
import System
import clr
# build inverse map
type_map = {}
for variantType in variantTypes:
# take them in order, first one wins ... handles ERROR and INT32 conflict
if variantType.isPrimitiveType and not type_map.has_key(variantType.managedType):
type_map[variantType.managedType] = variantType.variantType
for managedType, variantType in managed_types_to_variant_types_add:
type_map[managedType] = variantType
def is_system_type(name):
t = getattr(System, name, None)
return t and System.Type.GetTypeCode(t) not in [System.TypeCode.Empty, System.TypeCode.Object]
system_types = filter(is_system_type, type_map.keys())
system_types = sorted(system_types, cmp, lambda name: int(System.Type.GetTypeCode(getattr(System, name))))
other_types = sorted(set(type_map.keys()).difference(set(system_types)))
# switch generated from the system types
cw.enter_block("switch (Type.GetTypeCode(argumentType))")
for t in system_types:
cw.write("""case TypeCode.%(code)s:
primitiveVarEnum = VarEnum.VT_%(vt)s;
return true;""", code = System.Type.GetTypeCode(getattr(System, t)).ToString(), vt = type_map[t])
cw.exit_block()
# if statements from the rest
for t in other_types:
clrtype = getattr(System, t, None)
if not clrtype: clrtype = getattr(System.Runtime.InteropServices, t, None)
clrtype = clr.GetClrType(clrtype)
cw.write("""
if (argumentType == typeof(%(type)s)) {
primitiveVarEnum = VarEnum.VT_%(vt)s;
return true;
}""", type = clrtype.Name, vt = type_map[t])
def gen_IsPrimitiveType(cw):
for variantType in variantTypes:
variantType.write_IsPrimitiveType(cw)
def gen_ConvertByrefToPtr(transparent):
def gen_ConvertByrefToPtr(cw):
for variantType in variantTypes:
if transparent:
variantType.write_ConvertByrefToPtr_Outer(cw, transparent)
else:
variantType.write_ConvertByrefToPtr(cw, transparent)
return gen_ConvertByrefToPtr
def gen_ConvertByrefToPtrDelegates(cw):
for variantType in variantTypes:
variantType.write_ConvertByrefToPtrDelegates(cw)
def main():
return generate(
("Convert ByRef Delegates", gen_ConvertByrefToPtrDelegates),
("Outer Managed To COM Primitive Type Map", gen_ManagedToComPrimitiveTypes),
("Outer Variant union types", gen_UnionTypes),
("Outer Variant ToObject", gen_ToObject),
("Outer Variant accessors", gen_accessors(True)),
("Outer Variant accessors PropertyInfos", gen_accessor_propertyinfo),
("Outer Variant byref setter", gen_byref_setters),
("Outer ComToManagedPrimitiveTypes", gen_ComToManagedPrimitiveTypes),
("Outer Variant IsPrimitiveType", gen_IsPrimitiveType),
("Outer ConvertByrefToPtr", gen_ConvertByrefToPtr(True)),
#TODO: we don't build ndp\fx\src\Dynamic any more for IronPython
#("Managed To COM Primitive Type Map", gen_ManagedToComPrimitiveTypes),
#("Variant union types", gen_UnionTypes),
#("Variant ToObject", gen_ToObject),
#("Variant accessors", gen_accessors(False)),
#("Variant accessors PropertyInfos", gen_accessor_propertyinfo),
#("Variant byref setter", gen_byref_setters),
#("ComToManagedPrimitiveTypes", gen_ComToManagedPrimitiveTypes),
#("Variant IsPrimitiveType", gen_IsPrimitiveType),
#("ConvertByrefToPtr", gen_ConvertByrefToPtr(False)),
)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
# (C) Copyright 2014 Voyager Search
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
import collections
import shutil
import requests
from utils import status
from utils import task_utils
import warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
warnings.simplefilter('ignore', InsecureRequestWarning)
# Get SSL trust setting.
verify_ssl = task_utils.get_ssl_mode()
result_count = 0
processed_count = 0.
status_writer = status.Writer()
import arcpy
skipped_reasons = {}
errors_reasons = {}
layer_name = ''
existing_fields = []
new_fields = []
field_values = []
def execute(request):
"""Converts each input dataset to kml (.kmz).
:param request: json as a dict.
"""
converted = 0
skipped = 0
errors = 0
global result_count
parameters = request['params']
out_workspace = os.path.join(request['folder'], 'temp')
if not os.path.exists(out_workspace):
os.makedirs(out_workspace)
# Get the boundary box extent for input to KML tools.
extent = ''
try:
try:
ext = task_utils.get_parameter_value(parameters, 'processing_extent', 'wkt')
if ext:
sr = task_utils.get_spatial_reference("4326")
extent = task_utils.from_wkt(ext, sr)
except KeyError:
ext = task_utils.get_parameter_value(parameters, 'processing_extent', 'feature')
if ext:
extent = arcpy.Describe(ext).extent
except KeyError:
pass
# Get the output file name.
output_file_name = task_utils.get_parameter_value(parameters, 'output_file_name', 'value')
if not output_file_name:
output_file_name = 'kml_results'
result_count, response_index = task_utils.get_result_count(parameters)
# Query the index for results in groups of 25.
query_index = task_utils.QueryIndex(parameters[response_index])
fl = query_index.fl
query = '{0}{1}{2}'.format(sys.argv[2].split('=')[1], '/select?&wt=json', fl)
fq = query_index.get_fq()
if fq:
groups = task_utils.grouper(range(0, result_count), task_utils.CHUNK_SIZE, '')
query += fq
elif 'ids' in parameters[response_index]:
groups = task_utils.grouper(list(parameters[response_index]['ids']), task_utils.CHUNK_SIZE, '')
else:
groups = task_utils.grouper(range(0, result_count), task_utils.CHUNK_SIZE, '')
# Begin processing
status_writer.send_percent(0.0, _('Starting to process...'), 'convert_to_kml')
headers = {'x-access-token': task_utils.get_security_token(request['owner'])}
for group in groups:
if fq:
results = requests.get(query + "&rows={0}&start={1}".format(task_utils.CHUNK_SIZE, group[0]), verify=verify_ssl, headers=headers)
elif 'ids' in parameters[response_index]:
results = requests.get(query + '{0}&ids={1}'.format(fl, ','.join(group)), verify=verify_ssl, headers=headers)
else:
results = requests.get(query + "&rows={0}&start={1}".format(task_utils.CHUNK_SIZE, group[0]), verify=verify_ssl, headers=headers)
docs = results.json()['response']['docs']
input_items = task_utils.get_input_items(docs)
if not input_items:
input_items = task_utils.get_input_items(parameters[response_index]['response']['docs'])
input_rows = collections.defaultdict(list)
for doc in docs:
if 'path' not in doc:
input_rows[doc['name']].append(doc)
if input_rows:
result = convert_to_kml(input_rows, out_workspace, extent)
converted += result[0]
errors += result[1]
skipped += result[2]
if input_items:
result = convert_to_kml(input_items, out_workspace, extent)
converted += result[0]
errors += result[1]
skipped += result[2]
if not input_items and not input_rows:
status_writer.send_state(status.STAT_FAILED, _('No items to process. Check if items exist.'))
return
# Zip up kmz files if more than one.
if converted > 1:
status_writer.send_status("Converted: {}".format(converted))
zip_file = task_utils.zip_data(out_workspace, '{0}.zip'.format(output_file_name))
shutil.move(zip_file, os.path.join(os.path.dirname(out_workspace), os.path.basename(zip_file)))
shutil.copy2(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'supportfiles', '_thumb.png'), request['folder'])
elif converted == 1:
try:
kml_file = glob.glob(os.path.join(out_workspace, '*.kmz'))[0]
tmp_lyr = arcpy.KMLToLayer_conversion(kml_file, out_workspace, 'kml_layer')
task_utils.make_thumbnail(tmp_lyr.getOutput(0), os.path.join(request['folder'], '_thumb.png'))
except arcpy.ExecuteError:
pass
shutil.move(kml_file, os.path.join(request['folder'], os.path.basename(kml_file)))
# Update state if necessary.
if skipped > 0 or errors > 0:
status_writer.send_state(status.STAT_WARNING, _('{0} results could not be processed').format(errors + skipped))
task_utils.report(os.path.join(request['folder'], '__report.json'), converted, skipped, errors, errors_reasons, skipped_reasons)
def convert_to_kml(input_items, out_workspace, extent, show_progress=False):
converted = 0
errors = 0
skipped = 0
global processed_count
global layer_name
global existing_fields
global new_fields
global field_values
arcpy.env.overwriteOutput = True
for ds, out_name in input_items.iteritems():
try:
# -----------------------------------------------
# If the item is a service layer, process and continue.
# -----------------------------------------------
if ds.startswith('http'):
try:
service_layer = task_utils.ServiceLayer(ds, extent.JSON, 'esriGeometryPolygon')
arcpy.env.overwriteOutput = True
oid_groups = service_layer.object_ids
out_features = None
g = 0.
group_cnt = service_layer.object_ids_cnt
if not arcpy.Exists(os.path.join(out_workspace, 'temp.gdb')):
temp_gdb = arcpy.CreateFileGDB_management(out_workspace, 'temp.gdb')
temp_gdb = temp_gdb[0]
else:
temp_gdb = os.path.join(out_workspace, 'temp.gdb')
for group in oid_groups:
g += 1
group = [oid for oid in group if oid]
where = '{0} IN {1}'.format(service_layer.oid_field_name, tuple(group))
url = ds + "/query?where={}&outFields={}&returnGeometry=true&f=json&".format(where, '*')
feature_set = arcpy.FeatureSet()
try:
feature_set.load(url)
except Exception:
continue
if not out_features:
out_features = arcpy.CopyFeatures_management(feature_set, task_utils.create_unique_name(out_name, temp_gdb))
else:
features = arcpy.CopyFeatures_management(feature_set, task_utils.create_unique_name(out_name, temp_gdb))
arcpy.Append_management(features, out_features, 'NO_TEST')
try:
arcpy.Delete_management(features)
except arcpy.ExecuteError:
pass
status_writer.send_percent(float(g) / group_cnt * 100, '', 'convert_to_kml')
arcpy.MakeFeatureLayer_management(out_features, out_name)
arcpy.LayerToKML_conversion(out_name, '{0}.kmz'.format(os.path.join(out_workspace, out_name)), 1, boundary_box_extent=extent)
processed_count += 1.
converted += 1
status_writer.send_percent(processed_count / result_count, _('Converted: {0}').format(ds), 'convert_to_kml')
continue
except Exception as ex:
status_writer.send_state(status.STAT_WARNING, str(ex))
errors += 1
errors_reasons[ds] = ex.message
continue
# Is the input a mxd data frame.
map_frame_name = task_utils.get_data_frame_name(ds)
if map_frame_name:
ds = ds.split('|')[0].strip()
# -------------------------------
# Is the input a geometry feature
# -------------------------------
if isinstance(out_name, list):
increment = task_utils.get_increment(result_count)
for row in out_name:
try:
name = arcpy.ValidateTableName(ds, 'in_memory')
name = os.path.join('in_memory', name)
# Clip the geometry.
geo_json = row['[geo]']
geom = arcpy.AsShape(geo_json)
row.pop('[geo]')
if not arcpy.Exists(name):
if arcpy.env.outputCoordinateSystem:
layer_name = arcpy.CreateFeatureclass_management('in_memory', os.path.basename(name), geom.type.upper())
else:
arcpy.env.outputCoordinateSystem = 4326
layer_name = arcpy.CreateFeatureclass_management('in_memory', os.path.basename(name), geom.type.upper())
# layer_name = arcpy.MakeFeatureLayer_management(name, 'flayer')
existing_fields = [f.name for f in arcpy.ListFields(layer_name)]
new_fields = []
field_values = []
for field, value in row.iteritems():
valid_field = arcpy.ValidateFieldName(field, 'in_memory')
new_fields.append(valid_field)
field_values.append(value)
arcpy.AddField_management(layer_name, valid_field, 'TEXT')
else:
if not geom.type.upper() == arcpy.Describe(name).shapeType.upper():
name = arcpy.CreateUniqueName(os.path.basename(name), 'in_memory')
if arcpy.env.outputCoordinateSystem:
layer_name = arcpy.CreateFeatureclass_management('in_memory', os.path.basename(name), geom.type.upper())
else:
arcpy.env.outputCoordinateSystem = 4326
layer_name = arcpy.CreateFeatureclass_management('in_memory', os.path.basename(name), geom.type.upper())
existing_fields = [f.name for f in arcpy.ListFields(layer_name)]
new_fields = []
field_values = []
for field, value in row.iteritems():
valid_field = arcpy.ValidateFieldName(field, 'in_memory')
new_fields.append(valid_field)
field_values.append(value)
if not valid_field in existing_fields:
arcpy.AddField_management(layer_name, valid_field, 'TEXT')
with arcpy.da.InsertCursor(layer_name, ["SHAPE@"] + new_fields) as icur:
icur.insertRow([geom] + field_values)
arcpy.MakeFeatureLayer_management(layer_name, os.path.basename(name))
arcpy.LayerToKML_conversion(os.path.basename(name),
'{0}.kmz'.format(os.path.join(out_workspace, os.path.basename(name))),
1,
boundary_box_extent=extent)
if (processed_count % increment) == 0:
status_writer.send_percent(float(processed_count) / result_count, _('Converted: {0}').format(row['name']), 'convert_to_kml')
processed_count += 1
converted += 1
except KeyError:
processed_count += 1
skipped += 1
skipped_reasons[ds] = 'Invalid input type'
status_writer.send_state(status.STAT_WARNING, _('Invalid input type: {0}').format(ds))
except Exception as ex:
processed_count += 1
errors += 1
errors_reasons[ds] = ex.message
continue
del icur
continue
dsc = arcpy.Describe(ds)
if os.path.exists('{0}.kmz'.format(os.path.join(out_workspace, out_name))):
out_name = os.path.basename(arcpy.CreateUniqueName(out_name + '.kmz', out_workspace))[:-4]
if dsc.dataType == 'FeatureClass':
arcpy.MakeFeatureLayer_management(ds, dsc.name)
if out_name == '':
out_name = dsc.name
arcpy.LayerToKML_conversion(dsc.name,
'{0}.kmz'.format(os.path.join(out_workspace, out_name)),
1,
boundary_box_extent=extent)
converted += 1
elif dsc.dataType == 'ShapeFile':
arcpy.MakeFeatureLayer_management(ds, dsc.name[:-4])
if out_name == '':
out_name = dsc.name[:-4]
arcpy.LayerToKML_conversion(dsc.name[:-4],
'{0}.kmz'.format(os.path.join(out_workspace, out_name)),
1,
boundary_box_extent=extent)
converted += 1
elif dsc.dataType == 'RasterDataset':
arcpy.MakeRasterLayer_management(ds, dsc.name)
if out_name == '':
out_name = dsc.name
arcpy.LayerToKML_conversion(dsc.name,
'{0}.kmz'.format(os.path.join(out_workspace, out_name)),
1,
boundary_box_extent=extent)
converted += 1
elif dsc.dataType == 'Layer':
if out_name == '':
if dsc.name.endswith('.lyr'):
out_name = dsc.name[:-4]
else:
out_name = dsc.name
arcpy.LayerToKML_conversion(ds,
'{0}.kmz'.format(os.path.join(out_workspace, out_name)),
1,
boundary_box_extent=extent)
converted += 1
elif dsc.dataType == 'FeatureDataset':
arcpy.env.workspace = ds
for fc in arcpy.ListFeatureClasses():
arcpy.MakeFeatureLayer_management(fc, 'tmp_lyr')
arcpy.LayerToKML_conversion('tmp_lyr',
'{0}.kmz'.format(os.path.join(out_workspace, fc)),
1,
boundary_box_extent=extent)
converted += 1
elif dsc.dataType == 'CadDrawingDataset':
arcpy.env.workspace = dsc.catalogPath
for cad_fc in arcpy.ListFeatureClasses():
if cad_fc.lower() == 'annotation':
try:
cad_anno = arcpy.ImportCADAnnotation_conversion(
cad_fc,
arcpy.CreateUniqueName('cadanno', arcpy.env.scratchGDB)
)
except arcpy.ExecuteError:
cad_anno = arcpy.ImportCADAnnotation_conversion(
cad_fc,
arcpy.CreateUniqueName('cadanno', arcpy.env.scratchGDB),
1
)
arcpy.MakeFeatureLayer_management(cad_anno, 'cad_lyr')
name = '{0}_{1}'.format(dsc.name[:-4], cad_fc)
arcpy.LayerToKML_conversion('cad_lyr',
'{0}.kmz'.format(os.path.join(out_workspace, name)),
1,
boundary_box_extent=extent)
converted += 1
else:
arcpy.MakeFeatureLayer_management(cad_fc, 'cad_lyr')
name = '{0}_{1}'.format(dsc.name[:-4], cad_fc)
arcpy.LayerToKML_conversion('cad_lyr',
'{0}.kmz'.format(os.path.join(out_workspace, name)),
1,
boundary_box_extent=extent)
converted += 1
# Map document to KML.
elif dsc.dataType == 'MapDocument':
mxd = arcpy.mapping.MapDocument(ds)
if map_frame_name:
data_frames = arcpy.mapping.ListDataFrames(mxd, map_frame_name)
else:
data_frames = arcpy.mapping.ListDataFrames(mxd)
for df in data_frames:
name = '{0}_{1}'.format(dsc.name[:-4], df.name)
arcpy.MapToKML_conversion(ds,
df.name,
'{0}.kmz'.format(os.path.join(out_workspace, name)),
extent_to_export=extent)
converted += 1
else:
processed_count += 1
status_writer.send_percent(processed_count / result_count, _('Invalid input type: {0}').format(dsc.name), 'convert_to_kml')
skipped += 1
skipped_reasons[ds] = _('Invalid input type: {0}').format(dsc.dataType)
continue
processed_count += 1
status_writer.send_percent(processed_count / result_count, _('Converted: {0}').format(ds), 'convert_to_kml')
status_writer.send_status(_('Converted: {0}').format(ds))
except Exception as ex:
processed_count += 1
status_writer.send_percent(processed_count / result_count, _('Skipped: {0}').format(ds), 'convert_to_kml')
status_writer.send_status(_('WARNING: {0}').format(repr(ex)))
errors_reasons[ds] = repr(ex)
errors += 1
pass
return converted, errors, skipped
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import ldap
import time
import paramiko
import traceback
import codecs
import subprocess
from cgi import escape
from vmchecker import paths, update_db, penalty, submissions, submit, coursedb
from vmchecker.coursedb import opening_course_db
from vmchecker.courselist import CourseList
from vmchecker.config import LdapConfig, StorerCourseConfig
try:
import simplejson as json
except ImportError:
import json
# The buffer size used for copying temporary files
FILE_BUF_SIZE=10000
# If requested, remember user for up to two weeks
EXTENDED_SESSION_TIMEOUT = 60 * 60 * 24 * 14
# .vmr files may be very large because of errors in the student's submission.
MAX_VMR_FILE_SIZE = 5 * 1024 * 1024 # 5 MB
# define ERROR_MESSAGES
ERR_AUTH = 1
ERR_EXCEPTION = 2
ERR_OTHER = 3
# define MD5 processing errors
MD5_ERR_BAD_MD5 = 'md5'
MD5_ERR_BAD_ZIP = 'zip'
# I18N support
import gettext
# NOTE: This is where the locale files are installed by default.
# Ideally this shouldn't be hardcoded, but it will do for now.
DEFAULT_LOCALE_PATH="/usr/local/share/locale"
DEFAULT_LOCALE="en"
lang_en = gettext.translation("vmchecker", DEFAULT_LOCALE_PATH, languages=["en"])
lang_fr = gettext.translation("vmchecker", DEFAULT_LOCALE_PATH, languages=["fr"])
lang_ro = gettext.translation("vmchecker", DEFAULT_LOCALE_PATH, languages=["ro"])
def install_i18n(lang):
if lang == "en":
lang_en.install()
elif lang == "ro":
lang_ro.install()
elif lang == "fr":
lang_fr.install()
else:
lang_en.install()
# End I18N support
class OutputString():
def __init__(self):
self.st = ""
def write(self, st):
self.st += st
def get(self):
return self.st
def xssescape(text):
"""Gets rid of < and > and & and, for good measure, :"""
return escape(text, quote=True).replace(':', '&#58;')
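# Example (illustrative): xssescape('<a href="x">') returns
# '&lt;a href=&quot;x&quot;&gt;', making user-supplied text safe to embed in the
# HTML fragments and JSON payloads assembled below.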
def get_user(username, password):
"""Find the username for a user based on username/password.
This searches LDAP or some file-based json files in the user's
home directories.
Returns the username on success.
"""
# although a misconfigured user can block access to any course,
# we prefer early LOUD errors to silently ignored ones.
# Fail fast, fail loudly!
r = get_user_from_auth_files(username, password)
if not r is None:
return r
try:
r = get_ldap_user(username, password)
except:
r = None
return r
def get_user_from_auth_files(username, password):
"""Search all courseses for auth_files and if we can login in any
course, return the login from there"""
for coursecfg_fname in CourseList().course_configs():
vmpaths = paths.VmcheckerPaths(StorerCourseConfig(coursecfg_fname).root_path())
r = get_user_from_auth_file(vmpaths, username, password)
if not r is None:
return r
return None
def get_user_from_auth_file(vmpaths, username, password):
"""Try to authenticate using one course's auth_file.
Return the username on success.
"""
if not os.path.exists(vmpaths.auth_file()):
return None
with open(vmpaths.auth_file()) as handle:
auth_file_contents = handle.read()
auth_dic = json.loads(auth_file_contents)['auth']
if auth_dic.has_key(username) and auth_dic[username] == password:
return username
return None
def get_ldap_user(username, password):
"""Try to authenticate using the global LDAP configuration file.
Return the username on success.
"""
ldap_cfg = LdapConfig()
con = ldap.initialize(ldap_cfg.server())
if not ldap_cfg.bind_anonymous():
con.simple_bind_s(ldap_cfg.bind_user(),
ldap_cfg.bind_pass())
baseDN = ldap_cfg.root_search()
searchScope = ldap.SCOPE_SUBTREE
retrieveAttributes = None
# XXX: Needs sanitization
searchFilter = '(uid=' + username + ')'
timeout = 0
count = 0
# find the user's dn
result_id = con.search(baseDN,
searchScope,
searchFilter,
retrieveAttributes)
result_set = []
while 1:
result_type, result_data = con.result(result_id, timeout)
if (result_data == []):
break
else:
if result_type == ldap.RES_SEARCH_ENTRY:
result_set.append(result_data)
if len(result_set) == 0:
#no results
return None
if len(result_set) > 1:
# too many results for the same uid
raise
user_dn, entry = result_set[0][0]
if not ldap_cfg.bind_anonymous():
con.unbind_s()
# check the password
try:
con = ldap.initialize(ldap_cfg.server())
con.simple_bind_s(user_dn, password)
except ldap.INVALID_CREDENTIALS:
return None
except:
raise
return entry['cn'][0]
def _find_file(searched_file_name, rfiles):
"""Search for a filename in an array for {fname:fcontent} dicts"""
for rfile in rfiles:
if rfile.has_key(searched_file_name):
return rfile
return None
def submission_upload_info(vmcfg, courseId, assignment, account, isTeamAccount, isGraded):
"""Return a string explaining the submission upload time, deadline
and the late submission penalty
"""
vmpaths = paths.VmcheckerPaths(vmcfg.root_path())
sbroot = vmpaths.dir_cur_submission_root(assignment, account)
grade_file = paths.submission_results_grade(sbroot)
sbcfg = paths.submission_config_file(sbroot)
if not os.path.exists(sbcfg):
return _("No submission exists for this assignment")
late_penalty = update_db.compute_late_penalty(assignment, account, vmcfg)
ta_penalty = update_db.compute_TA_penalty(grade_file)
deadline_str = vmcfg.assignments().get(assignment, 'Deadline')
total_points = int(vmcfg.assignments().get(assignment, 'TotalPoints'))
deadline_struct = time.strptime(vmcfg.assignments().get(assignment, 'Deadline'),
penalty.DATE_FORMAT)
sss = submissions.Submissions(vmpaths)
upload_time_str = sss.get_upload_time_str(assignment, account)
upload_time_struct = sss.get_upload_time_struct(assignment, account)
deadline_explanation = penalty.verbose_time_difference(upload_time_struct, deadline_struct)
submitter_explanation = None
if isTeamAccount:
submitting_user = sss.get_submitting_user(assignment, account)
if submitting_user is not None:
submitter_explanation = _("Submitted by") + ": " + submitting_user
max_line_width = 0
rows_to_print = []
if submitter_explanation is not None:
rows_to_print += [
[ submitter_explanation ],
[ '' ]
]
rows_to_print += [
[ _("Submission date"), upload_time_str ],
[ _("Assignment deadline"), deadline_str ],
[ deadline_explanation ]
]
if isGraded or not vmcfg.assignments().is_deadline_hard(assignment):
rows_to_print += [
[ '' ]
]
if not vmcfg.assignments().is_deadline_hard(assignment):
rows_to_print += [
[ _("Penalty (late submission)"), str(late_penalty) ],
]
if isGraded:
rows_to_print += [
[ _("Penalty (grading)"), str(ta_penalty) ],
[ _("Penalty (total)"), str(ta_penalty + late_penalty) ],
[ '' ],
[ _("Grade"), str(total_points + ta_penalty + late_penalty) ]
]
for row in rows_to_print:
row[0] = row[0].decode("utf-8")
if len(row) == 2 and len(row[0]) > max_line_width:
max_line_width = len(row[0])
if isGraded:
# Put a dashed line just above the 'Grade' line
rows_to_print[len(rows_to_print) - 2][0] = '-' * max_line_width
ret = u""
for row in rows_to_print:
if len(row) == 1:
ret += row[0] + "\n"
elif len(row) == 2:
ret += unicode("{0[0]:<" + str(max_line_width) + "} : {0[1]}\n").format(row)
ret += "\n"
return ret
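# The rows assembled above render as a small aligned report, roughly like the
# following (labels come from the i18n strings; values are invented here purely
# for illustration):
#
#   Submission date     : 2013-10-01 12:34:56
#   Assignment deadline : 2013-10-02 23:59:00
#   1 day before the deadline
#   Penalty (grading)   : -1
#   Penalty (total)     : -1
#   Grade               : 9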
def sortResultFiles(rfiles):
"""Sort the vector of result files and change keys with human
readable descriptions"""
file_descriptions = [
{'fortune.vmr' : _('Results not yet available')},
{'grade.vmr' : _('Grade')},
{'submission.vmr' : _('Submission info')},
{'build-stdout.vmr' : _('Compilation (stdout)')},
{'build-stderr.vmr' : _('Compilation (stderr)')},
{'run-stdout.vmr' : _('Testing (stdout)')},
{'run-stderr.vmr' : _('Testing (stderr)')},
{'run-km.vmr' : _('Kernel messages (netconsole)')},
{'queue-contents.vmr' : _('Testing queue')},
{'vmchecker-stderr.vmr' : _('Errors')},
]
ret = []
for f_des in file_descriptions:
key = f_des.keys()[0] # there is only one key:value pair in each dict
rfile = _find_file(key, rfiles)
if rfile == None:
continue
else:
ret.append({f_des.get(key) : rfile.get(key)})
rfiles.remove(rfile)
ret += rfiles
return ret
def get_test_queue_contents(vmcfg, courseId):
"""Get the contents of the test queues for all testers configured
in the system."""
try:
tstcfg = vmcfg.testers()
queue_contents = {} # dict of strings
for tester_id in tstcfg:
queue_contents[tester_id] = submit.get_tester_queue_contents(vmcfg, tester_id)
# return the concatenation of all testers' queue listings as JSON
return json.dumps(queue_contents, indent=4)
except:
strout = OutputString()
traceback.print_exc(file = strout)
return json.dumps({'errorTrace' : strout.get()}, indent=4)
def get_storagedir_contents(courseId, assignmentId, account):
"""Get the content of a the archive coresponding to a
MD5Submission-type homework"""
client = paramiko.SSHClient()
try:
vmcfg = StorerCourseConfig(CourseList().course_config(courseId))
assignments = vmcfg.assignments()
storage_hostname = assignments.get(assignmentId, 'AssignmentStorageHost')
storage_username = assignments.get(assignmentId, 'AssignmentStorageQueryUser')
storage_basepath = assignments.storage_basepath( \
assignments.get(assignmentId, 'AssignmentStorageBasepath') , account)
client.load_system_host_keys(vmcfg.known_hosts_file())
client.connect(storage_hostname,
username=storage_username,
key_filename=vmcfg.storer_sshid(),
look_for_keys=False)
cmd = "find " + storage_basepath + '/' + account + \
" \( ! -regex '.*/\..*' \) -type f"
stdin, stdout, stderr = client.exec_command(cmd)
result = []
for d in stdout.readlines():
result.append({'fileName' : d})
for f in [stdin, stdout, stderr]: f.close()
return json.dumps(result)
except:
strout = OutputString()
traceback.print_exc(file = strout)
return json.dumps({'errorTrace' : strout.get()}, indent=4)
finally:
client.close()
def QuoteForPOSIX(string):
return "\\'".join("'" + p + "'" for p in string.split("'"))
def validate_md5_submission(courseId, assignmentId, account, archiveFileName):
"""Checks whether a MD5Submission is valid:
* checks that the uploaded md5 corresponds to the one of the machine
* checks that the archive uploaded by the student is a zip file
On success returns (True,).
On failure reports the source of the failure:
- (False, 'md5') - the uploaded md5 does not match the one computed on the archive
- (False, 'zip') - the uploaded archive is not zip.
"""
md5_calculated = ""
md5_uploaded = ""
archive_file_type = ""
client = paramiko.SSHClient()
try:
vmcfg = StorerCourseConfig(CourseList().course_config(courseId))
assignments = vmcfg.assignments()
storage_hostname = assignments.get(assignmentId, 'AssignmentStorageHost')
storage_username = assignments.get(assignmentId, 'AssignmentStorageQueryUser')
storage_basepath = assignments.storage_basepath( \
assignments.get(assignmentId, 'AssignmentStorageBasepath'), account)
client.load_system_host_keys(vmcfg.known_hosts_file())
client.connect(storage_hostname,
username=storage_username,
key_filename=vmcfg.storer_sshid(),
look_for_keys=False)
archive_abs = os.path.join(storage_basepath, account, archiveFileName)
# XXX: This will take ages to compute! I wonder how many
# connections Apache will hold.
stdin, stdout, stderr = client.exec_command("md5sum " + QuoteForPOSIX(archive_abs))
md5_calculated = stdout.readline().split()[0]
for f in [stdin, stdout, stderr]: f.close()
stdin, stdout, stderr = client.exec_command("file " + QuoteForPOSIX(archive_abs))
archive_file_type = stdout.readline()[len(archive_abs):].split()[1].lower()
for f in [stdin, stdout, stderr]: f.close()
vmpaths = paths.VmcheckerPaths(vmcfg.root_path())
submission_dir = vmpaths.dir_cur_submission_root(assignmentId, account)
md5_fpath = paths.submission_md5_file(submission_dir)
if os.path.isfile(md5_fpath):
with open(md5_fpath, 'r') as f:
md5_uploaded = f.read(32)
except:
strout = OutputString()
traceback.print_exc(file = strout)
return json.dumps({'errorTrace' : strout.get()}, indent=4)
finally:
client.close()
if not md5_calculated == md5_uploaded:
return (False, MD5_ERR_BAD_MD5) # report the type of the problem
if not archive_file_type == "zip":
return (False, MD5_ERR_BAD_ZIP) # report the type of the problem
return (True,) # no problemo
def getAssignmentAccountName(courseId, assignmentId, username, strout):
try:
vmcfg = StorerCourseConfig(CourseList().course_config(courseId))
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType' : ERR_EXCEPTION,
'errorMessage' : "",
'errorTrace' : strout.get()})
vmpaths = paths.VmcheckerPaths(vmcfg.root_path())
with opening_course_db(vmpaths.db_file()) as course_db:
return course_db.get_assignment_account(assignmentId, username)
# Service method helpers
def getUserUploadedMd5Helper(courseId, assignmentId, username, strout):
"""Get the current MD5 sum submitted for a given username on a given assignment"""
try:
vmcfg = StorerCourseConfig(CourseList().course_config(courseId))
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType' : ERR_EXCEPTION,
'errorMessage' : "",
'errorTrace' : strout.get()})
(_, account) = getAssignmentAccountName(courseId, assignmentId, username, strout)
vmpaths = paths.VmcheckerPaths(vmcfg.root_path())
submission_dir = vmpaths.dir_cur_submission_root(assignmentId, account)
md5_fpath = paths.submission_md5_file(submission_dir)
md5_result = {}
try:
if os.path.exists(paths.submission_config_file(submission_dir)) and os.path.isfile(md5_fpath):
sss = submissions.Submissions(vmpaths)
upload_time_str = sss.get_upload_time_str(assignmentId, account)
md5_result['fileExists'] = True
with open(md5_fpath, 'r') as f:
md5_result['md5Sum'] = f.read(32)
md5_result['uploadTime'] = upload_time_str
else:
md5_result['fileExists'] = False
return json.dumps(md5_result)
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType' : ERR_EXCEPTION,
'errorMessage' : "",
'errorTrace' : strout.get()})
def getAssignmentsHelper(courseId, currentUser, strout):
try:
vmcfg = StorerCourseConfig(CourseList().course_config(courseId))
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType': ERR_EXCEPTION,
'errorMessage':"Unable to load course config",
'errorTrace':strout.get()})
assignments = vmcfg.assignments()
sorted_assg = sorted(assignments, lambda x, y: int(assignments.get(x, "OrderNumber")) -
int(assignments.get(y, "OrderNumber")))
assg_arr = []
vmpaths = paths.VmcheckerPaths(vmcfg.root_path())
with opening_course_db(vmpaths.db_file()) as course_db:
for key in sorted_assg:
if assignments.is_hidden(key) and not currentUser in vmcfg.admin_list():
continue
a = {}
a['assignmentId'] = key
a['assignmentTitle'] = assignments.get(key, "AssignmentTitle")
a['assignmentStorage'] = assignments.getd(key, "AssignmentStorage", "")
if a['assignmentStorage'].lower() == "large":
a['assignmentStorageHost'] = assignments.get(key, "AssignmentStorageHost")
a['assignmentStorageBasepath'] = assignments.storage_basepath( \
assignments.get(key, "AssignmentStorageBasepath"), currentUser)
a['deadline'] = assignments.get(key, "Deadline")
a['statementLink'] = assignments.get(key, "StatementLink")
team = course_db.get_user_team_for_assignment(key, currentUser)
if team is not None:
a['team'] = team
assg_arr.append(a)
return json.dumps(assg_arr)
def getResultsHelper(courseId, assignmentId, currentUser, strout, username = None, teamname = None, currentTeam = None):
# assume that the session was already checked
if username != None and teamname != None:
return json.dumps({'errorType' : ERR_OTHER,
'errorMessage' : "Can't query both user and team results at the same time.",
'errorTrace' : ""})
try:
vmcfg = StorerCourseConfig(CourseList().course_config(courseId))
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType' : ERR_EXCEPTION,
'errorMessage' : "",
'errorTrace' : strout.get()})
# Check if the current user is allowed to view any other user's grade.
# TODO: This should be implemented neater using some group
# and permission model.
is_authorized = vmcfg.public_results() or \
currentUser in vmcfg.admin_list() or \
username == currentUser or \
teamname == currentTeam
if not is_authorized:
traceback.print_exc(file = strout)
return json.dumps({'errorType' : ERR_EXCEPTION,
'errorMessage' : "User is not authorized to view results.",
'errorTrace' : strout.get()})
vmpaths = paths.VmcheckerPaths(vmcfg.root_path())
account = None
if username != None:
# Get the individual results for this user
account = username
isTeamAccount = False
elif teamname != None:
# Get the team results for this team
account = teamname
isTeamAccount = True
else:
# Check if the user is part of a team with a mutual account for this submission
(isTeamAccount, account) = getAssignmentAccountName(courseId, assignmentId, currentUser, strout)
submission_dir = vmpaths.dir_cur_submission_root(assignmentId, account)
r_path = paths.dir_submission_results(submission_dir)
assignments = vmcfg.assignments()
ignored_vmrs = assignments.ignored_vmrs(assignmentId)
try:
isGraded = False
result_files = []
if os.path.isdir(r_path):
update_db.update_grades(courseId, account=account, assignment=assignmentId)
for fname in os.listdir(r_path):
# skip all files not ending in '.vmr'
if not fname.endswith('.vmr'):
continue
if fname in ignored_vmrs:
continue
f_path = os.path.join(r_path, fname)
if os.path.isfile(f_path):
overflow_msg = u""
f_size = os.path.getsize(f_path)
if f_size > MAX_VMR_FILE_SIZE:
overflow_msg = '\n\n' + _('File truncated! Actual size') + ': ' + str(f_size) + ' ' + _('bytes') + '\n'
# decode as utf-8 and ignore any errors; otherwise invalid
# characters would end up badly encoded in the JSON output.
with codecs.open(f_path, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read(MAX_VMR_FILE_SIZE) + overflow_msg.decode("utf-8")
content = xssescape(content)
result_files.append({fname : content})
if fname == 'grade.vmr' and \
"".join(content.split()) not in submissions.GENERATED_STATUSES:
isGraded = True
if (len(result_files) == 1 and result_files[0].keys()[0] == "grade.vmr") and \
not vmcfg.assignments().submit_only(assignmentId):
msg = _("In the meantime have a fortune cookie") + ": <blockquote>"
try:
process = subprocess.Popen('/usr/games/fortune',
shell=False,
stdout=subprocess.PIPE)
msg += process.communicate()[0] + "</blockquote>"
except:
msg += "Knock knock. Who's there? [Silence] </blockquote>"
result_files = [ {'fortune.vmr' : msg } ]
result_files.append({'queue-contents.vmr' : get_test_queue_contents(vmcfg, courseId) })
if 'submission.vmr' not in ignored_vmrs:
result_files.append({'submission.vmr' :
submission_upload_info(vmcfg, courseId, assignmentId, account, isTeamAccount, isGraded)})
result_files = sortResultFiles(result_files)
return json.dumps(result_files)
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType' : ERR_EXCEPTION,
'errorMessage' : "",
'errorTrace' : strout.get()})
def getAllGradesHelper(courseId, username, strout):
try:
# XXX: DON'T DO THIS: performance degrades very much!
#update_db.update_grades(courseId)
vmcfg = StorerCourseConfig(CourseList().course_config(courseId))
vmpaths = paths.VmcheckerPaths(vmcfg.root_path())
assignments = vmcfg.assignments()
sorted_assg = sorted(assignments, lambda x, y: int(assignments.get(x, "OrderNumber")) -
int(assignments.get(y, "OrderNumber")))
# Check if the current user is allowed to view all the grades
# TODO: This should be implemented neater using some group
# and permission model.
user_can_view_all = False
if vmcfg.public_results() or username in vmcfg.admin_list():
user_can_view_all = True
user_grade_rows = None
team_grade_rows = None
with opening_course_db(vmpaths.db_file()) as course_db:
if user_can_view_all:
user_grade_rows = course_db.get_user_grades()
team_grade_rows = course_db.get_team_grades()
else:
# Get all the individual grades that the user is allowed to see
user_grade_rows = course_db.get_user_and_teammates_grades(username)
# Get all the team grades that the user is allowed to see
team_grade_rows = course_db.get_user_team_grades(user = username)
ret = []
grades = {}
for row in user_grade_rows:
user, assignment, grade = row
if not assignment in vmcfg.assignments():
continue
if not vmcfg.assignments().show_grades_before_deadline(assignment):
deadline = time.strptime(vmcfg.assignments().get(assignment, 'Deadline'), penalty.DATE_FORMAT)
deadtime = time.mktime(deadline)
if time.time() < deadtime:
continue
if vmcfg.assignments().is_hidden(assignment) and username not in vmcfg.admin_list():
continue
grades.setdefault(user, {})[assignment] = grade
for user in sorted(grades.keys()):
ret.append({'gradeOwner' : 'user',
'name' : user,
'results' : grades.get(user)})
grades = {}
for row in team_grade_rows:
team, assignment, grade = row
if not assignment in vmcfg.assignments():
continue
if not vmcfg.assignments().show_grades_before_deadline(assignment):
deadline = time.strptime(vmcfg.assignments().get(assignment, 'Deadline'), penalty.DATE_FORMAT)
deadtime = time.mktime(deadline)
if time.time() < deadtime:
continue
grades.setdefault(team, {})[assignment] = grade
for team in sorted(grades.keys()):
ret.append({'gradeOwner' : 'team',
'name' : team,
'results' : grades.get(team)})
return json.dumps(ret)
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType' : ERR_EXCEPTION,
'errorMessage' : "",
'errorTrace' : strout.get()})
def getUserStorageDirContentsHelper(courseId, assignmentId, username, strout):
"""Get the current files in the home directory on the storage host for a given username"""
(_, account) = getAssignmentAccountName(courseId, assignmentId, username, strout)
try:
result = get_storagedir_contents(courseId, assignmentId, account)
return result
except:
traceback.print_exc(file = strout)
return json.dumps({'errorType' : ERR_EXCEPTION,
'errorMessage' : "",
'errorTrace' : strout.get()})
class InvalidDataException(Exception):
pass
import re
courseIdRegex = re.compile('^[a-zA-Z0-9]+$')
def sanityCheckCourseId(courseId):
if courseIdRegex.match(courseId) is None:
raise InvalidDataException
return courseId
assignmentIdRegex = re.compile('^[0-9a-zA-Z-_]+$')
def sanityCheckAssignmentId(assignmentId):
if assignmentIdRegex.match(assignmentId) is None:
raise InvalidDataException
return assignmentId
dotdotRegexp = re.compile('\.\.')
def sanityCheckDotDot(param):
if len(dotdotRegexp.findall(param)) != 0:
raise InvalidDataException
return param
usernameRegexWhiteList = re.compile('^[a-zA-Z0-9-_.@]+$')
def sanityCheckUsername(username):
if usernameRegexWhiteList.match(username) is None:
raise InvalidDataException
sanityCheckDotDot(username)
return username
localeRegexWhiteList = re.compile('^[a-z]{2}$')
def sanityCheckLocale(locale):
if localeRegexWhiteList.match(locale) is None:
raise InvalidDataException
return locale
|
|
# -*- coding: utf-8 -*-
import time
from flask import has_app_context
from framework import sentry
import mendeley
from addons.base.models import BaseCitationsNodeSettings, BaseOAuthUserSettings
from django.db import models
from addons.base import exceptions
from framework.exceptions import HTTPError
from mendeley.exception import MendeleyApiException
from oauthlib.oauth2 import InvalidGrantError
from addons.mendeley import \
settings # TODO: Move `settings` to `apps.py` when deleting
from addons.mendeley.api import APISession
from addons.mendeley.serializer import MendeleySerializer
from website.citations.providers import CitationsOauthProvider
from website.util import web_url_for
class Mendeley(CitationsOauthProvider):
name = 'Mendeley'
short_name = 'mendeley'
client_id = settings.MENDELEY_CLIENT_ID
client_secret = settings.MENDELEY_CLIENT_SECRET
auth_url_base = 'https://api.mendeley.com/oauth/authorize'
callback_url = 'https://api.mendeley.com/oauth/token'
auto_refresh_url = callback_url
default_scopes = ['all']
expiry_time = settings.EXPIRY_TIME
serializer = MendeleySerializer
def handle_callback(self, response):
client = self._get_client(credentials=response)
# make a second request for the Mendeley user's ID and name
profile = client.profiles.me
return {
'provider_id': profile.id,
'display_name': profile.display_name,
'profile_url': profile.link,
}
def _get_folders(self):
"""Get a list of a user's folders"""
client = self.client
return client.folders.list().items
def _get_client(self, credentials=None):
partial = mendeley.Mendeley(
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=web_url_for('oauth_callback',
service_name='mendeley',
_absolute=True) if has_app_context() else None,
)
credentials = credentials or {
'access_token': self.account.oauth_key,
'refresh_token': self.account.refresh_token,
'expires_at': time.mktime(self.account.expires_at.timetuple()),
'token_type': 'bearer',
}
return APISession(partial, credentials)
def _verify_client_validity(self):
# Check if Mendeley can be accessed
try:
self._client.folders.list()
except MendeleyApiException as error:
if error.status == 401 and 'Token has expired' in error.message:
try:
refreshed_key = self.refresh_oauth_key()
except InvalidGrantError:
self._client = None
raise HTTPError(401)
if not refreshed_key:
self._client = None
raise HTTPError(401)
else:
self._client = None
if error.status == 403:
raise HTTPError(403)
else:
raise HTTPError(error.status)
def _folder_metadata(self, folder_id):
folder = self.client.folders.get(folder_id)
return folder
def _citations_for_folder(self, list_id):
folder = self.client.folders.get(list_id)
document_ids = [
document.id
for document in folder.documents.iter(page_size=500)
]
citations = {
citation['id']: citation
for citation in self._citations_for_user()
}
return map(lambda id: citations[id], document_ids)
def _citations_for_user(self):
documents = self.client.documents.iter(page_size=500)
return [
self._citation_for_mendeley_document(document)
for document in documents
]
def _citation_for_mendeley_document(self, document):
"""Mendeley document to ``website.citations.models.Citation``
:param BaseDocument document:
An instance of ``mendeley.models.base_document.BaseDocument``
:return Citation:
"""
csl = {
'id': document.json.get('id')
}
CSL_TYPE_MAP = {
'book_section': 'chapter',
'case': 'legal_case',
'computer_program': 'article',
'conference_proceedings': 'paper-conference',
'encyclopedia_article': 'entry-encyclopedia',
'film': 'motion_picture',
'generic': 'article',
'hearing': 'speech',
'journal': 'article-journal',
'magazine_article': 'article-magazine',
'newspaper_article': 'article-newspaper',
'statute': 'legislation',
'television_broadcast': 'broadcast',
'web_page': 'webpage',
'working_paper': 'report'
}
csl_type = document.json.get('type')
if csl_type in CSL_TYPE_MAP:
csl['type'] = CSL_TYPE_MAP[csl_type]
else:
csl['type'] = 'article'
if document.json.get('abstract'):
csl['abstract'] = document.json.get('abstract')
if document.json.get('accessed'):
csl['accessed'] = document.json.get('accessed')
if document.json.get('authors'):
csl['author'] = [
{
'given': person.get('first_name'),
'family': person.get('last_name'),
} for person in document.json.get('authors')
]
if document.json.get('chapter'):
csl['chapter-number'] = document.json.get('chapter')
if document.json.get('city') and document.json.get('country'):
csl['publisher-place'] = document.json.get('city') + ', ' + document.json.get('country')
elif document.json.get('city'):
csl['publisher-place'] = document.json.get('city')
elif document.json.get('country'):
csl['publisher-place'] = document.json.get('country')
if document.json.get('edition'):
csl['edition'] = document.json.get('edition')
if document.json.get('editors'):
csl['editor'] = [
{
'given': person.get('first_name'),
'family': person.get('last_name'),
} for person in document.json.get('editors')
]
if document.json.get('genre'):
csl['genre'] = document.json.get('genre')
# gather identifiers
idents = document.json.get('identifiers')
if idents is not None:
if idents.get('doi'):
csl['DOI'] = idents.get('doi')
if idents.get('isbn'):
csl['ISBN'] = idents.get('isbn')
if idents.get('issn'):
csl['ISSN'] = idents.get('issn')
if idents.get('pmid'):
csl['PMID'] = idents.get('pmid')
if document.json.get('issue'):
csl['issue'] = document.json.get('issue')
if document.json.get('language'):
csl['language'] = document.json.get('language')
if document.json.get('medium'):
csl['medium'] = document.json.get('medium')
if document.json.get('pages'):
csl['page'] = document.json.get('pages')
if document.json.get('publisher'):
csl['publisher'] = document.json.get('publisher')
if csl_type == 'thesis':
csl['publisher'] = document.json.get('institution')
if document.json.get('revision'):
csl['number'] = document.json.get('revision')
if document.json.get('series'):
csl['collection-title'] = document.json.get('series')
if document.json.get('series_editor'):
csl['collection-editor'] = document.json.get('series_editor')
if document.json.get('short_title'):
csl['shortTitle'] = document.json.get('short_title')
if document.json.get('source'):
csl['container-title'] = document.json.get('source')
if document.json.get('title'):
csl['title'] = document.json.get('title')
if document.json.get('volume'):
csl['volume'] = document.json.get('volume')
urls = document.json.get('websites', [])
if urls:
csl['URL'] = urls[0]
if document.json.get('year'):
csl['issued'] = {'date-parts': [[document.json.get('year')]]}
return csl
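# Rough shape of the CSL dict built above for a journal article (field values
# are invented for illustration; only fields present in the Mendeley JSON are
# emitted):
#
#   {
#       'id': 'doc-uuid', 'type': 'article-journal', 'title': 'Some paper',
#       'author': [{'given': 'Ada', 'family': 'Lovelace'}],
#       'container-title': 'Some Journal', 'issued': {'date-parts': [[2014]]},
#       'DOI': '10.1234/example'
#   }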
class UserSettings(BaseOAuthUserSettings):
oauth_provider = Mendeley
serializer = MendeleySerializer
class NodeSettings(BaseCitationsNodeSettings):
provider_name = 'mendeley'
oauth_provider = Mendeley
serializer = MendeleySerializer
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
list_id = models.TextField(blank=True, null=True)
_api = None
@property
def _fetch_folder_name(self):
folder = self.api._folder_metadata(self.list_id)
return folder.name
def get_folders(self, show_root=False, **kwargs):
if self.has_auth:
try:
folders = self.api._get_folders()
serialized_root_folder = {
'name': 'All Documents',
'provider_list_id': None,
'id': 'ROOT',
'parent_list_id': '__',
'kind': 'folder',
'addon': 'mendeley'
}
serialized_folders = [{
'addon': 'mendeley',
'kind': 'folder',
'id': folder.json['id'],
'name': folder.json['name'],
'path': folder.json.get('parent_id', '/'),
'parent_list_id': folder.json.get('parent_id', None),
'provider_list_id': folder.json['id']
} for folder in folders]
if show_root:
serialized_folders.insert(0, serialized_root_folder)
return serialized_folders
except MendeleyApiException as error:
sentry.log_exception()
sentry.log_message('Unexpected Mendeley Error when fetching folders.')
raise HTTPError(error.status)
else:
raise exceptions.InvalidAuthError()
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import os
import subprocess
import sys
from contextlib import contextmanager
from distutils import sysconfig
from site import USER_SITE
import pkg_resources
from pkg_resources import EntryPoint, WorkingSet, find_distributions
from .common import die
from .compatibility import exec_function
from .environment import PEXEnvironment
from .finders import get_entry_point_from_console_script, get_script_from_distributions
from .interpreter import PythonInterpreter
from .orderedset import OrderedSet
from .pex_info import PexInfo
from .tracer import TRACER
from .variables import ENV
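# A minimal file-like sink; PEX.execute swaps it in for sys.stderr during
# interpreter teardown to silence noisy atexit failures.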
class DevNull(object):
def __init__(self):
pass
def write(self, *args, **kw):
pass
def flush(self):
pass
class PEX(object): # noqa: T000
"""PEX, n. A self-contained python environment."""
class Error(Exception): pass
class NotFound(Error): pass
@classmethod
def clean_environment(cls):
try:
del os.environ['MACOSX_DEPLOYMENT_TARGET']
except KeyError:
pass
# Cannot change dictionary size during __iter__
filter_keys = [key for key in os.environ if key.startswith('PEX_')]
for key in filter_keys:
del os.environ[key]
def __init__(self, pex=sys.argv[0], interpreter=None, env=ENV):
self._pex = pex
self._interpreter = interpreter or PythonInterpreter.get()
self._pex_info = PexInfo.from_pex(self._pex)
self._pex_info_overrides = PexInfo.from_env(env=env)
self._vars = env
self._envs = []
self._working_set = None
def _activate(self):
if not self._working_set:
working_set = WorkingSet([])
# set up the local .pex environment
pex_info = self._pex_info.copy()
pex_info.update(self._pex_info_overrides)
self._envs.append(PEXEnvironment(self._pex, pex_info))
# set up other environments as specified in PEX_PATH
for pex_path in filter(None, self._vars.PEX_PATH.split(os.pathsep)):
pex_info = PexInfo.from_pex(pex_path)
pex_info.update(self._pex_info_overrides)
self._envs.append(PEXEnvironment(pex_path, pex_info))
# activate all of them
for env in self._envs:
for dist in env.activate():
working_set.add(dist)
self._working_set = working_set
return self._working_set
@classmethod
def _extras_paths(cls):
standard_lib = sysconfig.get_python_lib(standard_lib=True)
try:
makefile = sysconfig.parse_makefile(sysconfig.get_makefile_filename())
except (AttributeError, IOError):
# This is not available by default in PyPy's distutils.sysconfig or it simply is
# no longer available on the system (IOError ENOENT)
makefile = {}
extras_paths = filter(None, makefile.get('EXTRASPATH', '').split(':'))
for path in extras_paths:
yield os.path.join(standard_lib, path)
@classmethod
def _site_libs(cls):
try:
from site import getsitepackages
site_libs = set(getsitepackages())
except ImportError:
site_libs = set()
site_libs.update([sysconfig.get_python_lib(plat_specific=False),
sysconfig.get_python_lib(plat_specific=True)])
# On windows getsitepackages() returns the python stdlib too.
if sys.prefix in site_libs:
site_libs.remove(sys.prefix)
real_site_libs = set(os.path.realpath(path) for path in site_libs)
return site_libs | real_site_libs
@classmethod
def _tainted_path(cls, path, site_libs):
paths = frozenset([path, os.path.realpath(path)])
return any(path.startswith(site_lib) for site_lib in site_libs for path in paths)
@classmethod
def minimum_sys_modules(cls, site_libs, modules=None):
"""Given a set of site-packages paths, return a "clean" sys.modules.
When importing site, modules within sys.modules have their __path__'s populated with
additional paths as defined by *-nspkg.pth in site-packages, or alternately by distribution
metadata such as *.dist-info/namespace_packages.txt. This can possibly cause namespace
packages to leak into imports despite being scrubbed from sys.path.
NOTE: This method mutates the modules' __path__ attributes in sys.modules, so this is currently an
irreversible operation.
"""
modules = modules or sys.modules
new_modules = {}
for module_name, module in modules.items():
# builtins can stay
if not hasattr(module, '__path__'):
new_modules[module_name] = module
continue
# Pop off site-impacting __path__ elements in-place.
for k in reversed(range(len(module.__path__))):
if cls._tainted_path(module.__path__[k], site_libs):
TRACER.log('Scrubbing %s.__path__: %s' % (module_name, module.__path__[k]), V=3)
module.__path__.pop(k)
# It still contains path elements not in site packages, so it can stay in sys.modules
if module.__path__:
new_modules[module_name] = module
return new_modules
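# Illustrative sketch (not from the original source): if 'pkg' is a namespace
# package with __path__ == ['/app/pkg', '/usr/lib/python2.7/site-packages/pkg'],
# the loop above pops the site-packages entry, and the module is kept only
# because a non-site path element remains.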
@classmethod
def minimum_sys_path(cls, site_libs):
site_distributions = OrderedSet()
user_site_distributions = OrderedSet()
def all_distribution_paths(path):
locations = set(dist.location for dist in find_distributions(path))
return set([path]) | locations | set(os.path.realpath(path) for path in locations)
for path_element in sys.path:
if cls._tainted_path(path_element, site_libs):
TRACER.log('Tainted path element: %s' % path_element)
site_distributions.update(all_distribution_paths(path_element))
else:
TRACER.log('Not a tainted path element: %s' % path_element, V=2)
user_site_distributions.update(all_distribution_paths(USER_SITE))
for path in site_distributions:
TRACER.log('Scrubbing from site-packages: %s' % path)
for path in user_site_distributions:
TRACER.log('Scrubbing from user site: %s' % path)
scrub_paths = site_distributions | user_site_distributions
scrubbed_sys_path = list(OrderedSet(sys.path) - scrub_paths)
scrub_from_importer_cache = filter(
lambda key: any(key.startswith(path) for path in scrub_paths),
sys.path_importer_cache.keys())
scrubbed_importer_cache = dict((key, value) for (key, value) in sys.path_importer_cache.items()
if key not in scrub_from_importer_cache)
for importer_cache_entry in scrub_from_importer_cache:
TRACER.log('Scrubbing from path_importer_cache: %s' % importer_cache_entry, V=2)
return scrubbed_sys_path, scrubbed_importer_cache
@classmethod
def minimum_sys(cls):
"""Return the minimum sys necessary to run this interpreter, a la python -S.
:returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
bare python installation.
"""
site_libs = set(cls._site_libs())
for site_lib in site_libs:
TRACER.log('Found site-library: %s' % site_lib)
for extras_path in cls._extras_paths():
TRACER.log('Found site extra: %s' % extras_path)
site_libs.add(extras_path)
site_libs = set(os.path.normpath(path) for path in site_libs)
sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs)
sys_modules = cls.minimum_sys_modules(site_libs)
return sys_path, sys_path_importer_cache, sys_modules
@classmethod
@contextmanager
def patch_pkg_resources(cls, working_set):
"""Patch pkg_resources given a new working set."""
def patch(working_set):
pkg_resources.working_set = working_set
pkg_resources.require = working_set.require
pkg_resources.iter_entry_points = working_set.iter_entry_points
pkg_resources.run_script = pkg_resources.run_main = working_set.run_script
pkg_resources.add_activation_listener = working_set.subscribe
old_working_set = pkg_resources.working_set
patch(working_set)
try:
yield
finally:
patch(old_working_set)
# Thar be dragons -- when this contextmanager exits, the interpreter is
# potentially in a wonky state since the patches here (minimum_sys_modules
# for example) actually mutate global state. This should not be
# considered a reversible operation despite being a contextmanager.
@classmethod
@contextmanager
def patch_sys(cls):
"""Patch sys with all site scrubbed."""
def patch_dict(old_value, new_value):
old_value.clear()
old_value.update(new_value)
def patch_all(path, path_importer_cache, modules):
sys.path[:] = path
patch_dict(sys.path_importer_cache, path_importer_cache)
patch_dict(sys.modules, modules)
old_sys_path, old_sys_path_importer_cache, old_sys_modules = (
sys.path[:], sys.path_importer_cache.copy(), sys.modules.copy())
new_sys_path, new_sys_path_importer_cache, new_sys_modules = cls.minimum_sys()
patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)
yield
def _wrap_coverage(self, runner, *args):
if not self._vars.PEX_COVERAGE and self._vars.PEX_COVERAGE_FILENAME is None:
runner(*args)
return
try:
import coverage
except ImportError:
die('Could not bootstrap coverage module, aborting.')
pex_coverage_filename = self._vars.PEX_COVERAGE_FILENAME
if pex_coverage_filename is not None:
cov = coverage.coverage(data_file=pex_coverage_filename)
else:
cov = coverage.coverage(data_suffix=True)
TRACER.log('Starting coverage.')
cov.start()
try:
runner(*args)
finally:
TRACER.log('Stopping coverage')
cov.stop()
# TODO(wickman) Post-process coverage to elide $PEX_ROOT and make
# the report more useful/less noisy. #89
if pex_coverage_filename:
cov.save()
else:
cov.report(show_missing=False, ignore_errors=True, file=sys.stdout)
def _wrap_profiling(self, runner, *args):
if not self._vars.PEX_PROFILE and self._vars.PEX_PROFILE_FILENAME is None:
runner(*args)
return
pex_profile_filename = self._vars.PEX_PROFILE_FILENAME
pex_profile_sort = self._vars.PEX_PROFILE_SORT
try:
import cProfile as profile
except ImportError:
import profile
profiler = profile.Profile()
try:
return profiler.runcall(runner, *args)
finally:
if pex_profile_filename is not None:
profiler.dump_stats(pex_profile_filename)
else:
profiler.print_stats(sort=pex_profile_sort)
def execute(self):
"""Execute the PEX.
This function assumes it is the last function called by the interpreter.
"""
teardown_verbosity = self._vars.PEX_TEARDOWN_VERBOSE
try:
with self.patch_sys():
working_set = self._activate()
TRACER.log('PYTHONPATH contains:')
for element in sys.path:
TRACER.log(' %c %s' % (' ' if os.path.exists(element) else '*', element))
TRACER.log(' * - paths that do not exist or will be imported via zipimport')
with self.patch_pkg_resources(working_set):
self._wrap_coverage(self._wrap_profiling, self._execute)
except Exception:
# Allow the current sys.excepthook to handle this app exception before we tear things down in
# finally, then reraise so that the exit status is reflected correctly.
sys.excepthook(*sys.exc_info())
raise
except SystemExit as se:
# Print a SystemExit error message, avoiding a traceback in python3.
# This must happen here, as sys.stderr is about to be torn down
if not isinstance(se.code, int) and se.code is not None:
print(se.code, file=sys.stderr)
raise
finally:
# squash all exceptions on interpreter teardown -- the primary type here are
# atexit handlers failing to run because of things such as:
# http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit
if not teardown_verbosity:
sys.stderr.flush()
sys.stderr = DevNull()
sys.excepthook = lambda *a, **kw: None
def _execute(self):
force_interpreter = self._vars.PEX_INTERPRETER
self.clean_environment()
if force_interpreter:
TRACER.log('PEX_INTERPRETER specified, dropping into interpreter')
return self.execute_interpreter()
if self._pex_info_overrides.script and self._pex_info_overrides.entry_point:
die('Cannot specify both script and entry_point for a PEX!')
if self._pex_info.script and self._pex_info.entry_point:
die('Cannot specify both script and entry_point for a PEX!')
if self._pex_info_overrides.script:
return self.execute_script(self._pex_info_overrides.script)
elif self._pex_info_overrides.entry_point:
return self.execute_entry(self._pex_info_overrides.entry_point)
elif self._pex_info.script:
return self.execute_script(self._pex_info.script)
elif self._pex_info.entry_point:
return self.execute_entry(self._pex_info.entry_point)
else:
TRACER.log('No entry point specified, dropping into interpreter')
return self.execute_interpreter()
def execute_interpreter(self):
if sys.argv[1:]:
try:
with open(sys.argv[1]) as fp:
name, content = sys.argv[1], fp.read()
except IOError as e:
die("Could not open %s in the environment [%s]: %s" % (sys.argv[1], sys.argv[0], e))
sys.argv = sys.argv[1:]
self.execute_content(name, content)
else:
import code
code.interact()
def execute_script(self, script_name):
dists = list(self._activate())
entry_point = get_entry_point_from_console_script(script_name, dists)
if entry_point:
return self.execute_entry(entry_point)
dist, script_path, script_content = get_script_from_distributions(script_name, dists)
if not dist:
raise self.NotFound('Could not find script %s in pex!' % script_name)
TRACER.log('Found script %s in %s' % (script_name, dist))
return self.execute_content(script_path, script_content, argv0=script_name)
@classmethod
def execute_content(cls, name, content, argv0=None):
argv0 = argv0 or name
try:
ast = compile(content, name, 'exec', flags=0, dont_inherit=1)
except SyntaxError:
die('Unable to parse %s. PEX only supports Python scripts.' % name)
old_name, old_file = globals().get('__name__'), globals().get('__file__')
try:
old_argv0, sys.argv[0] = sys.argv[0], argv0
globals()['__name__'] = '__main__'
globals()['__file__'] = name
exec_function(ast, globals())
finally:
if old_name:
globals()['__name__'] = old_name
else:
globals().pop('__name__')
if old_file:
globals()['__file__'] = old_file
else:
globals().pop('__file__')
sys.argv[0] = old_argv0
@classmethod
def execute_entry(cls, entry_point):
runner = cls.execute_pkg_resources if ':' in entry_point else cls.execute_module
runner(entry_point)
@staticmethod
def execute_module(module_name):
import runpy
runpy.run_module(module_name, run_name='__main__')
@staticmethod
def execute_pkg_resources(spec):
entry = EntryPoint.parse("run = {0}".format(spec))
# See https://pythonhosted.org/setuptools/history.html#id25 for rationale here.
if hasattr(entry, 'resolve'):
# setuptools >= 11.3
runner = entry.resolve()
else:
# setuptools < 11.3
runner = entry.load(require=False)
runner()
def cmdline(self, args=()):
"""The commandline to run this environment.
:keyword args: Additional arguments to be passed to the application being invoked by the
environment.
"""
cmds = [self._interpreter.binary]
cmds.append(self._pex)
cmds.extend(args)
return cmds
def run(self, args=(), with_chroot=False, blocking=True, setsid=False, **kw):
"""Run the PythonEnvironment in an interpreter in a subprocess.
:keyword args: Additional arguments to be passed to the application being invoked by the
environment.
:keyword with_chroot: Run with cwd set to the environment's working directory.
:keyword blocking: If true, return the return code of the subprocess.
If false, return the Popen object of the invoked subprocess.
:keyword setsid: If true, run the PEX in a separate operating system session.
Remaining keyword arguments are passed directly to subprocess.Popen.
"""
self.clean_environment()
cmdline = self.cmdline(args)
TRACER.log('PEX.run invoking %s' % ' '.join(cmdline))
process = subprocess.Popen(
cmdline,
cwd=self._pex if with_chroot else os.getcwd(),
preexec_fn=os.setsid if setsid else None,
**kw)
return process.wait() if blocking else process
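# Illustrative usage sketch (not part of the original module); assumes an
# already-built PEX file exists at the given (hypothetical) path:
#
# from pex.pex import PEX
# exit_code = PEX('my_app.pex').run(args=['--help'])  # spawn and wait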
|
|
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
from copy import deepcopy
from langdetect import detect, DetectorFactory
from langdetect.lang_detect_exception import LangDetectException
import dateutil.parser
from dateutil.tz import tzutc
from goose3.article import Article
from goose3.utils import URLHelper, RawHelper
from goose3.text import get_encodings_from_content
from goose3.extractors.content import StandardContentExtractor
from goose3.extractors.videos import VideoExtractor
from goose3.extractors.title import TitleExtractor
from goose3.extractors.images import ImageExtractor
from goose3.extractors.links import LinksExtractor
from goose3.extractors.tweets import TweetsExtractor
from goose3.extractors.authors import AuthorsExtractor
from goose3.extractors.tags import TagsExtractor
from goose3.extractors.opengraph import OpenGraphExtractor
from goose3.extractors.publishdate import PublishDateExtractor, TIMEZONE_INFO
from goose3.extractors.schema import SchemaExtractor
from goose3.extractors.metas import MetasExtractor
from goose3.cleaners import StandardDocumentCleaner
from goose3.outputformatters import StandardOutputFormatter
from goose3.network import NetworkFetcher
class CrawlCandidate(object):
def __init__(self, config, url, raw_html):
self.config = config
# parser
self.parser = self.config.get_parser()
self.url = url
self.raw_html = raw_html
class Crawler(object):
def __init__(self, config, fetcher=None):
# config
self.config = config
# parser
self.parser = self.config.get_parser()
# article
self.article = Article()
# init the extractor
self.extractor = self.get_extractor()
# init the document cleaner
self.cleaner = self.get_cleaner()
# init the output formatter
self.formatter = self.get_formatter()
# metas extractor
self.metas_extractor = self.get_metas_extractor()
# opengraph extractor
self.opengraph_extractor = self.get_opengraph_extractor()
# schema.org news article extractor
self.schema_extractor = self.get_schema_extractor()
# publishdate extractor
self.publishdate_extractor = self.get_publishdate_extractor()
# tags extractor
self.tags_extractor = self.get_tags_extractor()
# authors extractor
self.authors_extractor = self.get_authors_extractor()
# tweets extractor
self.tweets_extractor = self.get_tweets_extractor()
# links extractor
self.links_extractor = self.get_links_extractor()
# video extractor
self.video_extractor = self.get_video_extractor()
# title extractor
self.title_extractor = self.get_title_extractor()
# html fetcher
if isinstance(fetcher, NetworkFetcher):
self.fetcher = fetcher
else:
self.fetcher = NetworkFetcher(self.config)
# image extractor
self.image_extractor = self.get_image_extractor()
# TODO: use the log prefix
self.log_prefix = "crawler: "
def crawl(self, crawl_candidate):
# parse candidate
parse_candidate = self.get_parse_candidate(crawl_candidate)
# raw html
raw_html = self.get_html(crawl_candidate, parse_candidate)
if raw_html is None:
return self.article
return self.process(raw_html, parse_candidate.url, parse_candidate.link_hash)
def process(self, raw_html, final_url, link_hash):
# create document
doc = self.get_document(raw_html)
# article
self.article._final_url = final_url
self.article._link_hash = link_hash
self.article._raw_html = raw_html
self.article._doc = doc
self.article._raw_doc = deepcopy(doc)
# open graph
self.article._opengraph = self.opengraph_extractor.extract()
# schema.org:
# - (ReportageNewsArticle) https://pending.schema.org/ReportageNewsArticle
# - (NewsArticle) https://schema.org/NewsArticle
# - (Article) https://schema.org/Article
self.article._schema = self.schema_extractor.extract()
if not self.article._final_url:
if "url" in self.article.opengraph:
self.article._final_url = self.article.opengraph["url"]
elif self.article.schema and "url" in self.article.schema:
self.article._final_url = self.article.schema["url"]
# meta
metas = self.metas_extractor.extract()
# print(metas)
self.article._meta_lang = metas['lang']
self.article._meta_favicon = metas['favicon']
self.article._meta_description = metas['description']
self.article._meta_keywords = metas['keywords']
self.article._meta_encoding = metas['encoding']
self.article._canonical_link = metas['canonical']
self.article._domain = metas['domain']
# publishdate
self.article._publish_date = self.publishdate_extractor.extract()
if self.article.publish_date:
try:
publish_datetime = dateutil.parser.parse(self.article.publish_date, tzinfos=TIMEZONE_INFO)
if publish_datetime.tzinfo:
self.article._publish_datetime_utc = publish_datetime.astimezone(tzutc())
else:
self.article._publish_datetime_utc = publish_datetime
except (ValueError, OverflowError):
self.article._publish_datetime_utc = None
# tags
self.article._tags = self.tags_extractor.extract()
# authors
self.article._authors = self.authors_extractor.extract()
# title
self.article._title = self.title_extractor.extract()
# fall back to automatic language detection when no meta language was found
if self.article._meta_lang is None:
tmp_lang_detect = "{} {} {} {}".format(self.article._meta_description, self.article._title, self.article._meta_keywords, self.article._tags)
tmp_lang_detect = " ".join(tmp_lang_detect.split())
if len(tmp_lang_detect) > 15:
# required to make it deterministic;
# see: https://github.com/Mimino666/langdetect/blob/master/README.md#basic-usage
DetectorFactory.seed = 0
try:
self.article._meta_lang = detect(tmp_lang_detect)
except LangDetectException:
self.article._meta_lang = None
# print(self.article._meta_lang)
# check for a known node that marks the article body;
# if we find one, force article.doc to that node so the
# cleaner does not strip text content we want to keep
article_body = self.extractor.get_known_article_tags()
if article_body is not None:
doc = article_body
# before we do any calcs on the body itself let's clean up the document
if not isinstance(doc, list):
doc = [self.cleaner.clean(doc)]
else:
doc = [self.cleaner.clean(deepcopy(x)) for x in doc]
# big stuff
self.article._top_node = self.extractor.calculate_best_node(doc)
# if we do not find an article within the discovered possible article nodes,
# try again with the root node.
if self.article._top_node is None:
# try again with the root node.
self.article._top_node = self.extractor.calculate_best_node(self.article._doc)
else:
# set the doc member to the discovered article node.
self.article._doc = doc
# if we have a top node
# let's process it
if self.article._top_node is not None:
# article links
self.article._links = self.links_extractor.extract()
# tweets
self.article._tweets = self.tweets_extractor.extract()
# video handling
self.article._movies = self.video_extractor.get_videos()
# image handling
if self.config.enable_image_fetching:
self.get_image()
# post cleanup
self.article._top_node = self.extractor.post_cleanup()
# clean_text
self.article._cleaned_text = self.formatter.get_formatted_text()
# cleanup tmp file
self.release_resources()
# return the article
return self.article
@staticmethod
def get_parse_candidate(crawl_candidate):
if crawl_candidate.raw_html:
return RawHelper.get_parsing_candidate(crawl_candidate.url, crawl_candidate.raw_html)
return URLHelper.get_parsing_candidate(crawl_candidate.url)
def get_image(self):
doc = self.article.raw_doc
top_node = self.article.top_node
self.article._top_image = self.image_extractor.get_best_image(doc, top_node)
def get_html(self, crawl_candidate, parsing_candidate):
# raw_html was supplied directly,
# no need to fetch remote content
if crawl_candidate.raw_html:
return crawl_candidate.raw_html
# fetch HTML
response = self.fetcher.fetch_obj(parsing_candidate.url)
if response.encoding != 'ISO-8859-1': # requests has a good idea; use what it says
# return response as a unicode string
html = response.text
self.article._meta_encoding = response.encoding
else:
html = response.content
encodings = get_encodings_from_content(response.text)
if len(encodings) > 0:
self.article._meta_encoding = encodings[0]
response.encoding = encodings[0]
html = response.text
else:
self.article._meta_encoding = encodings
return html
def get_metas_extractor(self):
return MetasExtractor(self.config, self.article)
def get_publishdate_extractor(self):
return PublishDateExtractor(self.config, self.article)
def get_opengraph_extractor(self):
return OpenGraphExtractor(self.config, self.article)
def get_schema_extractor(self):
return SchemaExtractor(self.config, self.article)
def get_tags_extractor(self):
return TagsExtractor(self.config, self.article)
def get_authors_extractor(self):
return AuthorsExtractor(self.config, self.article)
def get_tweets_extractor(self):
return TweetsExtractor(self.config, self.article)
def get_links_extractor(self):
return LinksExtractor(self.config, self.article)
def get_title_extractor(self):
return TitleExtractor(self.config, self.article)
def get_image_extractor(self):
return ImageExtractor(self.fetcher, self.config, self.article)
def get_video_extractor(self):
return VideoExtractor(self.config, self.article)
def get_formatter(self):
return StandardOutputFormatter(self.config, self.article)
def get_cleaner(self):
return StandardDocumentCleaner(self.config, self.article)
def get_document(self, raw_html):
doc = self.parser.fromstring(raw_html)
return doc
def get_extractor(self):
return StandardContentExtractor(self.config, self.article)
def release_resources(self):
path = os.path.join(self.config.local_storage_path, '%s_*' % self.article.link_hash)
for fname in glob.glob(path):
try:
os.remove(fname)
except OSError:
# TODO: better log handling
pass
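# Illustrative usage sketch (not from the original source): the Crawler is
# normally driven through the goose3.Goose facade, roughly:
#
# from goose3 import Goose
# article = Goose().extract(url='http://example.com/news/story')
# print(article.cleaned_text)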
|
|
#!/usr/bin/env python
import sys
import os
import errno
import re
import subprocess
import datetime
from collections import OrderedDict
import MySQLdb
import yaml
import ordered_yaml
myname = sys.argv[0]
CONFIG_FILENAME = "config.yml"
DEFAULT_TARGET_DIR = "/data/dump"
## Worker Functions
def mongodump(config, db):
"""
Drive the mongodump command from a stanza that looks like this:
dbname:
use: mongodump
host: dbname.dbhoster.com
port: 27017
user: root
password: redacted
db: importantstuff
collection: stuff
format: tarball
"""
target_name = make_target_name(config, db)
info("dumping \"" + db + "\" to \"" + target_name + "\"")
option_mapping = OrderedDict([
("-h", "host"),
("--port", "port"),
("-d", "db"),
("-c", "collection"),
("-u", "user"),
("-p", "password"),
])
cmd = ["mongodump"]
for (option, setting_name) in option_mapping.iteritems():
if setting_name in config[db]:
cmd.append(option)
setting = str(config[db][setting_name])
if len(setting):
cmd.append(setting)
cmd.append("-o")
cmd.append(target_name)
subprocess.call(cmd)
compressed_name = compress(config[db], target_name)
make_symlink(config[db], compressed_name)
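# Illustrative note (not part of the original script): the stanza above expands
# to roughly
#   mongodump -h dbname.dbhoster.com --port 27017 -d importantstuff -c stuff \
#       -u root -p redacted -o dbname-<YYYYMMDD>
# followed by tarball compression and a "latest" symlink.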
def mysqldump(config, db):
"""
Drive the mysql command from a stanza that looks like this:
dbname:
use: mysqldump
host: dbname.dbhoster.com
port: 3306
user: root
password: redacted
db: importantstuff
sed:
- s/test.class.stanford.edu/localhost:8000/g
- s/class.stanford.edu/localhost:8000/g
format: tarball
"""
target_name = make_target_name(config, db)
info("dumping \"" + db + "\" to \"" + target_name + "\"")
option_mapping = OrderedDict([
("-h", "host"),
("-P", "port"),
("-u", "user"),
])
cmd = ["mysqldump"]
for (option, setting_name) in option_mapping.iteritems():
if setting_name in config[db]:
cmd.append(option)
setting = str(config[db][setting_name])
if len(setting):
cmd.append(setting)
cmd.append("--lock-tables=false") # for R/O account
cmd.append("-p" + config[db].get('password', "")) # not space separated
cmd.append('--ignore-table=' + config[db].get('db', db) + '.courseware_studentmodulehistory') # Ignore csmh since it is now static
cmd.append(config[db].get('db', db)) # db param is last
with open(target_name, "w") as outfile:
subprocess.call(cmd, stdout=outfile)
filter_with_sed(config[db], target_name)
compressed_name = compress(config[db], target_name)
make_symlink(config[db], compressed_name)
def mysqlcsmhedump(config, db):
"""
Dumps the contents of the CSMHE table in increments.
Drive the mysqlcsmhe command from a stanza that looks like this:
dbname:
use: mysqlcsmhedump
host: dbname.dbhoster.com
port: 3306
user: root
password: redacted
db: importantstuff
sed:
- s/test.class.stanford.edu/localhost:8000/g
- s/class.stanford.edu/localhost:8000/g
format: tarball
csmhe_id_file: max_id_file
"""
# Read previous max(id) from file
info("Reading previous max(id) from file")
id_file = config["target_dir"] + "/csmhe/" + config[db].get("csmhe_id_file")
old_max_id = read_id_from_file(id_file)
# Read current max(id) from db
info("Reading max(id) from db")
dbConnection = MySQLdb.connect(
str(config[db]['host']),
str(config[db]['user']),
str(config[db]['password']),
str(config[db]['db']),
)
cursor = dbConnection.cursor()
cursor.execute('select max(id) from coursewarehistoryextended_studentmodulehistoryextended')
data = cursor.fetchone()
cursor.close()
dbConnection.close()
new_max_id = str(data[0])
# Dump rows from old to new ids.
target_name = config["target_dir"] + '/csmhe/' + make_target_name(config, db)
info("Dumping \"" + db + "\" to \"" + target_name + "\"")
mysqldump_cmd = ["mysqldump"]
option_mapping = OrderedDict([
("-h", "host"),
("-P", "port"),
("-u", "user"),
])
add_options(config, db, mysqldump_cmd, option_mapping, old_max_id, new_max_id)
with open(target_name, "w") as outfile:
subprocess.call(mysqldump_cmd, stdout=outfile)
filter_with_sed(config[db], target_name)
compressed_name = compress(config[db], target_name)
# Write new max(id) to file
info("Writing max(id) to file")
write_id_to_file(id_file, new_max_id)
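# Illustrative note (not part of the original script): each run exports only
# rows with old_max_id < id <= new_max_id, so successive dumps form
# non-overlapping increments of the coursewarehistoryextended table.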
## Helper Functions
def read_id_from_file(file):
"""
Read the last written max(id) from the tracking file.
"""
input = open(file, 'r')
id = input.readline().strip()
input.close()
return id
def write_id_to_file(file, id):
"""
Write the new max(id) to the tracking file.
"""
output = open(file, 'w')
output.write(id)
output.close()
def add_options(config, db, cmd, option_mapping, min_id, max_id):
"""
Add the base options to the command
"""
for (option, setting_name) in option_mapping.iteritems():
if setting_name in config[db]:
cmd.append(option)
setting = str(config[db][setting_name])
if len(setting):
cmd.append(setting)
cmd.append("--where=id >" + min_id + " and id <=" + max_id) # where clause
cmd.append("--lock-tables=false") # for R/O account
cmd.append("-p" + config[db].get('password', "")) # not space separated
cmd.append(config[db].get('db', db)) # db param
cmd.append("coursewarehistoryextended_studentmodulehistoryextended") # only export this table
def filter_with_sed(dbconfig, target_name):
for sedcmd in dbconfig.get('sed', []):
info("cleaning " + target_name + "with \"" + sedcmd + "\"")
cmd = ['sed', '-i', '-e', sedcmd, target_name]
subprocess.call(cmd)
def compress(dbconfig, target_name):
"""
Compress the target, method depends on the "format" parameter. Returns
the resulting target filename, useful for symlinking to.
"""
fmt = dbconfig.get("format", None)
if fmt in ["tarball", ".tar.gz", "tar.gz"]:
info("zipping and compressing " + target_name)
output_name = target_name + ".tar.gz"
cmd = ["tar", "zcvf", output_name, target_name]
subprocess.call(cmd)
info("removing " + target_name)
cmd = ["rm", "-r", target_name]
subprocess.call(cmd)
elif fmt in [".gz", "gz", "compress", "compressed", "gzip", "gzipped"]:
info("compressing " + target_name)
cmd = ["gzip", "-r", "-q", target_name]
output_name = target_name + ".gz"
subprocess.call(cmd)
else:
error("invalid \"compress\" setting, should be tarball or compress, " + target_name)
output_name = ""
return output_name
def make_symlink(dbconfig, targ):
"""Create a symlink unless explicitly configured not to."""
if "latest" in dbconfig and not dbconfig["latest"]:
return
link = re.sub(r'[0-9]+', 'latest', targ)
try:
os.symlink(targ, link)
info("create link " + link + " --> " + targ)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link)
os.symlink(targ, link)
info("move link " + link + " --> " + targ)
def make_target_name(config, db):
"""
Return the dump filename to generate: the configured name template
(default "%(dbname)s-%(today)s") filled in with today's date and the db name.
"""
templ = config[db].get("name", "%(dbname)s-%(today)s")
return templ % {
"today": datetime.datetime.now().strftime("%Y%m%d"),
"dbname": db, "name": db,
}
def info(msg):
sys.stderr.write(myname + " INFO: " + msg + "\n")
def error(msg):
sys.stderr.write(myname + " ERROR: " + msg + "\n")
## Main
def main():
config_file = open(CONFIG_FILENAME, "r")
config = yaml.load(config_file, Loader=ordered_yaml.OrderedDictYAMLLoader)
if "target_dir" in config:
os.chdir(config["target_dir"])
else:
os.chdir(DEFAULT_TARGET_DIR)
for db in config:
if type(config[db]) is str:
continue
method_name = config[db].get('use')
if not method_name:
continue
methodfunc = globals()[method_name]
methodfunc(config, db)
if __name__ == "__main__":
main()
|
|
import core
from core import *
class DMIDE_MenuBar(wx.MenuBar):
def __init__(self, window=None):
wx.MenuBar.__init__(self, wx.MB_DOCKABLE)
self.installMenuService()
def installMenuService(self):
""" Build a MenuBar for the top-level window as read from an XML file """
if os.path.exists(os.path.join(wx.GetApp().get_dir(), 'settings', 'menubar.xml')):
handler = MenuBarHandler(self)
parser = xml.sax.parse(os.path.join(wx.GetApp().get_dir(), 'settings', 'menubar.xml'), handler)
elif os.path.exists('menubar.xml'):
handler = MenuBarHandler(self)
parser = xml.sax.parse('menubar.xml', handler)
else:
handler = MenuBarHandler(self)
parser = xml.sax.parseString(default_menu, handler)
class DMIDE_FancyMenuBar(wxFlatMenu.FlatMenuBar):
def __init__(self, window):
wxFlatMenu.FlatMenuBar.__init__(self, window, ID_MENUBAR)
self.installMenuService()
def installMenuService(self):
""" Build a MenuBar for the top-level window as read from an XML file """
if os.path.exists(os.path.join(wx.GetApp().get_dir(), 'settings', 'menubar.xml')):
handler = FancyMenuBarHandler(self)
parser = xml.sax.parse(os.path.join(wx.GetApp().get_dir(), 'settings', 'menubar.xml'), handler)
elif os.path.exists('menubar.xml'):
handler = FancyMenuBarHandler(self)
parser = xml.sax.parse('menubar.xml', handler)
else:
handler = FancyMenuBarHandler(self)
parser = xml.sax.parseString(default_menu, handler)
def FindItemById(self, id):
return self.FindMenuItem(id)
class MenuBarHandler(xml.sax.handler.ContentHandler):
""" Handler for reading the XML """
def __init__(self, menubar):
self.ordered_list = []
self.current = []
self.menubar = menubar
def startElement(self, name, attributes):
if name == 'menu_bar':
if not self.current:
menu_title = attributes['title']
self.current = [(wx.Menu(), menu_title)]
else:
menu_title = attributes['title']
self.current.append((wx.Menu(), menu_title))
elif name == 'menu':
if not self.current:
return
if attributes.has_key('type'):
if attributes['type'] == 'separator':
self.current[-1][0].AppendSeparator()
return
id = attributes['id']
title = attributes['title']
macro = ''
desc = ''
flags = ''
if attributes.has_key('macro'):
macro = attributes['macro']
if attributes.has_key('desc'):
desc = attributes['desc']
if attributes.has_key('flags'):
flags = attributes['flags']
if macro:
title = '%s\t%s' % (title, macro)
if flags:
self.current[-1][0].Append(globals()[id], title, desc, globals()[flags])
else:
self.current[-1][0].Append(globals()[id], title, desc)
def endElement(self, name):
if name == 'menu_bar':
if self.current == [self.current[-1]]:
self.menubar.Append(*self.current[-1])
self.current = []
else:
self.current[-2][0].AppendMenu(wx.ID_ANY, self.current[-1][1], self.current[-1][0])
self.current = self.current[:-1]
class FancyMenuBarHandler(xml.sax.handler.ContentHandler):
""" Handler for reading the XML """
def __init__(self, menubar):
self.ordered_list = []
self.current = []
self.menubar = menubar
def startElement(self, name, attributes):
if name == 'menu_bar':
if not self.current:
menu_title = attributes['title']
self.current = [(wxFlatMenu.FlatMenu(), menu_title)]
else:
menu_title = attributes['title']
self.current.append((wxFlatMenu.FlatMenu(), menu_title))
elif name == 'menu':
if not self.current:
return
if attributes.has_key('type'):
if attributes['type'] == 'separator':
self.current[-1][0].AppendSeparator()
return
id = attributes['id']
title = attributes['title']
macro = ''
desc = ''
flags = ''
if attributes.has_key('macro'):
macro = attributes['macro']
if attributes.has_key('desc'):
desc = attributes['desc']
if attributes.has_key('flags'):
flags = attributes['flags']
if macro:
title = '%s\t%s' % (title, macro)
'''
if flags:
self.current[-1][0].Append(globals()[id], title, desc, globals()[flags])
else:
self.current[-1][0].Append(globals()[id], title, desc)
'''
if not flags:
flags = 'ID_ITEM_NORMAL'
if id in id_to_art:
bmp = wx.GetApp().art.getFromWx(id_to_art[id], (dmide_menu_art_size, dmide_menu_art_size), wx.ART_MENU)
disabled = wx.BitmapFromImage(wx.ImageFromBitmap(bmp).ConvertToGreyscale())
else:
bmp = wx.NullBitmap
disabled = wx.NullBitmap
item = wxFlatMenu.FlatMenuItem(self.current[-1][0], globals()[id], title, desc, globals()[flags], normalBmp=bmp, disabledBmp=disabled)
self.current[-1][0].AppendItem(item)
def endElement(self, name):
if name == 'menu_bar':
if self.current == [self.current[-1]]:
self.menubar.Append(*self.current[-1])
self.current = []
else:
self.current[-2][0].AppendMenu(wx.ID_ANY, self.current[-1][1], self.current[-1][0], wx.ITEM_NORMAL)
self.current = self.current[:-1]
default_menu = '''
<menu_list>
<menu_bar title="File">
<menu id="ID_FILE_NEW" title="New" macro="Ctrl+N" desc="Create a new file." />
<menu id="ID_FILE_OPEN" title="Open" macro="Ctrl+O" desc="Open a file." />
<menu id="ID_FILE_CLOSE" title="Close" macro="Ctrl+Shift+C" desc="Close the current file." />
<menu id="ID_FILE_SAVE" title="Save" macro="Ctrl+S" desc="Save the current file." />
<menu id="ID_FILE_SAVEAS" title="Save As" macro="Ctrl+Shift+S" desc="Save the current file in a different title." />
<menu type="separator" />
<menu id="ID_FILE_NEWENVIRONMENT" title="New Environment" macro="Ctrl+Shift+N" desc="Create a new environment." />
<menu id="ID_FILE_OPENENVIRONMENT" title="Open Environment" macro="Ctrl+Shift+O" desc="Open an environment." />
<menu type="separator" />
<menu id="ID_EXIT" title="Exit" macro="Ctrl+Q" desc="Exit DMIDE." />
</menu_bar>
<menu_bar title="Edit">
<menu id="ID_EDIT_UNDO" title="Undo" macro="Ctrl+Z" desc="Undo last change." />
<menu id="ID_EDIT_REDO" title="Redo" macro="Ctrl+Y" desc="Redo last undo change." />
<menu type="separator" />
<menu id="ID_EDIT_CUT" title="Cut" macro="Ctrl+X" desc="Cut the selected text." />
<menu id="ID_EDIT_COPY" title="Copy" macro="Ctrl+C" desc="Copy the selected text." />
<menu id="ID_EDIT_PASTE" title="Paste" macro="Ctrl+V" desc="Paste the text in clipboard." />
<menu id="ID_EDIT_DELETE" title="Delete" macro="Del" desc="Delete the selected text." />
<menu type="separator" />
<menu id="ID_EDIT_FIND" title="Find" macro="Ctrl+F" desc="Find text in this document." />
<menu id="ID_EDIT_FINDNEXT" title="Find Next" macro="F3" desc="Find the next text in this document." />
<menu id="ID_EDIT_FINDPREV" title="Find Previous" macro="Shift+F3" desc="Find the previous text in this document." />
<menu id="ID_EDIT_REPLACE" title="Replace" macro="Ctrl+H" desc="Replace text in this document." />
<menu type="separator" />
<menu id="ID_EDIT_GOTOLINE" title="Goto Line" macro="Ctrl+G" desc="Go to specified line." />
<menu id="ID_EDIT_SELECTALL" title="Select All" macro="Ctrl+A" desc="Select all text in this document." />
</menu_bar>
<menu_bar title="View">
<menu id="ID_VIEW_FILETOOLBAR" title="File Toolbar" desc="Toggle view of the file toolbar." flags="ID_ITEM_CHECK" />
<menu type="separator" />
<menu id="ID_VIEW_FILETREE" title="File Tree" desc="Toggle view of the file tree." flags="ID_ITEM_CHECK" />
<menu id="ID_VIEW_EDITOR" title="Main Editor" desc="Toggle view of the main editor." flags="ID_ITEM_CHECK" />
<menu id="ID_VIEW_BUILDINFORMATION" title="Build Information" desc="Toggle view of the build information." flags="ID_ITEM_CHECK" />
<menu id="ID_VIEW_CONSOLE" title="Console" desc="Toggle view of the developer console." flags="ID_ITEM_CHECK" />
</menu_bar>
<menu_bar title="Perspective">
<menu id="ID_PERSPECTIVE_DEFAULT" title="Default" desc="Load default perspective." />
<menu id="ID_PERSPECTIVE_SAVE" title="Save" desc="Save perspective." />
<menu id="ID_PERSPECTIVE_LOAD" title="Load" desc="Load perspective." />
<menu type="separator" />
</menu_bar>
<menu_bar title="Options">
<menu id="ID_OPTIONS_PERSPECTIVE" title="Perspective" desc="Settings for the look and feel of DMIDE." />
</menu_bar>
<menu_bar title="Help">
<menu type="separator" />
<menu id="ID_HELP_ABOUT" title="About" desc="About DMIDE." />
</menu_bar>
</menu_list>
'''
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import json
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'APSL (v2) BSD (4 clause)',
'APSL (v2)',
'Anti-Grain Geometry',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) BSD-like',
'Apache (v2.0) GPL (v2)',
'Apache (v2.0)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'BSD (2 clause) ISC',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (2 clause)',
'BSD (3 clause) GPL (v2)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (4 clause)',
'BSD',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD MIT/X11 (BSD like)',
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'BSL (v1) LGPL (v2.1 or later)',
'FreeType (BSD like) with patent clause',
'FreeType (BSD like)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v2) LGPL (v2.1 or later)',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'ISC',
'Independent JPEG Group License',
'LGPL (unversioned/unknown version)',
'LGPL (v2 or later)',
'LGPL (v2)',
'LGPL (v2.1 or later)',
'LGPL (v2.1)',
'LGPL (v3 or later)',
'MIT/X11 (BSD like) LGPL (v2.1 or later)',
'MIT/X11 (BSD like)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v1.1) GPL (v2) LGPL (v2 or later)',
'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (v2)',
'MPL (v1.1) LGPL (v2 or later)',
'MPL (v1.1) LGPL (v2.1 or later)',
'MPL (v1.1)',
'MPL (v2.0)',
'Ms-PL',
'Public domain BSD (3 clause)',
'Public domain BSD',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'Public domain',
'SGI Free Software License B',
'SunSoft (BSD like)',
'libpng',
'zlib/libpng',
'University of Illinois/NCSA Open Source License (BSD like)',
('University of Illinois/NCSA Open Source License (BSD like) '
'MIT/X11 (BSD like)'),
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
'base/third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'buildtools/third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=25980
'UNKNOWN',
],
# http://llvm.org/bugs/show_bug.cgi?id=25976
'buildtools/third_party/libc++/trunk/src/include/atomic_support.h': [
'UNKNOWN'
],
'buildtools/third_party/libc++/trunk/utils/gen_link_script': [ 'UNKNOWN' ],
'buildtools/third_party/libc++/trunk/utils/not': [ 'UNKNOWN' ],
'buildtools/third_party/libc++/trunk/utils/sym_check': [ 'UNKNOWN' ],
'buildtools/third_party/libc++abi/trunk/test': [ 'UNKNOWN' ],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
# This contains files copied from elsewhere in the tree. Since the copied
# directories might have suppressions below (like simplejson), whitelist the
# whole directory. This is also not shipped code.
'chrome/common/extensions/docs/server2/third_party': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'courgette/third_party/qsufsort.h': [ # http://crbug.com/98095
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSD (4 clause) ISC',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
'MPL (v1.1) LGPL (unversioned/unknown version)',
],
# The project is BSD-licensed but the individual files do not have
# consistent license headers. Also, this is just used in a utility
# and not shipped. https://github.com/waylan/Python-Markdown/issues/435
'third_party/Python-Markdown': [
'UNKNOWN',
],
'third_party/WebKit': [
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# http://crbug.com/326117
# https://bitbucket.org/chrisatlee/poster/issue/21
'third_party/chromite/third_party/poster': [
'UNKNOWN',
],
# http://crbug.com/333508
'buildtools/clang_format/script': [
'UNKNOWN',
],
# https://mail.python.org/pipermail/cython-devel/2014-July/004062.html
'third_party/cython': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/catapult/tracing/third_party/devscripts': [
'GPL (v2 or later)',
],
# https://github.com/shazow/apiclient/issues/8
# MIT license.
'third_party/catapult/third_party/apiclient': [
'UNKNOWN',
],
# https://bugs.launchpad.net/beautifulsoup/+bug/1481316
# MIT license.
'third_party/catapult/third_party/beautifulsoup': [
'UNKNOWN'
],
# https://code.google.com/p/graphy/issues/detail?id=6
# Apache (v2.0)
'third_party/catapult/third_party/graphy': [
'UNKNOWN',
],
# https://github.com/GoogleCloudPlatform/gsutil/issues/305
('third_party/catapult/third_party/gsutil/gslib/third_party/'
'storage_apitools'): [
'UNKNOWN',
],
# https://github.com/google/apitools/issues/63
'third_party/catapult/third_party/gsutil/third_party/apitools': [
'UNKNOWN',
],
# https://github.com/boto/boto/issues/3373
'third_party/catapult/third_party/gsutil/third_party/boto': [
'UNKNOWN',
],
# https://bitbucket.org/cmcqueen1975/crcmod/issues/1/please-add-per-file-licenses
# Includes third_party/catapult/third_party/gsutil/third_party/crcmod_osx.
'third_party/catapult/third_party/gsutil/third_party/crcmod': [
'UNKNOWN',
],
# https://github.com/jcgregorio/httplib2/issues/307
'third_party/catapult/third_party/gsutil/third_party/httplib2': [
'UNKNOWN',
],
# https://github.com/google/oauth2client/issues/331
'third_party/catapult/third_party/gsutil/third_party/oauth2client': [
'UNKNOWN',
],
# https://github.com/google/protorpc/issues/14
'third_party/catapult/third_party/gsutil/third_party/protorpc': [
'UNKNOWN',
],
# https://sourceforge.net/p/pyasn1/tickets/4/
# Includes
# third_party/catapult/third_party/gsutil/third_party/pyasn1-modules.
'third_party/catapult/third_party/gsutil/third_party/pyasn1': [
'UNKNOWN',
],
# https://github.com/pnpnpn/retry-decorator/issues/4
'third_party/catapult/third_party/gsutil/third_party/retry-decorator': [
'UNKNOWN',
],
# https://bitbucket.org/sybren/python-rsa/issues/28/please-add-per-file-licenses
'third_party/catapult/third_party/gsutil/third_party/rsa': [
'UNKNOWN',
],
# https://bitbucket.org/gutworth/six/issues/137/please-add-per-file-licenses
# Already fixed upstream. https://crbug.com/573341
'third_party/catapult/third_party/gsutil/third_party/six': [
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# MIT license.
'third_party/catapult/third_party/html5lib-python': [
'UNKNOWN',
],
# https://github.com/GoogleCloudPlatform/appengine-mapreduce/issues/71
# Apache (v2.0)
'third_party/catapult/third_party/mapreduce': [
'UNKNOWN',
],
# https://code.google.com/p/webapp-improved/issues/detail?id=103
# Apache (v2.0).
'third_party/catapult/third_party/webapp2': [
'UNKNOWN',
],
# https://github.com/Pylons/webob/issues/211
# MIT license.
'third_party/catapult/third_party/WebOb': [
'UNKNOWN',
],
# https://github.com/Pylons/webtest/issues/141
# MIT license.
'third_party/catapult/third_party/webtest': [
'UNKNOWN',
],
# https://bitbucket.org/ianb/paste/issues/12/add-license-headers-to-source-files
# MIT license.
'third_party/catapult/third_party/Paste': [
'UNKNOWN',
],
# https://bitbucket.org/gutworth/six/issues/129/add-license-headers-to-source-files
# MIT license.
'third_party/catapult/third_party/six': [
'UNKNOWN',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/fontconfig': [
# https://bugs.freedesktop.org/show_bug.cgi?id=73401
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/jmake': [ # Used only at build time.
'GPL (v2)',
],
'third_party/jsoncpp/source': [
# https://github.com/open-source-parsers/jsoncpp/issues/234
'UNKNOWN',
],
'third_party/junit/src': [
# Pulled in via DEPS for Android only.
# Eclipse Public License / not shipped.
# Bug filed but upstream prefers not to fix.
# https://github.com/junit-team/junit/issues/1132
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
# Many liblouis files are mirrored but not used in the NaCl module.
# They are not excluded from the mirror because of lack of infrastructure
# support. Getting license headers added to the files where missing is
# tracked in https://github.com/liblouis/liblouis/issues/22.
'third_party/liblouis/src': [
'GPL (v3 or later)',
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libvpx_new/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/src': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
# Missing license headers in openh264 sources: https://github.com/cisco/openh264/issues/2233
'third_party/openh264/src': [
'UNKNOWN',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/boringssl': [
# There are some files in BoringSSL which came from OpenSSL and have no
# license in them. We don't wish to add the license header ourselves
# thus we don't expect to pass license checks.
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# https://bitbucket.org/ned/coveragepy/issue/313/add-license-file-containing-2-3-or-4
# BSD 2-clause license.
'third_party/pycoverage': [
'UNKNOWN',
],
'third_party/pyelftools': [ # http://crbug.com/222831
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/sfntly/src/java': [ # Apache 2.0, not shipped.
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/speech-dispatcher/libspeechd.h': [
'GPL (v2 or later)',
],
'third_party/sqlite': [
'UNKNOWN',
],
# http://crbug.com/334668
# MIT license.
'tools/swarming_client/third_party/httplib2': [
'UNKNOWN',
],
# http://crbug.com/334668
# Apache v2.0.
'tools/swarming_client/third_party/oauth2client': [
'UNKNOWN',
],
# http://crbug.com/471372
# BSD
'tools/swarming_client/third_party/pyasn1': [
'UNKNOWN',
],
# http://crbug.com/471372
# Apache v2.0.
'tools/swarming_client/third_party/rsa': [
'UNKNOWN',
],
# https://github.com/kennethreitz/requests/issues/1610
'tools/swarming_client/third_party/requests': [
'UNKNOWN',
],
# BSD License. http://bugzilla.maptools.org/show_bug.cgi?id=2532
'third_party/pdfium/third_party/libtiff/tif_ojpeg.c': [
'UNKNOWN',
],
'third_party/pdfium/third_party/libtiff/tiffvers.h': [
'UNKNOWN',
],
'third_party/pdfium/third_party/libtiff/uvcode.h': [
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
# MIT license but some files contain no licensing info. e.g. autogen.sh.
# Files missing licensing info are not shipped.
'third_party/wayland': [ # http://crbug.com/553573
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# https://github.com/KhronosGroup/WebGL/issues/435
'third_party/webgl/src': [
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
# Not shipped, MIT license but the header files contain no licensing info.
'tools/telemetry/third_party/altgraph': [
'UNKNOWN',
],
# Not shipped, MIT license but the header files contain no licensing info.
'tools/telemetry/third_party/modulegraph': [
'UNKNOWN',
],
'tools/telemetry/third_party/pyserial': [
# https://sourceforge.net/p/pyserial/feature-requests/35/
'UNKNOWN',
],
}
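# Illustrative note (not part of the original script): a suppression entry is
# keyed by a path prefix relative to --root and lists the licensecheck.pl
# license strings tolerated under that prefix, e.g.
#   'third_party/somelib': ['UNKNOWN'],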
EXCLUDED_PATHS = [
# Don't check generated files
'out/',
# Don't check sysroot directories
'build/linux/debian_wheezy_amd64-sysroot',
'build/linux/debian_wheezy_arm-sysroot',
'build/linux/debian_wheezy_i386-sysroot',
'build/linux/debian_wheezy_mips-sysroot',
]
def check_licenses(options, args):
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path,
'-l', '100',
'-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
used_suppressions = set()
errors = []
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# Check if the file belongs to one of the excluded paths.
if any((filename.startswith(path) for path in EXCLUDED_PATHS)):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
if not options.ignore_suppressions:
matched_prefixes = [
prefix for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES
if filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]]
if matched_prefixes:
used_suppressions.update(set(matched_prefixes))
continue
errors.append({'filename': filename, 'license': license})
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
for error in errors:
print "'%s' has non-whitelisted license '%s'" % (
error['filename'], error['license'])
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
    # Do not print unused suppressions here, so that the message above stays
    # clearly visible and gets proper attention. Too much unrelated output
    # would be distracting and make the important points easy to miss.
return 1
print "\nSUCCESS\n"
if not len(args):
unused_suppressions = set(
PATH_SPECIFIC_WHITELISTED_LICENSES.iterkeys()).difference(
used_suppressions)
if unused_suppressions:
print "\nNOTE: unused suppressions detected:\n"
print '\n'.join(unused_suppressions)
return 0
def main():
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
option_parser.add_option('--json', help='Path to JSON output file')
options, args = option_parser.parse_args()
return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
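# Example invocation (paths are illustrative; the flags correspond to the
# options defined in main() above):
#   python checklicenses.py --root /path/to/src \
#       --json /tmp/license-errors.json third_party/some_library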
|
|
from mock import MagicMock
from elasticmagic import agg, Document, DynamicDocument, Field, SearchQuery, Term, Match, Index
from elasticmagic.types import Integer, Float
from elasticmagic.ext.queryfilter import QueryFilter, FacetFilter, RangeFilter, SimpleFilter
from elasticmagic.ext.queryfilter import FacetQueryFilter, FacetQueryValue
from elasticmagic.ext.queryfilter import SimpleQueryFilter, SimpleQueryValue
from elasticmagic.ext.queryfilter import OrderingFilter, OrderingValue
from elasticmagic.ext.queryfilter import GroupedPageFilter, PageFilter
from .base import BaseTestCase
class CarType(object):
def __init__(self, id, title):
self.id = id
self.title = title
TYPES = {
t.id: t
for t in [
CarType(0, 'Sedan'),
CarType(1, 'Station Wagon'),
CarType(2, 'Hatchback'),
CarType(3, 'Coupe'),
]
}
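# instance_mapper callback used by FacetFilter below: it receives the facet
# values and must return a mapping of value -> model instance. Here it simply
# returns the full TYPES dict regardless of the requested values.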
def type_mapper(values):
return TYPES
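# The tests below build search queries against self.index (provided by
# BaseTestCase), apply query-string style parameters through a QueryFilter and
# compare the generated request body with assert_expression. Search responses
# are faked via MagicMock so that process_results() can be exercised as well.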
class QueryFilterTest(BaseTestCase):
def test_simple_filter(self):
class CarQueryFilter(QueryFilter):
type = SimpleFilter(self.index.car.type, type=Integer)
vendor = SimpleFilter(self.index.car.vendor)
model = SimpleFilter(self.index.car.model, alias='m')
qf = CarQueryFilter()
sq = self.index.query()
sq = qf.apply(sq, {})
self.assert_expression(sq, {})
sq = self.index.query()
sq = qf.apply(sq, {'m': ['vrx']})
self.assert_expression(
sq,
{
"query": {
"filtered": {
"filter": {
"term": {
"model": "vrx"
}
}
}
}
}
)
sq = (
self.index.query(Match(self.index.car.name, 'test'))
.filter(self.index.car.status == 0)
)
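        # Malformed values ('1:break') are truncated to their leading token and
        # 'null' entries are dropped, so only [0, 1, 3] reach the terms filter
        # in the expected expression below.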
sq = qf.apply(sq, {'type': ['0', '1:break', '3', 'null'], 'vendor': ['Subaru']})
self.assert_expression(
sq,
{
"query": {
"filtered": {
"query": {
"match": {"name": "test"}
},
"filter": {
"bool": {
"must": [
{"term": {"status": 0}},
{"terms": {"type": [0, 1, 3]}},
{"term": {"vendor": "Subaru"}}
]
}
}
}
}
}
)
def test_simple_filter_with_and_conjunction(self):
class ClientQueryFilter(QueryFilter):
label = SimpleFilter(self.index.client.label, conj_operator=QueryFilter.CONJ_AND)
qf = ClientQueryFilter()
sq = self.index.query()
sq = qf.apply(sq, {})
self.assert_expression(sq, {})
sq = self.index.query()
sq = qf.apply(sq, {'label': ['greedy']})
self.assert_expression(
sq,
{
"query": {
"filtered": {
"filter": {
"term": {
"label": "greedy"
}
}
}
}
}
)
sq = self.index.query()
sq = qf.apply(sq, {'label': ['greedy', 'young', 'nasty']})
self.assert_expression(
sq,
{
"query": {
"filtered": {
"filter": {
"bool": {
"must": [
{
"term": {"label": "greedy"}
},
{
"term": {"label": "young"}
},
{
"term": {"label": "nasty"}
}
]
}
}
}
}
}
)
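    # FacetFilter adds a terms aggregation per facet. When a facet is selected,
    # its own aggregation is computed inside a filter built from the *other*
    # selected facets, and the selection itself is applied as a post_filter so
    # that each facet's counts stay independent of the user's choice in that
    # facet.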
def test_facet_filter(self):
class CarQueryFilter(QueryFilter):
type = FacetFilter(
self.index.car.type,
instance_mapper=type_mapper,
get_title=lambda v: v.instance.title if v.instance else unicode(v.value),
type=Integer,
)
vendor = FacetFilter(self.index.car.vendor, aggs={'min_price': agg.Min(self.index.car.price)})
model = FacetFilter(self.index.car.model, alias='m')
qf = CarQueryFilter()
sq = self.index.query()
sq = qf.apply(sq, {})
self.assert_expression(
sq,
{
"aggregations": {
"qf.type": {
"terms": {"field": "type"}
},
"qf.vendor": {
"terms": {"field": "vendor"},
"aggregations": {
"min_price": {
"min": {"field": "price"}
}
}
},
"qf.model": {
"terms": {"field": "model"}
}
}
}
)
sq = self.index.query()
sq = qf.apply(sq, {'m': ['vrx']})
self.assert_expression(
sq,
{
"aggregations": {
"qf.type.filter": {
"filter": {
"term": {"model": "vrx"}
},
"aggregations": {
"qf.type": {
"terms": {"field": "type"}
}
}
},
"qf.vendor.filter": {
"filter": {
"term": {"model": "vrx"}
},
"aggregations": {
"qf.vendor": {
"terms": {"field": "vendor"},
"aggregations": {
"min_price": {
"min": {"field": "price"}
}
}
}
}
},
"qf.model": {
"terms": {"field": "model"}
}
},
"post_filter": {
"term": {
"model": "vrx"
}
}
}
)
sq = (
self.index.query(Match(self.index.car.name, 'test'))
.filter(self.index.car.status == 0)
.post_filter(self.index.car.date_created > 'now-1y',
meta={'tags': {qf.get_name()}})
)
sq = qf.apply(sq, {'type': ['0', '1:break', '3', 'null'], 'vendor': ['Subaru']})
self.assert_expression(
sq,
{
"query": {
"filtered": {
"query": {
"match": {"name": "test"}
},
"filter": {
"term": {"status": 0}
}
}
},
"aggregations": {
"qf.type.filter": {
"filter": {
"term": {"vendor": "Subaru"}
},
"aggregations": {
"qf.type": {
"terms": {"field": "type"}
}
}
},
"qf.vendor.filter": {
"filter": {
"terms": {"type": [0, 1, 3]}
},
"aggregations": {
"qf.vendor": {
"terms": {"field": "vendor"},
"aggregations": {
"min_price": {
"min": {"field": "price"}
}
}
}
}
},
"qf.model.filter": {
"filter": {
"bool": {
"must": [
{"terms": {"type": [0, 1, 3]}},
{"term": {"vendor": "Subaru"}}
]
}
},
"aggregations": {
"qf.model": {
"terms": {"field": "model"}
}
}
}
},
"post_filter": {
"bool": {
"must": [
{"range": {"date_created": {"gt": "now-1y"}}},
{"terms": {"type": [0, 1, 3]}},
{"term": {"vendor": "Subaru"}}
]
}
}
}
)
self.client.search = MagicMock(
return_value={
"hits": {
"hits": [],
"max_score": 1.829381,
"total": 893
},
"aggregations": {
"qf.type.filter": {
"doc_count": 1298,
"qf.type": {
"buckets": [
{
"key": 0,
"doc_count": 744
},
{
"key": 2,
"doc_count": 392
},
{
"key": 1,
"doc_count": 162
}
]
}
},
"qf.vendor.filter": {
"doc_count": 2153,
"qf.vendor": {
"buckets": [
{
"key": "Subaru",
"doc_count": 2153,
"min_price": {"value": 4000},
},
]
}
},
"qf.model.filter": {
"doc_count": 2153,
"qf.model": {
"buckets": [
{
"key": "Imprezza",
"doc_count": 1586
},
{
"key": "Forester",
"doc_count": 456
},
]
}
}
}
}
)
qf.process_results(sq.get_result())
type_filter = qf.type
self.assertEqual(len(type_filter.selected_values), 3)
self.assertEqual(len(type_filter.values), 1)
self.assertEqual(len(type_filter.all_values), 4)
self.assertEqual(type_filter.all_values[0].value, 0)
self.assertEqual(type_filter.all_values[0].count, 744)
self.assertEqual(type_filter.all_values[0].count_text, '744')
self.assertEqual(type_filter.all_values[0].selected, True)
self.assertEqual(type_filter.all_values[0].title, 'Sedan')
self.assertEqual(type_filter.all_values[0].instance.title, 'Sedan')
self.assertIs(type_filter.all_values[0], type_filter.get_value(0))
self.assertIs(type_filter.all_values[0], type_filter.selected_values[0])
self.assertEqual(type_filter.all_values[1].value, 2)
self.assertEqual(type_filter.all_values[1].count, 392)
self.assertEqual(type_filter.all_values[1].count_text, '+392')
self.assertEqual(type_filter.all_values[1].selected, False)
self.assertEqual(type_filter.all_values[1].title, 'Hatchback')
self.assertEqual(type_filter.all_values[1].instance.title, 'Hatchback')
self.assertIs(type_filter.all_values[1], type_filter.get_value(2))
self.assertIs(type_filter.all_values[1], type_filter.values[0])
self.assertEqual(type_filter.all_values[2].value, 1)
self.assertEqual(type_filter.all_values[2].count, 162)
self.assertEqual(type_filter.all_values[2].count_text, '162')
self.assertEqual(type_filter.all_values[2].selected, True)
self.assertEqual(type_filter.all_values[2].title, 'Station Wagon')
self.assertEqual(type_filter.all_values[2].instance.title, 'Station Wagon')
self.assertIs(type_filter.all_values[2], type_filter.get_value(1))
self.assertIs(type_filter.all_values[2], type_filter.selected_values[1])
self.assertEqual(type_filter.all_values[3].value, 3)
self.assertIs(type_filter.all_values[3].count, None)
self.assertEqual(type_filter.all_values[3].count_text, '')
self.assertEqual(type_filter.all_values[3].selected, True)
self.assertEqual(type_filter.all_values[3].title, 'Coupe')
self.assertEqual(type_filter.all_values[3].instance.title, 'Coupe')
self.assertIs(type_filter.all_values[3], type_filter.get_value(3))
self.assertIs(type_filter.all_values[3], type_filter.selected_values[2])
vendor_filter = qf.vendor
self.assertEqual(len(vendor_filter.selected_values), 1)
self.assertEqual(len(vendor_filter.values), 0)
self.assertEqual(len(vendor_filter.all_values), 1)
self.assertEqual(vendor_filter.all_values[0].value, 'Subaru')
self.assertEqual(vendor_filter.all_values[0].count, 2153)
self.assertEqual(vendor_filter.all_values[0].count_text, '2153')
self.assertEqual(vendor_filter.all_values[0].selected, True)
self.assertEqual(vendor_filter.all_values[0].bucket.get_aggregation('min_price').value, 4000)
self.assertIs(vendor_filter.all_values[0], vendor_filter.selected_values[0])
self.assertIs(vendor_filter.all_values[0], vendor_filter.get_value('Subaru'))
model_filter = qf.model
self.assertEqual(len(model_filter.selected_values), 0)
self.assertEqual(len(model_filter.values), 2)
self.assertEqual(len(model_filter.all_values), 2)
self.assertEqual(model_filter.all_values[0].value, 'Imprezza')
self.assertEqual(model_filter.all_values[0].count, 1586)
self.assertEqual(model_filter.all_values[0].count_text, '1586')
self.assertEqual(model_filter.all_values[0].selected, False)
self.assertIs(model_filter.all_values[0], model_filter.values[0])
self.assertIs(model_filter.all_values[0], model_filter.get_value('Imprezza'))
self.assertEqual(model_filter.all_values[1].value, 'Forester')
self.assertEqual(model_filter.all_values[1].count, 456)
self.assertEqual(model_filter.all_values[1].count_text, '456')
self.assertEqual(model_filter.all_values[1].selected, False)
self.assertIs(model_filter.all_values[1], model_filter.values[1])
self.assertIs(model_filter.all_values[1], model_filter.get_value('Forester'))
def test_facet_filter_with_and_conjunction(self):
class ClientQueryFilter(QueryFilter):
region = FacetFilter(self.index.client.region_id, type=Integer)
label = FacetFilter(self.index.client.label, conj_operator=QueryFilter.CONJ_AND)
qf = ClientQueryFilter()
sq = self.index.query()
sq = qf.apply(sq, {})
self.assert_expression(
sq,
{
"aggregations": {
"qf.region": {
"terms": {
"field": "region_id"
}
},
"qf.label": {
"terms": {
"field": "label"
}
}
}
}
)
sq = self.index.query()
sq = qf.apply(sq, {'label': ['greedy']})
self.assert_expression(
sq,
{
"aggregations": {
"qf.region.filter": {
"filter": {
"term": {
"label": "greedy"
}
},
"aggregations": {
"qf.region": {
"terms": {
"field": "region_id"
}
}
}
},
"qf.label.filter": {
"filter": {
"term": {
"label": "greedy"
}
},
"aggregations": {
"qf.label": {
"terms": {
"field": "label"
}
}
}
}
},
"post_filter": {
"term": {
"label": "greedy"
}
}
}
)
sq = self.index.query()
sq = qf.apply(sq, {'region': [123, 456], 'label': ['greedy', 'young', 'nasty']})
self.assert_expression(
sq,
{
"aggregations": {
"qf.region.filter": {
"filter": {
"bool": {
"must": [
{
"term": {"label": "greedy"}
},
{
"term": {"label": "young"}
},
{
"term": {"label": "nasty"}
}
]
}
},
"aggregations": {
"qf.region": {
"terms": {
"field": "region_id"
}
}
}
},
"qf.label.filter": {
"filter": {
"bool": {
"must": [
{
"terms": {"region_id": [123, 456]}
},
{
"bool": {
"must": [
{
"term": {"label": "greedy"}
},
{
"term": {"label": "young"}
},
{
"term": {"label": "nasty"}
}
]
}
}
]
}
},
"aggregations": {
"qf.label": {
"terms": {
"field": "label"
}
}
}
}
},
"post_filter": {
"bool": {
"must": [
{
"terms": {"region_id": [123, 456]}
},
{
"bool": {
"must": [
{
"term": {"label": "greedy"}
},
{
"term": {"label": "young"}
},
{
"term": {"label": "nasty"}
}
]
}
}
]
}
}
}
)
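    # RangeFilter exposes an 'enabled' exists-aggregation plus min/max
    # aggregations (unless disabled via compute_enabled/compute_min_max) and
    # applies the requested bounds as a post_filter; other filters' selections
    # are wrapped around its aggregations just like for facets.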
def test_range_filter(self):
class CarDocument(Document):
__doc_type__ = 'car'
price = Field(Integer)
engine_displacement = Field(Float)
class CarQueryFilter(QueryFilter):
price = RangeFilter(CarDocument.price, compute_min_max=False)
disp = RangeFilter(CarDocument.engine_displacement, alias='ed', compute_enabled=False)
qf = CarQueryFilter()
sq = self.index.query()
sq = qf.apply(sq, {'ed__gte': ['1.9']})
self.assert_expression(
sq,
{
"aggregations": {
"qf.price.enabled": {"filter": {"exists": {"field": "price"}}},
"qf.disp.min": {"min": {"field": "engine_displacement"}},
"qf.disp.max": {"max": {"field": "engine_displacement"}}
},
"post_filter": {
"range": {"engine_displacement": {"gte": 1.9}}
}
}
)
sq = self.index.query()
sq = qf.apply(sq, {'price__lte': ['10000']})
self.assert_expression(
sq,
{
"aggregations": {
"qf.price.enabled": {"filter": {"exists": {"field": "price"}}},
"qf.disp.filter": {
"filter": {
"range": {"price": {"lte": 10000}}
},
"aggregations": {
"qf.disp.min": {"min": {"field": "engine_displacement"}},
"qf.disp.max": {"max": {"field": "engine_displacement"}}
}
}
},
"post_filter": {
"range": {"price": {"lte": 10000}}
}
}
)
self.client.search = MagicMock(
return_value={
"hits": {
"hits": [],
"max_score": 1.829381,
"total": 893
},
"aggregations": {
"qf.price.enabled": {"doc_count": 890},
"qf.disp.filter": {
"doc_count": 237,
"qf.disp.min": {"value": 1.6},
"qf.disp.max": {"value": 3.0}
}
}
}
)
qf.process_results(sq.get_result())
price_filter = qf.price
self.assertEqual(price_filter.enabled, True)
self.assertIs(price_filter.min, None)
self.assertIs(price_filter.max, None)
self.assertIs(price_filter.from_value, None)
self.assertEqual(price_filter.to_value, 10000)
disp_filter = qf.disp
self.assertIs(disp_filter.enabled, None)
self.assertAlmostEqual(disp_filter.min, 1.6)
self.assertAlmostEqual(disp_filter.max, 3.0)
self.assertIs(disp_filter.from_value, None)
self.assertIs(disp_filter.to_value, None)
def test_range_filter_dynamic_document(self):
class CarQueryFilter(QueryFilter):
price = RangeFilter(self.index.car.price, type=Integer)
disp = RangeFilter(self.index.car.engine_displacement, type=Float)
qf = CarQueryFilter()
sq = self.index.query()
sq = qf.apply(sq, {'price__lte': ['10000']})
self.assert_expression(
sq,
{
"aggregations": {
"qf.price.enabled": {"filter": {"exists": {"field": "price"}}},
"qf.price.min": {"min": {"field": "price"}},
"qf.price.max": {"max": {"field": "price"}},
"qf.disp.enabled": {"filter": {"exists": {"field": "engine_displacement"}}},
"qf.disp.filter": {
"filter": {
"range": {"price": {"lte": 10000}}
},
"aggregations": {
"qf.disp.min": {"min": {"field": "engine_displacement"}},
"qf.disp.max": {"max": {"field": "engine_displacement"}}
}
}
},
"post_filter": {
"range": {"price": {"lte": 10000}}
}
}
)
self.client.search = MagicMock(
return_value={
"hits": {
"hits": [],
"max_score": 1.829381,
"total": 893
},
"aggregations": {
"qf.price.enabled": {"doc_count": 890},
"qf.price.min": {"value": 7500},
"qf.price.max": {"value": 25800},
"qf.disp.enabled": {"doc_count": 888},
"qf.disp.filter": {
"doc_count": 237,
"qf.disp.min": {"value": 1.6},
"qf.disp.max": {"value": 3.0}
}
}
}
)
qf.process_results(sq.get_result())
price_filter = qf.price
self.assertEqual(price_filter.enabled, True)
self.assertEqual(price_filter.min, 7500)
self.assertEqual(price_filter.max, 25800)
self.assertIs(price_filter.from_value, None)
self.assertEqual(price_filter.to_value, 10000)
disp_filter = qf.disp
        self.assertEqual(disp_filter.enabled, True)
self.assertAlmostEqual(disp_filter.min, 1.6)
self.assertAlmostEqual(disp_filter.max, 3.0)
self.assertIs(disp_filter.from_value, None)
self.assertIs(disp_filter.to_value, None)
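    # SimpleQueryFilter maps named values to arbitrary query expressions;
    # multiple selected values are combined with a bool 'should' by default,
    # and unknown values ('false', 'null') are ignored.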
def test_simple_query_filter(self):
class CarQueryFilter(QueryFilter):
is_new = SimpleQueryFilter(
SimpleQueryValue('true', self.index.car.state == 'new'),
alias='new'
)
price = SimpleQueryFilter(
SimpleQueryValue('*-10000', self.index.car.price <= 10000),
SimpleQueryValue('10000-20000', self.index.car.price.range(gt=10000, lte=20000)),
SimpleQueryValue('20000-30000', self.index.car.price.range(gt=20000, lte=30000)),
SimpleQueryValue('30000-*', self.index.car.price.range(gt=30000)),
aggs={'disp_avg': agg.Avg(self.index.car.engine_displacement)}
)
qf = CarQueryFilter()
sq = self.index.query()
sq = qf.apply(sq, {})
self.assert_expression(sq, {})
sq = self.index.query()
sq = qf.apply(sq, {'price': [None]})
self.assert_expression(sq, {})
sq = self.index.query()
sq = qf.apply(sq, {'new': ['true', 'false']})
self.assert_expression(
sq,
{
"query": {
"filtered": {
"filter": {
"term": {"state": "new"}
}
}
}
}
)
qf = CarQueryFilter()
sq = (
self.index.query()
.filter(self.index.car.year == 2014)
)
sq = qf.apply(sq, {'price': ['*-10000', '10000-20000', 'null']})
self.assert_expression(
sq,
{
"query": {
"filtered": {
"filter": {
"bool": {
"must": [
{
"term": {"year": 2014}
},
{
"bool": {
"should": [
{
"range": {
"price": {"lte": 10000}
}
},
{
"range": {
"price": {"gt": 10000, "lte": 20000}
}
}
]
}
}
]
}
}
}
}
}
)
def test_simple_query_filter_with_and_conjunction(self):
class ItemQueryFilter(QueryFilter):
selling_type = SimpleQueryFilter(
SimpleQueryValue('retail', self.index.item.selling_type.in_([1, 2, 3])),
SimpleQueryValue('wholesale', self.index.item.selling_type.in_([3, 4, 5])),
conj_operator=QueryFilter.CONJ_AND
)
qf = ItemQueryFilter()
sq = self.index.query()
sq = qf.apply(sq, {})
self.assert_expression(sq, {})
sq = self.index.query()
sq = qf.apply(sq, {'selling_type': ['retail', 'wholesale']})
self.assert_expression(
sq,
{
"query": {
"filtered": {
"filter": {
"bool": {
"must": [
{
"terms": {"selling_type": [1, 2, 3]}
},
{
"terms": {"selling_type": [3, 4, 5]}
}
]
}
}
}
}
}
)
def test_facet_query_filter(self):
class CarQueryFilter(QueryFilter):
is_new = FacetQueryFilter(
FacetQueryValue('true', self.index.car.state == 'new'),
alias='new'
)
price = FacetQueryFilter(
FacetQueryValue('*-10000', self.index.car.price <= 10000),
FacetQueryValue('10000-20000', self.index.car.price.range(gt=10000, lte=20000)),
FacetQueryValue('20000-30000', self.index.car.price.range(gt=20000, lte=30000)),
FacetQueryValue('30000-*', self.index.car.price.range(gt=30000)),
aggs={'disp_avg': agg.Avg(self.index.car.engine_displacement)}
)
qf = CarQueryFilter()
self.assertIsNot(
CarQueryFilter().get_filter('price').get_value('*-10000'),
qf.get_filter('price').get_value('*-10000')
)
sq = self.index.query()
sq = qf.apply(sq, {'new': ['true', 'false']})
self.assert_expression(
sq,
{
"aggregations": {
"qf.is_new:true": {
"filter": {
"term": {"state": "new"}
}
},
"qf.price.filter": {
"filter": {
"term": {"state": "new"}
},
"aggregations": {
"qf.price:*-10000": {
"filter": {
"range": {"price": {"lte": 10000}}
},
"aggregations": {
"disp_avg": {
"avg": {"field": "engine_displacement"}
}
}
},
"qf.price:10000-20000": {
"filter": {
"range": {"price": {"gt": 10000, "lte": 20000}}
},
"aggregations": {
"disp_avg": {
"avg": {"field": "engine_displacement"}
}
}
},
"qf.price:20000-30000": {
"filter": {
"range": {"price": {"gt": 20000, "lte": 30000}}
},
"aggregations": {
"disp_avg": {
"avg": {"field": "engine_displacement"}
}
}
},
"qf.price:30000-*": {
"filter": {
"range": {"price": {"gt": 30000}}
},
"aggregations": {
"disp_avg": {
"avg": {"field": "engine_displacement"}
}
}
}
}
}
},
"post_filter": {
"term": {"state": "new"}
}
}
)
self.client.search = MagicMock(
return_value={
"hits": {
"hits": [],
"max_score": 1.829381,
"total": 893
},
"aggregations": {
"qf.is_new:true": {
"doc_count": 82
},
"qf.price.filter": {
"doc_count": 82,
"qf.price:*-10000": {
"doc_count": 11,
"disp_avg": {"value": 1.56}
},
"qf.price:10000-20000": {
"doc_count": 16,
"disp_avg": {"value": 2.4}
},
"qf.price:20000-30000": {
"doc_count": 23,
"disp_avg": {"value": 2.85}
},
"qf.price:30000-*": {
"doc_count": 32,
"disp_avg": {"value": 2.92}
}
}
}
}
)
qf.process_results(sq.get_result())
self.assertEqual(len(qf.is_new.all_values), 1)
self.assertEqual(len(qf.is_new.selected_values), 1)
self.assertEqual(len(qf.is_new.values), 0)
self.assertEqual(qf.is_new.get_value('true').value, 'true')
self.assertEqual(qf.is_new.get_value('true').count, 82)
self.assertEqual(qf.is_new.get_value('true').count_text, '82')
self.assertEqual(qf.is_new.get_value('true').selected, True)
self.assertEqual(len(qf.price.all_values), 4)
self.assertEqual(len(qf.price.selected_values), 0)
self.assertEqual(len(qf.price.values), 4)
self.assertEqual(qf.price.get_value('*-10000').value, '*-10000')
self.assertEqual(qf.price.get_value('*-10000').count, 11)
self.assertEqual(qf.price.get_value('*-10000').count_text, '11')
self.assertEqual(qf.price.get_value('*-10000').selected, False)
self.assertEqual(qf.price.get_value('*-10000').agg.get_aggregation('disp_avg').value, 1.56)
self.assertEqual(qf.price.get_value('10000-20000').value, '10000-20000')
self.assertEqual(qf.price.get_value('10000-20000').count, 16)
self.assertEqual(qf.price.get_value('10000-20000').count_text, '16')
self.assertEqual(qf.price.get_value('10000-20000').selected, False)
self.assertEqual(qf.price.get_value('10000-20000').agg.get_aggregation('disp_avg').value, 2.4)
self.assertEqual(qf.price.get_value('20000-30000').value, '20000-30000')
self.assertEqual(qf.price.get_value('20000-30000').count, 23)
self.assertEqual(qf.price.get_value('20000-30000').count_text, '23')
self.assertEqual(qf.price.get_value('20000-30000').selected, False)
self.assertEqual(qf.price.get_value('20000-30000').agg.get_aggregation('disp_avg').value, 2.85)
self.assertEqual(qf.price.get_value('30000-*').value, '30000-*')
self.assertEqual(qf.price.get_value('30000-*').count, 32)
self.assertEqual(qf.price.get_value('30000-*').count_text, '32')
self.assertEqual(qf.price.get_value('30000-*').selected, False)
self.assertEqual(qf.price.get_value('30000-*').agg.get_aggregation('disp_avg').value, 2.92)
qf = CarQueryFilter()
sq = self.index.query(self.index.car.year == 2014)
sq = qf.apply(sq, {'price': ['*-10000', '10000-20000', 'null']})
self.assert_expression(
sq,
{
"query": {
"term": {"year": 2014}
},
"aggregations": {
"qf.is_new.filter": {
"filter": {
"bool": {
"should": [
{
"range": {
"price": {"lte": 10000}
}
},
{
"range": {
"price": {"gt": 10000, "lte": 20000}
}
}
]
}
},
"aggregations": {
"qf.is_new:true": {
"filter": {
"term": {"state": "new"}
}
}
}
},
"qf.price:*-10000": {
"filter": {
"range": {"price": {"lte": 10000}}
},
"aggregations": {
"disp_avg": {
"avg": {"field": "engine_displacement"}
}
}
},
"qf.price:10000-20000": {
"filter": {
"range": {"price": {"gt": 10000, "lte": 20000}}
},
"aggregations": {
"disp_avg": {
"avg": {"field": "engine_displacement"}
}
}
},
"qf.price:20000-30000": {
"filter": {
"range": {"price": {"gt": 20000, "lte": 30000}}
},
"aggregations": {
"disp_avg": {
"avg": {"field": "engine_displacement"}
}
}
},
"qf.price:30000-*": {
"filter": {
"range": {"price": {"gt": 30000}}
},
"aggregations": {
"disp_avg": {
"avg": {"field": "engine_displacement"}
}
}
}
},
"post_filter": {
"bool": {
"should": [
{
"range": {
"price": {"lte": 10000}
}
},
{
"range": {
"price": {"gt": 10000, "lte": 20000}
}
}
]
}
}
}
)
self.client.search = MagicMock(
return_value={
"hits": {
"hits": [],
"max_score": 1.0,
"total": 514
},
"aggregations": {
"qf.is_new.filter": {
"doc_count": 34,
"qf.is_new:true": {
"doc_count": 32
}
},
"qf.price:*-10000": {
"doc_count": 7,
"disp_avg": {"value": 1.43}
},
"qf.price:10000-20000": {
"doc_count": 11,
"disp_avg": {"value": 1.98}
},
"qf.price:20000-30000": {
"doc_count": 6,
"disp_avg": {"value": 2.14}
},
"qf.price:30000-*": {
"doc_count": 10,
"disp_avg": {"value": 2.67}
}
}
}
)
qf.process_results(sq.get_result())
self.assertEqual(len(qf.is_new.all_values), 1)
self.assertEqual(len(qf.is_new.selected_values), 0)
self.assertEqual(len(qf.is_new.values), 1)
self.assertEqual(qf.is_new.get_value('true').value, 'true')
self.assertEqual(qf.is_new.get_value('true').count, 32)
self.assertEqual(qf.is_new.get_value('true').count_text, '32')
self.assertEqual(qf.is_new.get_value('true').selected, False)
self.assertEqual(len(qf.price.all_values), 4)
self.assertEqual(len(qf.price.selected_values), 2)
self.assertEqual(len(qf.price.values), 2)
self.assertEqual(qf.price.get_value('*-10000').value, '*-10000')
self.assertEqual(qf.price.get_value('*-10000').count, 7)
self.assertEqual(qf.price.get_value('*-10000').count_text, '7')
self.assertEqual(qf.price.get_value('*-10000').selected, True)
self.assertEqual(qf.price.get_value('*-10000').agg.get_aggregation('disp_avg').value, 1.43)
self.assertEqual(qf.price.get_value('10000-20000').value, '10000-20000')
self.assertEqual(qf.price.get_value('10000-20000').count, 11)
self.assertEqual(qf.price.get_value('10000-20000').count_text, '11')
self.assertEqual(qf.price.get_value('10000-20000').selected, True)
self.assertEqual(qf.price.get_value('10000-20000').agg.get_aggregation('disp_avg').value, 1.98)
self.assertEqual(qf.price.get_value('20000-30000').value, '20000-30000')
self.assertEqual(qf.price.get_value('20000-30000').count, 6)
self.assertEqual(qf.price.get_value('20000-30000').count_text, '+6')
self.assertEqual(qf.price.get_value('20000-30000').selected, False)
self.assertEqual(qf.price.get_value('20000-30000').agg.get_aggregation('disp_avg').value, 2.14)
self.assertEqual(qf.price.get_value('30000-*').value, '30000-*')
self.assertEqual(qf.price.get_value('30000-*').count, 10)
self.assertEqual(qf.price.get_value('30000-*').count_text, '+10')
self.assertEqual(qf.price.get_value('30000-*').selected, False)
self.assertEqual(qf.price.get_value('30000-*').agg.get_aggregation('disp_avg').value, 2.67)
def test_facet_query_filter_with_and_conjunction(self):
class ItemQueryFilter(QueryFilter):
available = FacetQueryFilter(
SimpleQueryValue('true', self.index.item.is_available == True),
)
selling_type = FacetQueryFilter(
SimpleQueryValue('retail', self.index.item.selling_type.in_([1, 2, 3])),
SimpleQueryValue('wholesale', self.index.item.selling_type.in_([3, 4, 5])),
conj_operator=QueryFilter.CONJ_AND
)
qf = ItemQueryFilter()
sq = self.index.query()
sq = qf.apply(sq, {})
self.assert_expression(
sq,
{
"aggregations": {
"qf.available:true": {
"filter": {
"term": {"is_available": True}
}
},
"qf.selling_type:retail": {
"filter": {
"terms": {"selling_type": [1, 2, 3]}
}
},
"qf.selling_type:wholesale": {
"filter": {
"terms": {"selling_type": [3, 4, 5]}
}
}
}
}
)
sq = self.index.query()
sq = qf.apply(sq, {'selling_type': ['retail']})
self.assert_expression(
sq,
{
"aggregations": {
"qf.available.filter": {
"filter": {
"terms": {"selling_type": [1, 2, 3]},
},
"aggregations": {
"qf.available:true": {
"filter": {
"term": {"is_available": True}
}
}
}
},
"qf.selling_type.filter": {
"filter": {
"terms": {"selling_type": [1, 2, 3]},
},
"aggregations": {
"qf.selling_type:retail": {
"filter": {
"terms": {"selling_type": [1, 2, 3]}
}
},
"qf.selling_type:wholesale": {
"filter": {
"terms": {"selling_type": [3, 4, 5]}
}
}
}
}
},
"post_filter": {
"terms": {"selling_type": [1, 2, 3]}
}
}
)
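    # OrderingFilter maps a named sort option to a list of sort expressions;
    # 'default' selects the ordering that is applied when no parameter is
    # passed.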
def test_ordering(self):
class CarQueryFilter(QueryFilter):
sort = OrderingFilter(
OrderingValue(
'popularity',
[self.index.car.popularity.desc(),
self.index.car.opinion_count.desc(missing='_last')],
),
OrderingValue('price', [self.index.car.price]),
OrderingValue('-price', [self.index.car.price.desc()]),
alias='o',
default='popularity',
)
sq = self.index.query()
qf = CarQueryFilter()
self.assertIsNot(
CarQueryFilter().get_filter('sort').get_value('popularity'),
qf.get_filter('sort').get_value('popularity')
)
self.assert_expression(
qf.apply(sq, {}),
{
"sort": [
{
"popularity": "desc"
},
{
"opinion_count": {"order": "desc", "missing": "_last"}
}
]
}
)
self.assertEqual(qf.sort.selected_value.value, 'popularity')
self.assertEqual(qf.sort.selected_value.selected, True)
self.assertEqual(qf.sort.get_value('price').selected, False)
self.assertEqual(qf.sort.get_value('-price').selected, False)
qf = CarQueryFilter()
self.assert_expression(
qf.apply(sq, {'o': ['price']}),
{
"sort": [
"price"
]
}
)
self.assertEqual(qf.sort.selected_value.value, 'price')
self.assertEqual(qf.sort.selected_value.selected, True)
self.assertEqual(qf.sort.get_value('popularity').selected, False)
self.assertEqual(qf.sort.get_value('-price').selected, False)
def test_page(self):
class CarQueryFilter(QueryFilter):
page = PageFilter(alias='p', per_page_values=[10, 25, 50])
sq = self.index.search_query()
qf = CarQueryFilter()
self.assert_expression(
qf.apply(sq, {}),
{
"size": 10
}
)
self.assert_expression(
qf.apply(sq, {'p': 3}),
{
"size": 10,
"from": 20
}
)
self.assert_expression(
qf.apply(sq, {'per_page': 25}),
{
"size": 25
}
)
self.assert_expression(
qf.apply(sq, {'p': 201, 'per_page': 50}),
{
"size": 0
}
)
self.assert_expression(
qf.apply(sq, {'p': 3, 'per_page': 100}),
{
"size": 10,
"from": 20
}
)
self.client.search = MagicMock(
return_value={
"hits": {
"hits": [
{"_id": "21", "_type": "car"},
{"_id": "22", "_type": "car"},
{"_id": "23", "_type": "car"},
{"_id": "24", "_type": "car"},
{"_id": "25", "_type": "car"},
{"_id": "26", "_type": "car"},
{"_id": "27", "_type": "car"},
{"_id": "28", "_type": "car"},
{"_id": "29", "_type": "car"},
{"_id": "30", "_type": "car"},
],
"max_score": 5.487631,
"total": 105
}
}
)
qf.process_results(sq.get_result())
self.assertEqual(qf.page.offset, 20)
self.assertEqual(qf.page.limit, 10)
self.assertEqual(qf.page.total, 105)
self.assertEqual(qf.page.pages, 11)
self.assertEqual(qf.page.has_next, True)
self.assertEqual(qf.page.has_prev, True)
self.assertEqual(len(qf.page.items), 10)
def test_page_with_max_items(self):
class CarQueryFilter(QueryFilter):
page = PageFilter(alias='p', per_page_values=[24, 48, 96], max_items=1000)
sq = self.index.search_query()
qf = CarQueryFilter()
self.assert_expression(
qf.apply(sq, {'p': 11, 'per_page': 96}),
{
"size": 40,
"from": 960
}
)
self.assert_expression(
qf.apply(sq, {'p': 500}),
{
"size": 0
}
)
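    # GroupedPageFilter paginates over groups (one terms bucket per vendor).
    # Applying it triggers an extra count-only search with a large
    # 'qf.page.pagination' terms aggregation to discover the bucket order, and
    # the main request is then filtered down to the groups of the requested
    # page.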
def test_grouped_page_filter(self):
class CarQueryFilter(QueryFilter):
page = GroupedPageFilter(
self.index.car.vendor,
group_kwargs={'size': 2}, per_page_values=[4]
)
sq = self.index.search_query()
qf = CarQueryFilter()
self.assert_expression(
qf.apply(sq, {}),
{
"aggregations": {
"qf.page.pagination": {
"terms": {
"field": "vendor",
"size": 1000,
"order": [
{
"order_0": "desc"
}
]
},
"aggregations": {
"order_0": {
"max": {
"script": "_score"
}
}
}
},
"qf.page": {
"terms": {
"field": "vendor",
"size": 4,
"order": [
{
"order_0": "desc"
}
]
},
"aggregations": {
"top_items": {
"top_hits": {
"size": 2,
}
},
"order_0": {
"max": {
"script": "_score"
}
}
}
}
}
}
)
self.client.search = MagicMock(
return_value={
"hits": {
"hits": [],
"max_score": 1.804551,
"total": 10378992
},
"aggregations": {
"qf.page.pagination": {
"buckets": [
{
"key": "toyota",
"doc_count": 1158096,
"order_0": {
"value": 1.804551
}
},
{
"key": "ford",
"doc_count": 1354892,
"order_0": {
"value": 1.689384
}
},
{
"key": "subaru",
"doc_count": 934756,
"order_0": {
"value": 1.540802
}
},
{
"key": "bmw",
"doc_count": 125871,
"order_0": {
"value": 1.540802
}
},
{
"key": "volkswagen",
"doc_count": 2573903,
"order_0": {
"value": 1.351459
}
},
{
"key": "jeep",
"doc_count": 10327,
"order_0": {
"value": 1.045751
}
}
]
}
}
}
)
qf = CarQueryFilter()
sq = qf.apply(sq, {'page': 2, 'per_page': 3})
self.client.search.assert_called_with(
body={
"aggregations": {
"qf.page.pagination": {
"terms": {
"field": "vendor",
"size": 1000,
"order": [
{
"order_0": "desc"
}
]
},
"aggregations": {
"order_0": {
"max": {
"script": "_score"
}
}
}
}
}
},
doc_type='car',
index='test',
search_type='count',
)
self.assert_expression(
sq,
{
"aggregations": {
"qf.page.filter": {
"filter": {
"terms": {
"vendor": ["volkswagen", "jeep"]
}
},
"aggregations": {
"qf.page": {
"terms": {
"field": "vendor",
"size": 4,
"order": [
{
"order_0": "desc"
}
]
},
"aggregations": {
"top_items": {
"top_hits": {
"size": 2,
}
},
"order_0": {
"max": {
"script": "_score"
}
}
}
}
}
}
}
}
)
self.assertEqual(qf.page.total, 6)
self.assertEqual(qf.page.page, 2)
self.assertEqual(qf.page.pages, 2)
self.assertEqual(qf.page.has_next, False)
self.assertEqual(qf.page.has_prev, True)
self.assertIs(qf.page.items, None)
def test_grouped_page_filter_with_post_filters(self):
class CarQueryFilter(QueryFilter):
page = GroupedPageFilter(
self.index.car.vendor,
group_kwargs={'size': 2}, per_page_values=[2]
)
sq = (
self.index.search_query()
.post_filter(self.index.car.engine_displacement >= 2)
.order_by(self.index.car._score, self.index.car.rank)
)
qf = CarQueryFilter()
self.client.search = MagicMock(
return_value={
"hits": {
"hits": [],
"max_score": 1.804551,
"total": 10378992
},
"aggregations": {
"qf.page.filter": {
"doc_count": 21987654,
"qf.page.pagination": {
"buckets": [
{
"key": "toyota",
"doc_count": 1158096,
"order_0": {
"value": 1.804551
},
"order_1": {
"value": 1.804551
}
},
{
"key": "ford",
"doc_count": 1354892,
"order_0": {
"value": 1.689384
},
"order_1": {
"value": 1.804551
}
},
{
"key": "subaru",
"doc_count": 934756,
"order_0": {
"value": 1.540802
},
"order_1": {
"value": 1.804551
}
},
{
"key": "bmw",
"doc_count": 125871,
"order_0": {
"value": 1.540802
},
"order_1": {
"value": 1.804551
}
},
{
"key": "volkswagen",
"doc_count": 2573903,
"order_0": {
"value": 1.351459
},
"order_1": {
"value": 1.804551
}
},
{
"key": "jeep",
"doc_count": 10327,
"order_0": {
"value": 1.045751
},
"order_1": {
"value": 1.804551
}
}
]
}
}
}
}
)
sq = qf.apply(sq, {'page': 2})
self.assert_expression(
sq,
{
"aggregations": {
"qf.page.filter": {
"filter": {
"bool": {
"must": [
{
"range": {
"engine_displacement": {"gte": 2}
}
},
{
"terms": {
"vendor": ["subaru", "bmw"]
}
}
]
}
},
"aggregations": {
"qf.page": {
"terms": {
"field": "vendor",
"size": 2,
"order": [
{
"order_0": "desc"
},
{
"order_1": "asc"
}
]
},
"aggregations": {
"top_items": {
"top_hits": {
"size": 2,
}
},
"order_0": {
"max": {
"script": "_score"
}
},
"order_1": {
"min": {
"field": "rank"
}
}
}
}
}
}
},
"post_filter": {
"range": {
"engine_displacement": {"gte": 2}
}
},
"sort": [
"_score",
"rank"
]
}
)
self.client.search = MagicMock(
return_value={
"hits": {
"hits": [],
"max_score": 1.804551,
"total": 8409177
},
"aggregations": {
"qf.page.filter": {
"doc_count": 1354892,
"qf.page": {
"buckets": [
{
"key": "subaru",
"doc_count": 196874,
"top_items": {
"hits": {
"total": 196874,
"max_score": 1,
"hits": [
{"_id": "21", "_type": "car"},
{"_id": "22", "_type": "car"},
]
}
},
"order_0": {
"value": 1.804551
},
"order_1": {
"value": 1.804551
}
},
{
"key": "bmw",
"doc_count": 98351,
"top_items": {
"hits": {
"total": 98351,
"max_score": 1,
"hits": [
{"_id": "31", "_type": "car"},
{"_id": "32", "_type": "car"},
]
}
},
"order_0": {
"value": 1.804551
},
"order_1": {
"value": 1.804551
}
}
]
}
}
}
}
)
qf.process_results(sq.get_result())
self.assertEqual(qf.page.total, 6)
self.assertEqual(qf.page.page, 2)
self.assertEqual(qf.page.pages, 3)
self.assertEqual(qf.page.has_next, True)
self.assertEqual(qf.page.has_prev, True)
self.assertEqual(len(qf.page.items), 2)
self.assertEqual(qf.page.items[0].key, 'subaru')
self.assertEqual(qf.page.items[0].get_aggregation('top_items').hits[0]._id, '21')
self.assertEqual(qf.page.items[0].get_aggregation('top_items').hits[1]._id, '22')
self.assertEqual(qf.page.items[1].key, 'bmw')
self.assertEqual(qf.page.items[1].get_aggregation('top_items').hits[0]._id, '31')
self.assertEqual(qf.page.items[1].get_aggregation('top_items').hits[1]._id, '32')
def test_query_filter_inheritance(self):
class SuperBaseItemQueryFilter(QueryFilter):
price = RangeFilter(self.index.item.price)
class BaseItemQueryFilter(SuperBaseItemQueryFilter):
sort = OrderingFilter(
OrderingValue('-score', [self.index.item._score])
)
class ItemQueryFilter(BaseItemQueryFilter):
selling_type = FacetQueryFilter(
SimpleQueryValue('retail', self.index.item.selling_type.in_([1, 2, 3])),
SimpleQueryValue('wholesale', self.index.item.selling_type.in_([3, 4, 5])),
conj_operator=QueryFilter.CONJ_AND
)
page = PageFilter()
sort = OrderingFilter(
OrderingValue('-score', [self.index.item._score]),
OrderingValue('price', [self.index.item.price]),
OrderingValue('-price', [self.index.item.price.desc()]),
)
qf = ItemQueryFilter()
self.assertEqual(
[f.name for f in qf.filters],
['price', 'selling_type', 'page', 'sort']
)
self.assertEqual(
[v.value for v in qf.sort.values],
['-score', 'price', '-price']
)
BaseItemQueryFilter.presence = FacetFilter(self.index.item.presence)
ItemQueryFilter.status = FacetFilter(self.index.item.status)
qf = ItemQueryFilter()
self.assertEqual(
[f.name for f in qf.filters],
['price', 'presence', 'selling_type', 'page', 'sort', 'status']
)
self.assertEqual(
[v.value for v in qf.sort.values],
['-score', 'price', '-price']
)
def test_dynamic_filters(self):
class ItemQueryFilter(QueryFilter):
price = RangeFilter(self.index.item.price)
selling_type = FacetQueryFilter(
SimpleQueryValue('retail', self.index.item.selling_type.in_([1, 2, 3])),
SimpleQueryValue('wholesale', self.index.item.selling_type.in_([3, 4, 5])),
conj_operator=QueryFilter.CONJ_AND
)
page = PageFilter()
qf = ItemQueryFilter()
self.assertEqual(len(qf.filters), 3)
self.assertEqual(qf.price.name, 'price')
self.assertEqual(qf.selling_type.name, 'selling_type')
self.assertEqual(qf.page.name, 'page')
self.assertEqual(qf.page.per_page_values, [10])
qf.remove_filter('selling_type')
self.assertEqual(len(qf.filters), 2)
self.assertEqual(qf.price.name, 'price')
self.assertIs(qf.get_filter('selling_type'), None)
self.assertRaises(AttributeError, lambda: qf.selling_type)
self.assertEqual(qf.page.name, 'page')
qf.add_filter(PageFilter('page', per_page_values=[10, 20]))
self.assertEqual(len(qf.filters), 2)
self.assertEqual(qf.page.per_page_values, [10, 20])
# def test_nested(self):
# f = DynamicDocument.fields
# qf = QueryFilter()
# qf.add_filter(
# FacetFilter('cat', f.category, type=Integer,
# filters=[FacetFilter('manu', f.manufacturer),
# FacetFilter('manu_country', f.manufacturer_country)])
# )
# sq = SearchQuery()
# sq = qf.apply(sq, {'cat__manu': ['1:thl', '2:china', '3']})
# self.assert_expression(
# sq,
# {
# "query": {
# "filtered": {
# "filter": {
# "or": [
# {
# "and": [
# {
# "term": {"category": 1},
# "term": {"manufacturer": "thl"}
# }
# ]
# },
# {
# "and": [
# {
# "term": {"category": 2},
# "term": {"manufacturer_country": "china"},
# }
# ]
# },
# {
# "term": {"category": 3}
# }
# ]
# }
# }
# }
# }
# )
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inverts a non-singular `LinearOperator`."""
from tensorflow.python.framework import ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = []
@tf_export("linalg.LinearOperatorInversion")
@linear_operator.make_composite_tensor
class LinearOperatorInversion(linear_operator.LinearOperator):
"""`LinearOperator` representing the inverse of another operator.
This operator represents the inverse of another operator.
```python
# Create a 2 x 2 linear operator.
operator = LinearOperatorFullMatrix([[1., 0.], [0., 2.]])
operator_inv = LinearOperatorInversion(operator)
operator_inv.to_dense()
==> [[1., 0.]
[0., 0.5]]
operator_inv.shape
==> [2, 2]
operator_inv.log_abs_determinant()
==> - log(2)
x = ... Shape [2, 4] Tensor
operator_inv.matmul(x)
==> Shape [2, 4] Tensor, equal to operator.solve(x)
```
#### Performance
  The performance of `LinearOperatorInversion` depends on the performance of
  the underlying operator: `solve` and `matmul` are swapped, and the
  determinant is inverted.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operator,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorInversion`.
`LinearOperatorInversion` is initialized with an operator `A`. The `solve`
and `matmul` methods are effectively swapped. E.g.
```
A = MyLinearOperator(...)
B = LinearOperatorInversion(A)
x = [....] # a vector
assert A.matvec(x) == B.solvevec(x)
```
Args:
operator: `LinearOperator` object. If `operator.is_non_singular == False`,
an exception is raised. We do allow `operator.is_non_singular == None`,
in which case this operator will have `is_non_singular == None`.
Similarly for `is_self_adjoint` and `is_positive_definite`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is `operator.name +
"_inv"`.
Raises:
ValueError: If `operator.is_non_singular` is False.
"""
parameters = dict(
operator=operator,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
self._operator = operator
# Auto-set and check hints.
if operator.is_non_singular is False or is_non_singular is False:
raise ValueError(
f"Argument `is_non_singular` or argument `operator` must have "
f"supplied hint `is_non_singular` equal to `True` or `None`. "
f"Found `operator.is_non_singular`: {operator.is_non_singular}, "
f"`is_non_singular`: {is_non_singular}.")
if operator.is_square is False or is_square is False:
raise ValueError(
f"Argument `is_square` or argument `operator` must have supplied "
f"hint `is_square` equal to `True` or `None`. Found "
f"`operator.is_square`: {operator.is_square}, "
f"`is_square`: {is_square}.")
    # The congruency of is_non_singular and is_self_adjoint was checked in the
    # base operator. The remaining hints must, in this special case of
    # inversion, be the same for the base and derived operators.
combine_hint = (
linear_operator_util.use_operator_or_provided_hint_unless_contradicting)
is_square = combine_hint(
operator, "is_square", is_square,
"An operator is square if and only if its inverse is square.")
is_non_singular = combine_hint(
operator, "is_non_singular", is_non_singular,
"An operator is non-singular if and only if its inverse is "
"non-singular.")
is_self_adjoint = combine_hint(
operator, "is_self_adjoint", is_self_adjoint,
"An operator is self-adjoint if and only if its inverse is "
"self-adjoint.")
is_positive_definite = combine_hint(
operator, "is_positive_definite", is_positive_definite,
"An operator is positive-definite if and only if its inverse is "
"positive-definite.")
# Initialization.
if name is None:
name = operator.name + "_inv"
with ops.name_scope(name, values=operator.graph_parents):
super(LinearOperatorInversion, self).__init__(
dtype=operator.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
# TODO(b/143910018) Remove graph_parents in V3.
self._set_graph_parents(operator.graph_parents)
@property
def operator(self):
"""The operator before inversion."""
return self._operator
def _assert_non_singular(self):
return self.operator.assert_non_singular()
def _assert_positive_definite(self):
return self.operator.assert_positive_definite()
def _assert_self_adjoint(self):
return self.operator.assert_self_adjoint()
def _shape(self):
return self.operator.shape
def _shape_tensor(self):
return self.operator.shape_tensor()
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return self.operator.solve(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _determinant(self):
return 1. / self.operator.determinant()
def _log_abs_determinant(self):
return -1. * self.operator.log_abs_determinant()
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
return self.operator.matmul(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _eigvals(self):
return 1. / self.operator.eigvals()
def _cond(self):
return self.operator.cond()
@property
def _composite_tensor_fields(self):
return ("operator",)
|
|
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import struct
import subprocess
import sys
import unittest
import xml.etree.ElementTree
import gdb_rsp
NACL_SIGTRAP = 5
NACL_SIGSEGV = 11
# These are set up by Main().
ARCH = None
NM_TOOL = None
SEL_LDR_COMMAND = None
def AssertEquals(x, y):
if x != y:
raise AssertionError('%r != %r' % (x, y))
def DecodeHex(data):
assert len(data) % 2 == 0, data
return ''.join([chr(int(data[index * 2 : (index + 1) * 2], 16))
for index in xrange(len(data) / 2)])
def EncodeHex(data):
return ''.join('%02x' % ord(byte) for byte in data)
X86_32_REG_DEFS = [
('eax', 'I'),
('ecx', 'I'),
('edx', 'I'),
('ebx', 'I'),
('esp', 'I'),
('ebp', 'I'),
('esi', 'I'),
('edi', 'I'),
('eip', 'I'),
('eflags', 'I'),
('cs', 'I'),
('ss', 'I'),
('ds', 'I'),
('es', 'I'),
('fs', 'I'),
('gs', 'I'),
]
X86_64_REG_DEFS = [
('rax', 'Q'),
('rbx', 'Q'),
('rcx', 'Q'),
('rdx', 'Q'),
('rsi', 'Q'),
('rdi', 'Q'),
('rbp', 'Q'),
('rsp', 'Q'),
('r8', 'Q'),
('r9', 'Q'),
('r10', 'Q'),
('r11', 'Q'),
('r12', 'Q'),
('r13', 'Q'),
('r14', 'Q'),
('r15', 'Q'),
('rip', 'Q'),
('eflags', 'I'),
('cs', 'I'),
('ss', 'I'),
('ds', 'I'),
('es', 'I'),
('fs', 'I'),
('gs', 'I'),
]
ARM_REG_DEFS = ([('r%d' % regno, 'I') for regno in xrange(16)]
+ [('cpsr', 'I')])
REG_DEFS = {
'x86-32': X86_32_REG_DEFS,
'x86-64': X86_64_REG_DEFS,
'arm': ARM_REG_DEFS,
}
SP_REG = {
'x86-32': 'esp',
'x86-64': 'rsp',
'arm': 'r13',
}
IP_REG = {
'x86-32': 'eip',
'x86-64': 'rip',
'arm': 'r15',
}
X86_TRAP_FLAG = 1 << 8
# RESET_X86_FLAGS_VALUE is what ASM_WITH_REGS() resets the x86 flags
# to. Copied from tests/common/register_set.h.
RESET_X86_FLAGS_VALUE = (1 << 2) | (1 << 6)
KNOWN_X86_FLAGS_MASK = (1<<0) | (1<<2) | (1<<6) | (1<<7) | (1<<11) | (1<<8)
# These are the only ARM CPSR bits that user code and untrusted code
# can read and modify, excluding the IT bits which are for Thumb-2
# (for If-Then-Else instructions). Copied from
# tests/common/register_set.h.
ARM_USER_CPSR_FLAGS_MASK = (
(1<<31) | # N
(1<<30) | # Z
(1<<29) | # C
(1<<28) | # V
(1<<27) | # Q
(1<<19) | (1<<18) | (1<<17) | (1<<16)) # GE bits
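# DecodeRegs/EncodeRegs convert between the hex payload of the RSP 'g'/'G'
# register packets and a {register name: value} dict, using the
# per-architecture register layout defined in REG_DEFS above.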
def DecodeRegs(reply):
defs = REG_DEFS[ARCH]
names = [reg_name for reg_name, reg_fmt in defs]
fmt = ''.join([reg_fmt for reg_name, reg_fmt in defs])
values = struct.unpack_from(fmt, DecodeHex(reply))
return dict(zip(names, values))
def EncodeRegs(regs):
defs = REG_DEFS[ARCH]
names = [reg_name for reg_name, reg_fmt in defs]
fmt = ''.join([reg_fmt for reg_name, reg_fmt in defs])
values = [regs[r] for r in names]
return EncodeHex(struct.pack(fmt, *values))
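# Starts sel_ldr with the GDB debug stub enabled ('-g') for the given test
# nexe, after making sure the debug stub's TCP port is free.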
def PopenDebugStub(test):
gdb_rsp.EnsurePortIsAvailable()
return subprocess.Popen(SEL_LDR_COMMAND + ['-g', test])
def KillProcess(process):
try:
process.kill()
except OSError:
if sys.platform == 'win32':
# If process is already terminated, kill() throws
# "WindowsError: [Error 5] Access is denied" on Windows.
pass
else:
raise
process.wait()
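# Context manager that launches the debug stub and guarantees the sel_ldr
# process is killed when the 'with' block exits, even if connecting fails.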
class LaunchDebugStub(object):
def __init__(self, test):
self._proc = PopenDebugStub(test)
def __enter__(self):
try:
return gdb_rsp.GdbRspConnection()
except:
KillProcess(self._proc)
raise
def __exit__(self, exc_type, exc_value, traceback):
KillProcess(self._proc)
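# Reads symbol addresses from the nexe named after '-f' on the sel_ldr command
# line by parsing 'nm --format=posix' output into a {name: address} dict.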
def GetSymbols():
assert '-f' in SEL_LDR_COMMAND
nexe_filename = SEL_LDR_COMMAND[SEL_LDR_COMMAND.index('-f') + 1]
symbols = {}
proc = subprocess.Popen([NM_TOOL, '--format=posix', nexe_filename],
stdout=subprocess.PIPE)
for line in proc.stdout:
    match = re.match(r'(\S+) [TtWwBD] ([0-9a-fA-F]+)', line)
if match is not None:
name = match.group(1)
addr = int(match.group(2), 16)
symbols[name] = addr
result = proc.wait()
assert result == 0, result
return symbols
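# A stop reply has the form 'Txxthread:yyy;' where xx is the signal number and
# yyy is the thread id, both in hex.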
def ParseThreadStopReply(reply):
match = re.match('T([0-9a-f]{2})thread:([0-9a-f]+);$', reply)
if match is None:
raise AssertionError('Bad thread stop reply: %r' % reply)
return {'signal': int(match.group(1), 16),
'thread_id': int(match.group(2), 16)}
def AssertReplySignal(reply, signal):
AssertEquals(ParseThreadStopReply(reply)['signal'], signal)
def ReadMemory(connection, address, size):
reply = connection.RspRequest('m%x,%x' % (address, size))
assert not reply.startswith('E'), reply
return DecodeHex(reply)
def ReadUint32(connection, address):
return struct.unpack('I', ReadMemory(connection, address, 4))[0]
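# End-to-end tests that drive the NaCl debug stub over the GDB remote serial
# protocol: initial breakpoint, register reads/writes, memory access,
# single-stepping and exit codes.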
class DebugStubTest(unittest.TestCase):
def test_initial_breakpoint(self):
# Any arguments to the nexe would work here because we are only
# testing that we get a breakpoint at the _start entry point.
with LaunchDebugStub('test_getting_registers') as connection:
reply = connection.RspRequest('?')
AssertReplySignal(reply, NACL_SIGTRAP)
def CheckTargetXml(self, connection):
reply = connection.RspRequest('qXfer:features:read:target.xml:0,fff')
self.assertEquals(reply[0], 'l')
# Just check that we are given parsable XML.
xml.etree.ElementTree.fromstring(reply[1:])
# Test that we can fetch register values.
# This check corresponds to the last instruction of debugger_test.c
def CheckReadRegisters(self, connection):
registers = DecodeRegs(connection.RspRequest('g'))
if ARCH == 'x86-32':
self.assertEquals(registers['eax'], 0x11000022)
self.assertEquals(registers['ebx'], 0x22000033)
self.assertEquals(registers['ecx'], 0x33000044)
self.assertEquals(registers['edx'], 0x44000055)
self.assertEquals(registers['esi'], 0x55000066)
self.assertEquals(registers['edi'], 0x66000077)
self.assertEquals(registers['ebp'], 0x77000088)
self.assertEquals(registers['esp'], 0x88000099)
self.assertEquals(registers['eflags'] & KNOWN_X86_FLAGS_MASK,
RESET_X86_FLAGS_VALUE)
elif ARCH == 'x86-64':
self.assertEquals(registers['rax'], 0x1100000000000022)
self.assertEquals(registers['rbx'], 0x2200000000000033)
self.assertEquals(registers['rcx'], 0x3300000000000044)
self.assertEquals(registers['rdx'], 0x4400000000000055)
self.assertEquals(registers['rsi'], 0x5500000000000066)
self.assertEquals(registers['rdi'], 0x6600000000000077)
self.assertEquals(registers['r8'], 0x7700000000000088)
self.assertEquals(registers['r9'], 0x8800000000000099)
self.assertEquals(registers['r10'], 0x99000000000000aa)
self.assertEquals(registers['r11'], 0xaa000000000000bb)
self.assertEquals(registers['r12'], 0xbb000000000000cc)
self.assertEquals(registers['r13'], 0xcc000000000000dd)
self.assertEquals(registers['r14'], 0xdd000000000000ee)
self.assertEquals(registers['rsp'], registers['r15'] + 0x12300321)
self.assertEquals(registers['rbp'], registers['r15'] + 0x23400432)
self.assertEquals(registers['eflags'] & KNOWN_X86_FLAGS_MASK,
RESET_X86_FLAGS_VALUE)
elif ARCH == 'arm':
self.assertEquals(registers['r0'], 0x00000001)
self.assertEquals(registers['r1'], 0x10000002)
self.assertEquals(registers['r2'], 0x20000003)
self.assertEquals(registers['r3'], 0x30000004)
self.assertEquals(registers['r4'], 0x40000005)
self.assertEquals(registers['r5'], 0x50000006)
self.assertEquals(registers['r6'], 0x60000007)
self.assertEquals(registers['r7'], 0x70000008)
self.assertEquals(registers['r8'], 0x80000009)
# Skip r9 because it is not supposed to be settable or readable
# by untrusted code.
self.assertEquals(registers['r10'], 0xa000000b)
self.assertEquals(registers['r11'], 0xb000000c)
self.assertEquals(registers['r12'], 0xc000000d)
self.assertEquals(registers['r13'], 0x12345678)
self.assertEquals(registers['r14'], 0xe000000f)
self.assertEquals(registers['cpsr'] & ARM_USER_CPSR_FLAGS_MASK,
(1 << 29) | (1 << 27))
else:
raise AssertionError('Unknown architecture')
expected_fault_addr = GetSymbols()['fault_addr']
if ARCH == 'x86-64':
expected_fault_addr += registers['r15']
self.assertEquals(registers[IP_REG[ARCH]], expected_fault_addr)
# Test that we can write registers.
def CheckWriteRegisters(self, connection):
if ARCH == 'x86-32':
reg_name = 'edx'
elif ARCH == 'x86-64':
reg_name = 'rdx'
elif ARCH == 'arm':
reg_name = 'r0'
else:
raise AssertionError('Unknown architecture')
# Read registers.
regs = DecodeRegs(connection.RspRequest('g'))
# Change a register.
regs[reg_name] += 1
new_value = regs[reg_name]
# Write registers.
self.assertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
# Read registers. Check for a new value.
regs = DecodeRegs(connection.RspRequest('g'))
self.assertEquals(regs[reg_name], new_value)
# TODO: Resume execution and check that changing the registers really
# influenced the program's execution. This would require changing
# debugger_test.c.
def CheckReadOnlyRegisters(self, connection):
if ARCH == 'x86-32':
sample_read_only_regs = ['cs', 'ds']
elif ARCH == 'x86-64':
sample_read_only_regs = ['r15', 'cs', 'ds']
elif ARCH == 'arm':
sample_read_only_regs = []
else:
raise AssertionError('Unknown architecture')
for reg_name in sample_read_only_regs:
# Read registers.
regs = DecodeRegs(connection.RspRequest('g'))
# Change a register.
old_value = regs[reg_name]
regs[reg_name] += 1
# Write registers.
self.assertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
# Read registers. Check for an old value.
regs = DecodeRegs(connection.RspRequest('g'))
self.assertEquals(regs[reg_name], old_value)
# Test that reading from an unreadable address gives a sensible error.
def CheckReadMemoryAtInvalidAddr(self, connection):
mem_addr = 0
result = connection.RspRequest('m%x,%x' % (mem_addr, 8))
self.assertEquals(result, 'E03')
# Check non-zero address in the first page.
mem_addr = 0xffff
    result = connection.RspRequest('m%x,%x' % (mem_addr, 1))
self.assertEquals(result, 'E03')
# Run tests on debugger_test.c binary.
def test_debugger_test(self):
with LaunchDebugStub('test_getting_registers') as connection:
# Tell the process to continue, because it starts at the
# breakpoint set at its start address.
reply = connection.RspRequest('c')
if ARCH == 'arm':
# The process should have stopped on a BKPT instruction.
AssertReplySignal(reply, NACL_SIGTRAP)
else:
# The process should have stopped on a HLT instruction.
AssertReplySignal(reply, NACL_SIGSEGV)
self.CheckTargetXml(connection)
self.CheckReadRegisters(connection)
self.CheckWriteRegisters(connection)
self.CheckReadOnlyRegisters(connection)
def test_jump_to_address_zero(self):
with LaunchDebugStub('test_jump_to_address_zero') as connection:
# Continue from initial breakpoint.
reply = connection.RspRequest('c')
AssertReplySignal(reply, NACL_SIGSEGV)
registers = DecodeRegs(connection.RspRequest('g'))
if ARCH == 'x86-64':
self.assertEquals(registers[IP_REG[ARCH]], registers['r15'])
else:
self.assertEquals(registers[IP_REG[ARCH]], 0)
def test_reading_and_writing_memory(self):
# Any arguments to the nexe would work here because we do not run
# the executable beyond the initial breakpoint.
with LaunchDebugStub('test_getting_registers') as connection:
mem_addr = GetSymbols()['g_example_var']
# Check reading memory.
expected_data = 'some_debug_stub_test_data\0'
reply = connection.RspRequest('m%x,%x' % (mem_addr, len(expected_data)))
self.assertEquals(DecodeHex(reply), expected_data)
# On x86-64, for reading/writing memory, the debug stub accepts
# untrusted addresses with or without the %r15 sandbox base
# address added, because GDB uses both.
# TODO(eaeltsin): Fix GDB to not use addresses with %r15 added,
# and change the expected result in the check below.
if ARCH == 'x86-64':
registers = DecodeRegs(connection.RspRequest('g'))
sandbox_base_addr = registers['r15']
reply = connection.RspRequest('m%x,%x' % (sandbox_base_addr + mem_addr,
len(expected_data)))
self.assertEquals(DecodeHex(reply), expected_data)
# Check writing memory.
new_data = 'replacement_data\0'
assert len(new_data) < len(expected_data)
reply = connection.RspRequest('M%x,%x:%s' % (mem_addr, len(new_data),
EncodeHex(new_data)))
self.assertEquals(reply, 'OK')
# Check that we can read back what we wrote.
reply = connection.RspRequest('m%x,%x' % (mem_addr, len(new_data)))
self.assertEquals(DecodeHex(reply), new_data)
self.CheckReadMemoryAtInvalidAddr(connection)
def test_exit_code(self):
with LaunchDebugStub('test_exit_code') as connection:
reply = connection.RspRequest('c')
self.assertEquals(reply, 'W02')
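# For reference, 'W' is the GDB remote-protocol exit reply: 'W' followed by
# the exit status in hex, so 'W02' means the program exited with status 2
# and 'W00' (used elsewhere in these tests) means status 0.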
# Single-step and check IP corresponds to debugger_test:test_single_step
def CheckSingleStep(self, connection, step_command, thread_id):
if ARCH == 'x86-32':
instruction_sizes = [1, 2, 3, 6]
elif ARCH == 'x86-64':
instruction_sizes = [1, 3, 4, 6]
else:
raise AssertionError('Unknown architecture')
ip = DecodeRegs(connection.RspRequest('g'))[IP_REG[ARCH]]
for size in instruction_sizes:
reply = connection.RspRequest(step_command)
AssertReplySignal(reply, NACL_SIGTRAP)
self.assertEquals(ParseThreadStopReply(reply)['thread_id'], thread_id)
ip += size
regs = DecodeRegs(connection.RspRequest('g'))
self.assertEqual(regs[IP_REG[ARCH]], ip)
# The trap flag should be reported as unset.
self.assertEqual(regs['eflags'] & X86_TRAP_FLAG, 0)
def test_single_step(self):
if ARCH == 'arm':
# Skip this test because single-stepping is not supported on ARM.
# TODO(eaeltsin):
# http://code.google.com/p/nativeclient/issues/detail?id=2911
return
with LaunchDebugStub('test_single_step') as connection:
# We expect test_single_step() to stop at a HLT instruction.
reply = connection.RspRequest('c')
AssertReplySignal(reply, NACL_SIGSEGV)
tid = ParseThreadStopReply(reply)['thread_id']
# Skip past the single-byte HLT instruction.
regs = DecodeRegs(connection.RspRequest('g'))
regs[IP_REG[ARCH]] += 1
AssertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
self.CheckSingleStep(connection, 's', tid)
# Check that we can continue after single-stepping.
reply = connection.RspRequest('c')
self.assertEquals(reply, 'W00')
def test_vCont(self):
# Basically repeat test_single_step, but using vCont commands.
if ARCH == 'arm':
# Skip this test because single-stepping is not supported on ARM.
# TODO(eaeltsin):
# http://code.google.com/p/nativeclient/issues/detail?id=2911
return
with LaunchDebugStub('test_single_step') as connection:
# Test if vCont is supported.
reply = connection.RspRequest('vCont?')
self.assertEqual(reply, 'vCont;s;S;c;C')
# Continue using vCont.
# We expect test_single_step() to stop at a HLT instruction.
reply = connection.RspRequest('vCont;c')
AssertReplySignal(reply, NACL_SIGSEGV)
# Get signalled thread id.
tid = ParseThreadStopReply(reply)['thread_id']
# Skip past the single-byte HLT instruction.
regs = DecodeRegs(connection.RspRequest('g'))
regs[IP_REG[ARCH]] += 1
AssertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
self.CheckSingleStep(connection, 'vCont;s:%x' % tid, tid)
# Single step one thread and continue all others.
reply = connection.RspRequest('vCont;s:%x;c' % tid)
# WARNING! This check is valid in single-threaded case only!
# In multi-threaded case another thread might stop first.
self.assertEqual(reply, 'T05thread:%x;' % tid)
# Try to continue the thread and to single-step all others.
reply = connection.RspRequest('vCont;c:%x;s' % tid)
self.assertTrue(reply.startswith('E'))
# Try to single-step wrong thread.
reply = connection.RspRequest('vCont;s:%x' % (tid + 2))
self.assertTrue(reply.startswith('E'))
# Try to single-step all threads.
reply = connection.RspRequest('vCont;s')
self.assertTrue(reply.startswith('E'))
def test_interrupt(self):
if ARCH == 'arm':
# Skip this test because single-stepping is not supported on ARM.
# TODO(eaeltsin):
# http://code.google.com/p/nativeclient/issues/detail?id=2911
return
func_addr = GetSymbols()['test_interrupt']
with LaunchDebugStub('test_interrupt') as connection:
# Single-stepping inside syscalls doesn't work, so we need to reach
# a point where the interrupt will not catch the program inside a syscall.
reply = connection.RspRequest('Z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
reply = connection.RspRequest('c')
AssertReplySignal(reply, NACL_SIGTRAP)
reply = connection.RspRequest('z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
# Continue (program will spin forever), then interrupt.
connection.RspSendOnly('c')
reply = connection.RspInterrupt()
self.assertEqual(reply, 'T00')
# Single-step.
reply = connection.RspRequest('s')
AssertReplySignal(reply, NACL_SIGTRAP)
def test_modifying_code_is_disallowed(self):
with LaunchDebugStub('test_setting_breakpoint') as connection:
# Pick an arbitrary address in the code segment.
func_addr = GetSymbols()['breakpoint_target_func']
# Writing to the code area should be disallowed.
data = '\x00'
write_command = 'M%x,%x:%s' % (func_addr, len(data), EncodeHex(data))
reply = connection.RspRequest(write_command)
self.assertEquals(reply, 'E03')
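# Note: 'M<addr>,<len>:<hex-bytes>' is the RSP write-memory packet, so the
# check above asserts that the stub rejects writes into the code segment
# with the same generic 'E03' error used for other invalid accesses.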
class DebugStubBreakpointTest(unittest.TestCase):
def CheckInstructionPtr(self, connection, expected_ip):
ip_value = DecodeRegs(connection.RspRequest('g'))[IP_REG[ARCH]]
if ARCH == 'x86-64':
# TODO(mseaborn): The debug stub should probably omit the top
# bits of %rip automatically.
ip_value &= 0xffffffff
self.assertEquals(ip_value, expected_ip)
def test_setting_and_removing_breakpoint(self):
func_addr = GetSymbols()['breakpoint_target_func']
with LaunchDebugStub('test_setting_breakpoint') as connection:
# Set a breakpoint.
reply = connection.RspRequest('Z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
# Requesting a breakpoint on an address that already has a
# breakpoint should return an error.
reply = connection.RspRequest('Z0,%x,0' % func_addr)
self.assertEquals(reply, 'E03')
# When we run the program, we should hit the breakpoint. When
# we continue, we should hit the breakpoint again because it has
# not been removed: the debug stub does not step through
# breakpoints automatically.
for i in xrange(2):
reply = connection.RspRequest('c')
AssertReplySignal(reply, NACL_SIGTRAP)
self.CheckInstructionPtr(connection, func_addr)
# If we continue a single thread, the fault the thread receives
# should still be recognized as a breakpoint.
tid = ParseThreadStopReply(reply)['thread_id']
reply = connection.RspRequest('vCont;c:%x' % tid)
AssertReplySignal(reply, NACL_SIGTRAP)
self.CheckInstructionPtr(connection, func_addr)
# Check that we can remove the breakpoint.
reply = connection.RspRequest('z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
# Requesting removing a breakpoint on an address that does not
# have one should return an error.
reply = connection.RspRequest('z0,%x,0' % func_addr)
self.assertEquals(reply, 'E03')
# After continuing, we should not hit the breakpoint again, and
# the program should run to completion.
reply = connection.RspRequest('c')
self.assertEquals(reply, 'W00')
def test_setting_breakpoint_on_invalid_address(self):
with LaunchDebugStub('test_exit_code') as connection:
# Requesting a breakpoint on an invalid address should give an error.
reply = connection.RspRequest('Z0,%x,1' % (1 << 32))
self.assertEquals(reply, 'E03')
def test_setting_breakpoint_on_data_address(self):
with LaunchDebugStub('test_exit_code') as connection:
# Pick an arbitrary address in the data segment.
data_addr = GetSymbols()['g_main_thread_var']
# Requesting a breakpoint on a non-code address should give an error.
reply = connection.RspRequest('Z0,%x,1' % data_addr)
self.assertEquals(reply, 'E03')
def test_breakpoint_memory_changes_are_hidden(self):
func_addr = GetSymbols()['breakpoint_target_func']
with LaunchDebugStub('test_setting_breakpoint') as connection:
chunk_size = 32
old_memory = ReadMemory(connection, func_addr, chunk_size)
reply = connection.RspRequest('Z0,%x,0' % func_addr)
self.assertEquals(reply, 'OK')
# The debug stub should hide the memory modification.
new_memory = ReadMemory(connection, func_addr, chunk_size)
self.assertEquals(new_memory, old_memory)
# Check reading a subset of the range. (This will only be a
# proper subset on architectures where the breakpoint size is
# >1, such as ARM but not x86.)
new_memory = ReadMemory(connection, func_addr, 1)
self.assertEquals(new_memory, old_memory[:1])
class DebugStubThreadSuspensionTest(unittest.TestCase):
def SkipBreakpoint(self, connection, stop_reply):
# Skip past the faulting instruction in debugger_test.c's
# breakpoint() function.
regs = DecodeRegs(connection.RspRequest('g'))
if ARCH in ('x86-32', 'x86-64'):
AssertReplySignal(stop_reply, NACL_SIGSEGV)
# Skip past the single-byte HLT instruction.
regs[IP_REG[ARCH]] += 1
elif ARCH == 'arm':
AssertReplySignal(stop_reply, NACL_SIGTRAP)
bundle_size = 16
assert regs['r15'] % bundle_size == 0, regs['r15']
regs['r15'] += bundle_size
else:
raise AssertionError('Unknown architecture')
AssertEquals(connection.RspRequest('G' + EncodeRegs(regs)), 'OK')
def WaitForTestThreadsToStart(self, connection, symbols):
# Wait until:
# * The main thread starts to modify g_main_thread_var.
# * The child thread executes a breakpoint.
old_value = ReadUint32(connection, symbols['g_main_thread_var'])
while True:
reply = connection.RspRequest('c')
self.SkipBreakpoint(connection, reply)
child_thread_id = ParseThreadStopReply(reply)['thread_id']
if ReadUint32(connection, symbols['g_main_thread_var']) != old_value:
break
return child_thread_id
def test_continuing_thread_with_others_suspended(self):
with LaunchDebugStub('test_suspending_threads') as connection:
symbols = GetSymbols()
child_thread_id = self.WaitForTestThreadsToStart(connection, symbols)
# Test continuing a single thread while other threads remain
# suspended.
for _ in range(3):
main_thread_val = ReadUint32(connection, symbols['g_main_thread_var'])
child_thread_val = ReadUint32(connection, symbols['g_child_thread_var'])
reply = connection.RspRequest('vCont;c:%x' % child_thread_id)
self.SkipBreakpoint(connection, reply)
self.assertEquals(ParseThreadStopReply(reply)['thread_id'],
child_thread_id)
# The main thread should not be allowed to run, so should not
# modify g_main_thread_var.
self.assertEquals(
ReadUint32(connection, symbols['g_main_thread_var']),
main_thread_val)
# The child thread should always modify g_child_thread_var
# between each breakpoint.
self.assertNotEquals(
ReadUint32(connection, symbols['g_child_thread_var']),
child_thread_val)
def test_single_stepping_thread_with_others_suspended(self):
with LaunchDebugStub('test_suspending_threads') as connection:
symbols = GetSymbols()
child_thread_id = self.WaitForTestThreadsToStart(connection, symbols)
# Test single-stepping a single thread while other threads
# remain suspended.
for _ in range(3):
main_thread_val = ReadUint32(connection, symbols['g_main_thread_var'])
child_thread_val = ReadUint32(connection, symbols['g_child_thread_var'])
while True:
reply = connection.RspRequest('vCont;s:%x' % child_thread_id)
if (ARCH in ('x86-32', 'x86-64') and
ParseThreadStopReply(reply)['signal'] == NACL_SIGTRAP):
# We single-stepped through an instruction without
# otherwise faulting. We did not hit the breakpoint, so
# there is nothing to do.
pass
else:
self.SkipBreakpoint(connection, reply)
self.assertEquals(ParseThreadStopReply(reply)['thread_id'],
child_thread_id)
# The main thread should not be allowed to run, so should not
# modify g_main_thread_var.
self.assertEquals(
ReadUint32(connection, symbols['g_main_thread_var']),
main_thread_val)
# Eventually, the child thread should modify g_child_thread_var.
if (ReadUint32(connection, symbols['g_child_thread_var'])
!= child_thread_val):
break
def Main():
# TODO(mseaborn): Clean up to remove the global variables. They are
# currently here because unittest does not help with making
# parameterised tests.
index = sys.argv.index('--')
args = sys.argv[index + 1:]
# The remaining arguments go to unittest.main().
sys.argv = sys.argv[:index]
global ARCH
global NM_TOOL
global SEL_LDR_COMMAND
ARCH = args.pop(0)
NM_TOOL = args.pop(0)
SEL_LDR_COMMAND = args
unittest.main()
if __name__ == '__main__':
Main()
|
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides the most used functions in a nicely wrapped API.
This module defines a global environment, so that most methods can be
called without the need to specify an environment or a FormulaManager.
Functions trying to access the global environment should use the
function get_env(). Keep in mind that the global state of the
environment might lead to inconsistency and unexpected bugs. This is
particularly true for tests: it is recommended to reset the
environment in the setUp phase so that each test runs against a fresh
environment.
"""
# Enable default deprecation warnings!
import warnings
warnings.simplefilter('default')
import pysmt.typing as types
import pysmt.configuration as config
import pysmt.environment
def get_env():
"""Returns the global environment."""
return pysmt.environment.get_env()
def reset_env():
"""Resets the global environment, and returns the new one."""
return pysmt.environment.reset_env()
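# A minimal usage sketch of the shortcuts defined below (hedged: it assumes
# at least one SMT solver backend is installed; the symbol names are
# illustrative only):
#
#   from pysmt.shortcuts import Symbol, And, Not, is_sat
#   a, b = Symbol("a"), Symbol("b")
#   assert is_sat(And(a, Not(b)))   # satisfiable, e.g. a=True, b=False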
##### Shortcuts for FormulaManager #####
def get_type(formula):
"""Returns the type of the formula."""
return get_env().stc.get_type(formula)
def simplify(formula):
"""Returns the simplified version of the formula."""
return get_env().simplifier.simplify(formula)
def substitute(formula, subs):
"""Applies the substitutions defined in the dictionary to the formula."""
return get_env().substituter.substitute(formula, subs)
def serialize(formula, threshold=None):
"""Provides a string representing the formula."""
return get_env().serializer.serialize(formula,
threshold=threshold)
def get_free_variables(formula):
"""Returns the simplified version of the formula."""
return get_env().fvo.get_free_variables(formula)
def get_atoms(formula):
"""Returns the set of atoms of the formula."""
return get_env().ao.get_atoms(formula)
def get_formula_size(formula, measure=None):
"""Returns the size of the formula as measured by the given counting type.
See pysmt.oracles.SizeOracle for details.
"""
return get_env().sizeo.get_size(formula, measure)
##### Nodes Creation #####
def ForAll(variables, formula):
r""".. math:: \forall v_1, \cdots, v_n . \varphi(v_1, \cdots, v_n)"""
return get_env().formula_manager.ForAll(variables, formula)
def Exists(variables, formula):
r""".. math:: \exists v_1, \cdots, v_n . \varphi(v_1, \cdots, v_n)"""
return get_env().formula_manager.Exists(variables, formula)
def Function(vname, params):
r""".. math:: vname(p_1, \cdots, p_n)"""
return get_env().formula_manager.Function(vname, params)
def Not(formula):
r""".. math:: \lnot \varphi"""
return get_env().formula_manager.Not(formula)
def Implies(left, right):
r""".. math:: l \rightarrow r"""
return get_env().formula_manager.Implies(left, right)
def Iff(left, right):
r""".. math:: l \leftrightarrow r """
return get_env().formula_manager.Iff(left, right)
def GE(left, right):
r""".. math:: l \ge r"""
return get_env().formula_manager.GE(left, right)
def Minus(left, right):
r""".. math:: l - r """
return get_env().formula_manager.Minus(left, right)
def Times(left, right):
r""".. math:: l * r"""
return get_env().formula_manager.Times(left, right)
def Div(left, right):
r""".. math:: \frac{l}{r}"""
return get_env().formula_manager.Div(left, right)
def Equals(left, right):
r""".. math:: l = r"""
return get_env().formula_manager.Equals(left, right)
def GT(left, right):
r""".. math:: l > r"""
return get_env().formula_manager.GT(left, right)
def LE(left, right):
r""".. math:: l \le r"""
return get_env().formula_manager.LE(left, right)
def LT(left, right):
r""".. math:: l < r"""
return get_env().formula_manager.LT(left, right)
def Ite(iff, left, right):
r""".. math:: \text{ If } i \text{ Then } l \text{ Else } r"""
return get_env().formula_manager.Ite(iff, left, right)
def Symbol(name, typename=types.BOOL):
"""Returns a symbol with the given name and type."""
return get_env().formula_manager.Symbol(name, typename)
def FreshSymbol(typename=types.BOOL, template=None):
"""Returns a symbol with a fresh name and given type."""
return get_env().formula_manager.FreshSymbol(typename, template)
def Int(value):
"""Returns an Integer constant with the given value."""
return get_env().formula_manager.Int(value)
def Bool(value):
"""Returns a Boolean constant with the given value."""
return get_env().formula_manager.Bool(value)
def Real(value):
"""Returns a Real constant with the given value."""
return get_env().formula_manager.Real(value)
def TRUE():
"""Returns the Boolean constant TRUE."""
return get_env().formula_manager.TRUE()
def FALSE():
"""Returns the Boolean constant FALSE."""
return get_env().formula_manager.FALSE()
def And(*args):
r""".. math:: \varphi_0 \land \cdots \land \varphi_n """
return get_env().formula_manager.And(*args)
def Or(*args):
r""".. math:: \varphi_0 \lor \cdots \lor \varphi_n """
return get_env().formula_manager.Or(*args)
def Plus(*args):
r""".. math:: \varphi_0 + \cdots + \varphi_n """
return get_env().formula_manager.Plus(*args)
def ToReal(formula):
"""Explicit cast of a term into a Real term."""
return get_env().formula_manager.ToReal(formula)
def AtMostOne(*args):
"""At most one can be true at anytime.
Cardinality constraint over a set of boolean expressions.
"""
return get_env().formula_manager.AtMostOne(*args)
def ExactlyOne(*args):
"""Given a set of boolean expressions requires that exactly one holds."""
return get_env().formula_manager.ExactlyOne(*args)
def AllDifferent(*args):
"""Given a set of non-boolean expressions, requires that each of them
has value different from all the others
"""
return get_env().formula_manager.AllDifferent(*args)
def Xor(left, right):
"""Returns the XOR of left and right"""
return get_env().formula_manager.Xor(left, right)
def Min(*args):
"""Minimum over a set of real or integer terms."""
return get_env().formula_manager.Min(*args)
def Max(*args):
"""Maximum over a set of real or integer terms"""
return get_env().formula_manager.Max(*args)
def EqualsOrIff(left, right):
"""Returns Equals() or Iff() depending on the type of the arguments.
This can be used to deal with ambiguous cases where we might be
dealing with both Theory and Boolean atoms.
"""
return get_env().formula_manager.EqualsOrIff(left, right)
# Bit Vectors
def BV(value, width=None):
"""Returns a constant of type BitVector.
value can be either:
- a string of 0s and 1s
- a string starting with "#b" followed by a sequence of 0s and 1s
- an integer number s.t. 0 <= value < 2**width
In order to create the BV representation of a signed integer,
the SBV() method shall be used.
"""
return get_env().formula_manager.BV(value, width)
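# Illustrative ways (per the docstring above) to build the same 8-bit
# constant; the concrete widths are assumptions for the example only:
#   BV("00000101")        # binary string, width inferred from its length
#   BV("#b00000101")      # SMT-LIB style "#b" binary string
#   BV(5, width=8)        # non-negative integer plus an explicit width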
def SBV(value, width=None):
"""Returns a constant of type BitVector interpreting the sign.
If the specified value is an integer, it is converted in the
2-complement representation of the given number, otherwise the
behavior is the same as BV().
"""
return get_env().formula_manager.SBV(value, width)
def BVOne(width=None):
"""Returns the unsigned one constant BitVector."""
return get_env().formula_manager.BVOne(width)
def BVZero(width=None):
"""Returns the zero constant BitVector."""
return get_env().formula_manager.BVZero(width)
def BVNot(formula):
"""Returns the bitvector Not(bv)"""
return get_env().formula_manager.BVNot(formula)
def BVAnd(left, right):
"""Returns the Bit-wise AND of two bitvectors of the same size."""
return get_env().formula_manager.BVAnd(left, right)
def BVOr(left, right):
"""Returns the Bit-wise OR of two bitvectors of the same size."""
return get_env().formula_manager.BVOr(left, right)
def BVXor(left, right):
"""Returns the Bit-wise XOR of two bitvectors of the same size."""
return get_env().formula_manager.BVXor(left, right)
def BVConcat(left, right):
"""Returns the Concatenation of the two BVs"""
return get_env().formula_manager.BVConcat(left, right)
def BVExtract(formula, start=0, end=None):
"""Returns the slice of formula from start to end (inclusive)."""
return get_env().formula_manager.BVExtract(formula, start=start, end=end)
def BVULT(left, right):
"""Returns the formula left < right."""
return get_env().formula_manager.BVULT(left, right)
def BVUGT(left, right):
"""Returns the formula left > right."""
return get_env().formula_manager.BVUGT(left, right)
def BVULE(left, right):
"""Returns the formula left <= right."""
return get_env().formula_manager.BVULE(left, right)
def BVUGE(left, right):
"""Returns the formula left >= right."""
return get_env().formula_manager.BVUGE(left, right)
def BVNeg(formula):
"""Returns the arithmetic negation of the BV."""
return get_env().formula_manager.BVNeg(formula)
def BVAdd(left, right):
"""Returns the sum of two BV."""
return get_env().formula_manager.BVAdd(left, right)
def BVSub(left, right):
"""Returns the difference of two BV."""
return get_env().formula_manager.BVSub(left, right)
def BVMul(left, right):
"""Returns the product of two BV."""
return get_env().formula_manager.BVMul(left, right)
def BVUDiv(left, right):
"""Returns the division of the two BV."""
return get_env().formula_manager.BVUDiv(left, right)
def BVURem(left, right):
"""Returns the reminder of the two BV."""
return get_env().formula_manager.BVURem(left, right)
def BVLShl(left, right):
"""Returns the logical left shift the BV."""
return get_env().formula_manager.BVLShl(left, right)
def BVLShr(left, right):
"""Returns the logical right shift the BV."""
return get_env().formula_manager.BVLShr(left, right)
def BVRol(formula, steps):
"""Returns the LEFT rotation of the BV by the number of steps."""
return get_env().formula_manager.BVRol(formula, steps)
def BVRor(formula, steps):
"""Returns the RIGHT rotation of the BV by the number of steps."""
return get_env().formula_manager.BVRor(formula, steps)
def BVZExt(formula, increase):
"""Returns the extension of the BV
New bits are set to zero.
"""
return get_env().formula_manager.BVZExt(formula, increase)
def BVSExt(formula, increase):
"""Returns the signed extension of the BV
New bits are set according to the most-significant-bit.
"""
return get_env().formula_manager.BVSExt(formula, increase)
def BVSLT(left, right):
"""Returns the SIGNED LOWER-THAN comparison for BV."""
return get_env().formula_manager.BVSLT(left, right)
def BVSLE(left, right):
"""Returns the SIGNED LOWER-THAN-OR-EQUAL-TO comparison for BV."""
return get_env().formula_manager.BVSLE(left, right)
def BVSGT(left, right):
"""Returns the SIGNED GREATER-THAN comparison for BV."""
return get_env().formula_manager.BVSGT(left, right)
def BVSGE(left, right):
"""Returns the SIGNED GREATER-THAN-OR-EQUAL-TO comparison for BV."""
return get_env().formula_manager.BVSGE(left, right)
def BVSDiv(left, right):
"""Returns the SIGNED DIVISION of left by right"""
return get_env().formula_manager.BVSDiv(left, right)
def BVSRem(left, right):
"""Returns the SIGNED REMAINDER of left divided by right"""
return get_env().formula_manager.BVSRem(left, right)
def BVComp(left, right):
"""Returns a BV of size 1 equal to 0 if left is equal to right,
otherwise 1 is returned."""
return get_env().formula_manager.BVComp(left, right)
def BVAShr(left, right):
"""Returns the RIGHT arithmetic rotation of the left BV by the number
of steps specified by the right BV."""
return get_env().formula_manager.BVAShr(left, right)
#### Shortcuts for Solvers Factory #####
def Solver(quantified=False, name=None, logic=None):
"""Returns a solver."""
return get_env().factory.Solver(quantified=quantified,
name=name,
logic=logic)
def UnsatCoreSolver(quantified=False, name=None, logic=None,
unsat_cores_mode="all"):
"""Returns a solver supporting unsat core extraction."""
return get_env().factory.UnsatCoreSolver(quantified=quantified,
name=name,
logic=logic,
unsat_cores_mode=unsat_cores_mode)
def QuantifierEliminator(name=None, logic=None):
"""Returns a quantifier eliminator"""
return get_env().factory.QuantifierEliminator(name=name, logic=logic)
def Interpolator(name=None, logic=None):
"""Returns an interpolator"""
return get_env().factory.Interpolator(name=name, logic=logic)
def is_sat(formula, solver_name=None, logic=None):
""" Returns whether a formula is satisfiable.
:param formula: The formula to check satisfiability
:type formula: FNode
:param solver_name: Specify the name of the solver to be used.
:param logic: Specify the logic that is going to be used.
:returns: Whether the formula is SAT or UNSAT.
:rtype: bool
"""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_sat")
formula = env.formula_manager.normalize(formula)
return env.factory.is_sat(formula,
solver_name=solver_name,
logic=logic)
def get_model(formula, solver_name=None, logic=None):
""" Similar to :py:func:`is_sat` but returns a model if the formula is
satisfiable, otherwise None."""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during get_model")
formula = env.formula_manager.normalize(formula)
return env.factory.get_model(formula,
solver_name=solver_name,
logic=logic)
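# Hedged sketch of the typical is_sat / get_model flow (the symbol names are
# made up; the model accessor get_value is assumed from pySMT's Model API):
#   f = And(Symbol("x"), Symbol("y"))
#   model = get_model(f)
#   if model is not None:
#       print(model.get_value(Symbol("x")))   # expected: TRUE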
def get_implicant(formula, solver_name=None, logic=None):
"""Returns a formula f_i such that Implies(f_i, formula) is valid or None
if formula is unsatisfiable.
if complete is set to true, all the variables appearing in the
formula are forced to appear in f_i.
"""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during get_model")
formula = env.formula_manager.normalize(formula)
return env.factory.get_implicant(formula,
solver_name=solver_name,
logic=logic)
def get_unsat_core(clauses, solver_name=None, logic=None):
"""Similar to :py:func:`get_model` but returns the unsat core of the
conjunction of the input clauses"""
env = get_env()
if any(c not in env.formula_manager for c in clauses):
warnings.warn("Warning: Contextualizing formula during get_model")
clauses = [env.formula_manager.normalize(c) for c in clauses]
return env.factory.get_unsat_core(clauses,
solver_name=solver_name,
logic=logic)
def is_valid(formula, solver_name=None, logic=None):
"""Similar to :py:func:`is_sat` but checks validity."""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_valid")
formula = env.formula_manager.normalize(formula)
return env.factory.is_valid(formula,
solver_name=solver_name,
logic=logic)
def is_unsat(formula, solver_name=None, logic=None):
"""Similar to :py:func:`is_sat` but checks unsatisfiability."""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_unsat")
formula = env.formula_manager.normalize(formula)
return env.factory.is_unsat(formula,
solver_name=solver_name,
logic=logic)
def qelim(formula, solver_name=None, logic=None):
"""Performs quantifier elimination of the given formula."""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_unsat")
formula = env.formula_manager.normalize(formula)
return env.factory.qelim(formula,
solver_name=solver_name,
logic=logic)
def binary_interpolant(formula_a, formula_b, solver_name=None, logic=None):
"""Computes an interpolant of (formula_a, formula_b). Returns None
if the conjunction is satisfiable"""
env = get_env()
formulas = [formula_a, formula_b]
for i, f in enumerate(formulas):
if f not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during "
"binary_interpolant")
formulas[i] = env.formula_manager.normalize(f)
return env.factory.binary_interpolant(formulas[0], formulas[1],
solver_name=solver_name,
logic=logic)
def sequence_interpolant(formulas, solver_name=None, logic=None):
"""Computes a sequence interpolant of the formulas. Returns None
if the conjunction is satisfiable"""
env = get_env()
formulas = list(formulas)
for i, f in enumerate(formulas):
if f not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during "
"sequence_interpolant")
formulas[i] = env.formula_manager.normalize(f)
return env.factory.sequence_interpolant(formulas,
solver_name=solver_name,
logic=logic)
def read_configuration(config_filename, environment=None):
"""
Reads the pysmt configuration of the given file path and applies
it on the specified environment. If no environment is specified,
the top-level environment will be used.
"""
if environment is None:
environment = get_env()
config.configure_environment(config_filename, environment)
def write_configuration(config_filename, environment=None):
"""
Dumps the current pysmt configuration to the specified file path
"""
if environment is None:
environment = get_env()
config.write_environment_configuration(config_filename, environment)
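# Hedged usage sketch of the two helpers above (the file name is
# illustrative only):
#   write_configuration("pysmt.conf")   # dump the current settings
#   read_configuration("pysmt.conf")    # re-apply them later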
|
|
# -*- coding: UTF-8 -*-
"""Easy to use object-oriented thread pool framework.
A thread pool is an object that maintains a pool of worker threads to perform
time-consuming operations in parallel. It assigns jobs to the threads
by putting them in a work request queue, where they are picked up by the
next available thread. This then performs the requested operation in the
background and puts the results in another queue.
The thread pool object can then collect the results from all threads from
this queue as soon as they become available or after all threads have
finished their work. It's also possible to define callbacks to handle
each result as it comes in.
The basic concept and some code was taken from the book "Python in a Nutshell,
2nd edition" by Alex Martelli, O'Reilly 2006, ISBN 0-596-10046-9, from section
14.5 "Threaded Program Architecture". I wrapped the main program logic in the
ThreadPool class, added the WorkRequest class and the callback system and
tweaked the code here and there. Kudos also to Florent Aide for the exception
handling mechanism.
Basic usage::
>>> pool = ThreadPool(poolsize)
>>> requests = makeRequests(some_callable, list_of_args, callback)
>>> [pool.putRequest(req) for req in requests]
>>> pool.wait()
See the end of the module code for a brief, annotated usage example.
Website : http://chrisarndt.de/projects/threadpool/
"""
__docformat__ = "restructuredtext en"
__all__ = [
'makeRequests',
'NoResultsPending',
'NoWorkersAvailable',
'ThreadPool',
'WorkRequest',
'WorkerThread'
]
__author__ = "Christopher Arndt"
__version__ = '1.2.7'
__revision__ = "$Revision: 416 $"
__date__ = "$Date: 2009-10-07 05:41:27 +0200 (Wed, 07 Oct 2009) $"
__license__ = "MIT license"
# standard library modules
import sys
import threading
import Queue
import traceback
# exceptions
class NoResultsPending(Exception):
"""All work requests have been processed."""
pass
class NoWorkersAvailable(Exception):
"""No worker threads available to process remaining requests."""
pass
# internal module helper functions
def _handle_thread_exception(request, exc_info):
"""Default exception handler callback function.
This just prints the exception info via ``traceback.print_exception``.
"""
traceback.print_exception(*exc_info)
# utility functions
def makeRequests(callable_, args_list, callback=None,
exc_callback=_handle_thread_exception):
"""Create several work requests for same callable with different arguments.
Convenience function for creating several work requests for the same
callable where each invocation of the callable receives different values
for its arguments.
``args_list`` contains the parameters for each invocation of callable.
Each item in ``args_list`` should be either a 2-item tuple of the list of
positional arguments and a dictionary of keyword arguments or a single,
non-tuple argument.
See docstring for ``WorkRequest`` for info on ``callback`` and
``exc_callback``.
"""
requests = []
for item in args_list:
if isinstance(item, tuple):
requests.append(
WorkRequest(callable_, item[0], item[1], callback=callback,
exc_callback=exc_callback)
)
else:
requests.append(
WorkRequest(callable_, [item], None, callback=callback,
exc_callback=exc_callback)
)
return requests
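# Hedged example of the two args_list item forms accepted above (the
# callable and values are placeholders):
#   makeRequests(work, [42, 43])                      # single non-tuple args
#   makeRequests(work, [((1, 2), {'flag': True})])    # (args, kwargs) tuples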
# classes
class WorkerThread(threading.Thread):
"""Background thread connected to the requests/results queues.
A worker thread sits in the background and picks up work requests from
one queue and puts the results in another until it is dismissed.
"""
def __init__(self, requests_queue, results_queue, poll_timeout=5, **kwds):
"""Set up thread in daemonic mode and start it immediatedly.
``requests_queue`` and ``results_queue`` are instances of
``Queue.Queue`` passed by the ``ThreadPool`` class when it creates a new
worker thread.
"""
threading.Thread.__init__(self, **kwds)
self.setDaemon(1)
self._requests_queue = requests_queue
self._results_queue = results_queue
self._poll_timeout = poll_timeout
self._dismissed = threading.Event()
self.start()
def run(self):
"""Repeatedly process the job queue until told to exit."""
while True:
if self._dismissed.isSet():
# we are dismissed, break out of loop
break
# get next work request. If we don't get a new request from the
# queue after self._poll_timeout seconds, we jump to the start of
# the while loop again, to give the thread a chance to exit.
try:
request = self._requests_queue.get(True, self._poll_timeout)
except Queue.Empty:
continue
else:
if self._dismissed.isSet():
# we are dismissed, put back request in queue and exit loop
self._requests_queue.put(request)
break
try:
result = request.callable(*request.args, **request.kwds)
self._results_queue.put((request, result))
except:
request.exception = True
self._results_queue.put((request, sys.exc_info()))
def dismiss(self):
"""Sets a flag to tell the thread to exit when done with current job."""
self._dismissed.set()
class WorkRequest:
"""A request to execute a callable for putting in the request queue later.
See the module function ``makeRequests`` for the common case
where you want to build several ``WorkRequest`` objects for the same
callable but with different arguments for each call.
"""
def __init__(self, callable_, args=None, kwds=None, requestID=None,
callback=None, exc_callback=_handle_thread_exception):
"""Create a work request for a callable and attach callbacks.
A work request consists of a callable to be executed by a
worker thread, a list of positional arguments, and a dictionary
of keyword arguments.
A ``callback`` function can be specified, that is called when the
results of the request are picked up from the result queue. It must
accept two anonymous arguments, the ``WorkRequest`` object and the
results of the callable, in that order. If you want to pass additional
information to the callback, just stick it on the request object.
You can also give custom callback for when an exception occurs with
the ``exc_callback`` keyword parameter. It should also accept two
anonymous arguments, the ``WorkRequest`` and a tuple with the exception
details as returned by ``sys.exc_info()``. The default implementation
of this callback just prints the exception info via
``traceback.print_exception``. If you want no exception handler
callback, just pass in ``None``.
``requestID``, if given, must be hashable since it is used by
``ThreadPool`` object to store the results of that work request in a
dictionary. It defaults to the return value of ``id(self)``.
"""
if requestID is None:
self.requestID = id(self)
else:
try:
self.requestID = hash(requestID)
except TypeError:
raise TypeError("requestID must be hashable.")
self.exception = False
self.callback = callback
self.exc_callback = exc_callback
self.callable = callable_
self.args = args or []
self.kwds = kwds or {}
def __str__(self):
return "<WorkRequest id=%s args=%r kwargs=%r exception=%s>" % \
(self.requestID, self.args, self.kwds, self.exception)
class ThreadPool:
"""A thread pool, distributing work requests and collecting results.
See the module docstring for more information.
"""
def __init__(self, num_workers, q_size=0, resq_size=0, poll_timeout=5):
"""Set up the thread pool and start num_workers worker threads.
``num_workers`` is the number of worker threads to start initially.
If ``q_size > 0`` the size of the work *request queue* is limited and
the thread pool blocks when the queue is full and it tries to put
more work requests in it (see ``putRequest`` method), unless you also
use a positive ``timeout`` value for ``putRequest``.
If ``resq_size > 0`` the size of the *results queue* is limited and the
worker threads will block when the queue is full and they try to put
new results in it.
.. warning::
If you set both ``q_size`` and ``resq_size`` to ``!= 0`` there is
the possibility of a deadlock when the results queue is not pulled
regularly and too many jobs are put in the work requests queue.
To prevent this, always set ``timeout > 0`` when calling
``ThreadPool.putRequest()`` and catch ``Queue.Full`` exceptions.
"""
self._requests_queue = Queue.Queue(q_size)
self._results_queue = Queue.Queue(resq_size)
self.workers = []
self.dismissedWorkers = []
self.workRequests = {}
self.createWorkers(num_workers, poll_timeout)
def createWorkers(self, num_workers, poll_timeout=5):
"""Add num_workers worker threads to the pool.
``poll_timeout`` sets the interval in seconds (int or float) for how
often threads should check whether they are dismissed while waiting for
requests.
"""
for i in range(num_workers):
self.workers.append(WorkerThread(self._requests_queue,
self._results_queue, poll_timeout=poll_timeout))
def dismissWorkers(self, num_workers, do_join=False):
"""Tell num_workers worker threads to quit after their current task."""
dismiss_list = []
for i in range(min(num_workers, len(self.workers))):
worker = self.workers.pop()
worker.dismiss()
dismiss_list.append(worker)
if do_join:
for worker in dismiss_list:
worker.join()
else:
self.dismissedWorkers.extend(dismiss_list)
def joinAllDismissedWorkers(self):
"""Perform Thread.join() on all worker threads that have been dismissed.
"""
for worker in self.dismissedWorkers:
worker.join()
self.dismissedWorkers = []
def putRequest(self, request, block=True, timeout=None):
"""Put work request into work queue and save its id for later."""
assert isinstance(request, WorkRequest)
# don't reuse old work requests
assert not getattr(request, 'exception', None)
self._requests_queue.put(request, block, timeout)
self.workRequests[request.requestID] = request
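# Sketch of the non-blocking pattern recommended in the class docstring
# when both queues are bounded (pool and request names are illustrative):
#   pool = ThreadPool(4, q_size=32, resq_size=32)
#   try:
#       pool.putRequest(request, timeout=1)
#   except Queue.Full:
#       pool.poll()   # drain some results before retrying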
def poll(self, block=False):
"""Process any new results in the queue."""
while True:
# still results pending?
if not self.workRequests:
raise NoResultsPending
# are there still workers to process remaining requests?
elif block and not self.workers:
raise NoWorkersAvailable
try:
# get back next results
request, result = self._results_queue.get(block=block)
# has an exception occurred?
if request.exception and request.exc_callback:
request.exc_callback(request, result)
# hand results to callback, if any
if request.callback and not \
(request.exception and request.exc_callback):
request.callback(request, result)
del self.workRequests[request.requestID]
except Queue.Empty:
break
def wait(self):
"""Wait for results, blocking until all have arrived."""
while 1:
try:
self.poll(True)
except NoResultsPending:
break
################
# USAGE EXAMPLE
################
if __name__ == '__main__':
import random
import time
# the work the threads will have to do (rather trivial in our example)
def do_something(data):
time.sleep(random.randint(1,5))
result = round(random.random() * data, 5)
# just to show off, we throw an exception once in a while
if result > 5:
raise RuntimeError("Something extraordinary happened!")
return result
# this will be called each time a result is available
def print_result(request, result):
print "**** Result from request #%s: %r" % (request.requestID, result)
# this will be called when an exception occurs within a thread
# this example exception handler does little more than the default handler
def handle_exception(request, exc_info):
if not isinstance(exc_info, tuple):
# Something is seriously wrong...
print request
print exc_info
raise SystemExit
print "**** Exception occured in request #%s: %s" % \
(request.requestID, exc_info)
# assemble the arguments for each job to a list...
data = [random.randint(1,10) for i in range(20)]
# ... and build a WorkRequest object for each item in data
requests = makeRequests(do_something, data, print_result, handle_exception)
# to use the default exception handler, uncomment next line and comment out
# the preceding one.
#requests = makeRequests(do_something, data, print_result)
# or the other form of args_lists accepted by makeRequests: ((,), {})
data = [((random.randint(1,10),), {}) for i in range(20)]
requests.extend(
makeRequests(do_something, data, print_result, handle_exception)
#makeRequests(do_something, data, print_result)
# to use the default exception handler, uncomment next line and comment
# out the preceding one.
)
# we create a pool of 3 worker threads
print "Creating thread pool with 3 worker threads."
main = ThreadPool(3)
# then we put the work requests in the queue...
for req in requests:
main.putRequest(req)
print "Work request #%s added." % req.requestID
# or shorter:
# [main.putRequest(req) for req in requests]
# ...and wait for the results to arrive in the result queue
# by using ThreadPool.wait(). This would block until results for
# all work requests have arrived:
# main.wait()
# instead we can poll for results while doing something else:
i = 0
while True:
try:
time.sleep(0.5)
main.poll()
print "Main thread working...",
print "(active worker threads: %i)" % (threading.activeCount()-1, )
if i == 10:
print "**** Adding 3 more worker threads..."
main.createWorkers(3)
if i == 20:
print "**** Dismissing 2 worker threads..."
main.dismissWorkers(2)
i += 1
except KeyboardInterrupt:
print "**** Interrupted!"
break
except NoResultsPending:
print "**** No pending results."
break
if main.dismissedWorkers:
print "Joining all dismissed worker threads..."
main.joinAllDismissedWorkers()
|
|
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
import numpy as np
from .constants import FIFF
from .tag import find_tag, has_tag, read_tag
from .tree import dir_tree_find
from ..utils import logger, verbose
def hex2dec(s):
return int(s, 16)
def _read_named_matrix(fid, node, matkind):
"""read_named_matrix(fid,node)
Read named matrix from the given node
Parameters
----------
fid : file
The file descriptor
node : dict
Node
matkind : mat kind
XXX
Returns
-------
mat : dict
The matrix with row and col names.
"""
# Descend one level if necessary
if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX:
for k in range(node['nchild']):
if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX:
if has_tag(node['children'][k], matkind):
node = node['children'][k]
break
else:
raise ValueError('Desired named matrix (kind = %d) not'
' available' % matkind)
else:
if not has_tag(node, matkind):
raise ValueError('Desired named matrix (kind = %d) not available'
% matkind)
# Read everything we need
tag = find_tag(fid, node, matkind)
if tag is None:
raise ValueError('Matrix data missing')
else:
data = tag.data
nrow, ncol = data.shape
tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
if tag is not None:
if tag.data != nrow:
raise ValueError('Number of rows in matrix data and '
'FIFF_MNE_NROW tag do not match')
tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
if tag is not None:
if tag.data != ncol:
raise ValueError('Number of columns in matrix data and '
'FIFF_MNE_NCOL tag do not match')
tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
if tag is not None:
row_names = tag.data
else:
row_names = None
tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
if tag is not None:
col_names = tag.data
else:
col_names = None
# Put it together
mat = dict(nrow=nrow, ncol=ncol)
if row_names is not None:
mat['row_names'] = row_names.split(':')
else:
mat['row_names'] = None
if col_names is not None:
mat['col_names'] = col_names.split(':')
else:
mat['col_names'] = None
mat['data'] = data.astype(np.float)
return mat
@verbose
def read_ctf_comp(fid, node, chs, verbose=None):
"""Read the CTF software compensation data from the given node
Parameters
----------
fid : file
The file descriptor.
node : dict
The node in the FIF tree.
chs : list
The list of channels # XXX unclear.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
compdata : list
The compensation data
"""
compdata = []
comps = dir_tree_find(node, FIFF.FIFFB_MNE_CTF_COMP_DATA)
for node in comps:
# Read the data we need
mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_CTF_COMP_DATA)
for p in range(node['nent']):
kind = node['directory'][p].kind
pos = node['directory'][p].pos
if kind == FIFF.FIFF_MNE_CTF_COMP_KIND:
tag = read_tag(fid, pos)
break
else:
raise Exception('Compensation type not found')
# Get the compensation kind and map it to a simple number
one = dict(ctfkind=tag.data)
del tag
if one['ctfkind'] == int('47314252', 16): # hex2dec('47314252'):
one['kind'] = 1
elif one['ctfkind'] == int('47324252', 16): # hex2dec('47324252'):
one['kind'] = 2
elif one['ctfkind'] == int('47334252', 16): # hex2dec('47334252'):
one['kind'] = 3
else:
one['kind'] = int(one['ctfkind'])
for p in range(node['nent']):
kind = node['directory'][p].kind
pos = node['directory'][p].pos
if kind == FIFF.FIFF_MNE_CTF_COMP_CALIBRATED:
tag = read_tag(fid, pos)
calibrated = tag.data
break
else:
calibrated = False
one['save_calibrated'] = calibrated
one['rowcals'] = np.ones(mat['data'].shape[0], dtype=np.float)
one['colcals'] = np.ones(mat['data'].shape[1], dtype=np.float)
row_cals, col_cals = None, None # initialize cals
if not calibrated:
#
# Calibrate...
#
# Do the columns first
#
ch_names = [c['ch_name'] for c in chs]
col_cals = np.zeros(mat['data'].shape[1], dtype=np.float)
for col in range(mat['data'].shape[1]):
p = ch_names.count(mat['col_names'][col])
if p == 0:
raise Exception('Channel %s is not available in data'
% mat['col_names'][col])
elif p > 1:
raise Exception('Ambiguous channel %s' %
mat['col_names'][col])
idx = ch_names.index(mat['col_names'][col])
col_cals[col] = 1.0 / (chs[idx]['range'] * chs[idx]['cal'])
# Then the rows
row_cals = np.zeros(mat['data'].shape[0])
for row in range(mat['data'].shape[0]):
p = ch_names.count(mat['row_names'][row])
if p == 0:
raise Exception('Channel %s is not available in data'
% mat['row_names'][row])
elif p > 1:
raise Exception('Ambiguous channel %s' %
mat['row_names'][row])
idx = ch_names.index(mat['row_names'][row])
row_cals[row] = chs[idx]['range'] * chs[idx]['cal']
mat['data'] = row_cals[:, None] * mat['data'] * col_cals[None, :]
one['rowcals'] = row_cals
one['colcals'] = col_cals
one['data'] = mat
compdata.append(one)
if row_cals is not None:
del row_cals
if col_cals is not None:
del col_cals
if len(compdata) > 0:
logger.info(' Read %d compensation matrices' % len(compdata))
return compdata
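# Hedged call sketch: the file handle and FIF tree normally come from
# fiff_open() and the channel list from a measurement info dict; these
# names are assumptions about the surrounding package, not this module.
#   fid, tree, _ = fiff_open('raw.fif')
#   comps = read_ctf_comp(fid, tree, info['chs'])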
###############################################################################
# Writing
from .write import start_block, end_block, write_int
from .matrix import write_named_matrix
def write_ctf_comp(fid, comps):
"""Write the CTF compensation data into a fif file
Parameters
----------
fid : file
The open FIF file descriptor
comps : list
The compensation data to write
"""
if len(comps) <= 0:
return
# This is very simple in fact
start_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
for comp in comps:
start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
# Write the compensation kind
write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp['ctfkind'])
write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED,
comp['save_calibrated'])
if not comp['save_calibrated']:
# Undo calibration
comp = deepcopy(comp)
data = ((1. / comp['rowcals'][:, None]) * comp['data']['data']
* (1. / comp['colcals'][None, :]))
comp['data']['data'] = data
write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp['data'])
end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
end_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
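# Hedged sketch of the write path: 'fid' is an open FIF file started by the
# package's writer helpers, and info['comps'] is the conventional place the
# compensation data read above is stored (both are assumptions here).
#   write_ctf_comp(fid, info['comps'])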
|
|
from __future__ import unicode_literals
import json
from decimal import Decimal
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from django.core.cache import caches
from dynamic_preferences.registries import (
global_preferences_registry as registry
)
from dynamic_preferences.users.registries import (
user_preferences_registry as user_registry
)
from dynamic_preferences.api import serializers
from dynamic_preferences.users.serializers import UserPreferenceSerializer
class BaseTest(object):
def tearDown(self):
caches['default'].clear()
class TestSerializers(BaseTest, TestCase):
def test_can_serialize_preference(self):
manager = registry.manager()
pref = manager.get_db_pref(section='user', name='max_users')
serializer = serializers.GlobalPreferenceSerializer(pref)
data = serializer.data
self.assertEqual(data['default'], pref.preference.api_repr(pref.preference.default))
self.assertEqual(data['value'], pref.preference.api_repr(pref.value))
self.assertEqual(data['identifier'], pref.preference.identifier())
self.assertEqual(data['section'], pref.section)
self.assertEqual(data['name'], pref.name)
self.assertEqual(data['verbose_name'], pref.preference.verbose_name)
self.assertEqual(data['help_text'], pref.preference.help_text)
self.assertEqual(data['field']['class'], 'IntegerField')
self.assertEqual(data['field']['input_type'], 'number')
self.assertEqual(data['field']['widget']['class'], 'NumberInput')
pref = manager.get_db_pref(section='exam', name='duration')
serializer = serializers.GlobalPreferenceSerializer(pref)
data = serializer.data
self.assertEqual(data['value'], '03:00:00')
pref = manager.get_db_pref(section='company', name='RegistrationDate')
serializer = serializers.GlobalPreferenceSerializer(pref)
data = serializer.data
self.assertEqual(data['value'], '1998-09-04')
pref = manager.get_db_pref(section='child', name='BirthDateTime')
serializer = serializers.GlobalPreferenceSerializer(pref)
data = serializer.data
self.assertEqual(data['value'], '1992-05-04T03:04:10.000150+00:00')
def test_can_change_preference_value_using_serializer(self):
manager = registry.manager()
pref = manager.get_db_pref(section='user', name='max_users')
data = {'value': 666}
serializer = serializers.GlobalPreferenceSerializer(pref, data=data)
is_valid = serializer.is_valid()
self.assertTrue(is_valid)
serializer.save()
pref = manager.get_db_pref(section='user', name='max_users')
self.assertEqual(pref.value, data['value'])
def test_serializer_also_uses_custom_clean_method(self):
manager = registry.manager()
pref = manager.get_db_pref(section='user', name='max_users')
# will fail because of preference cleaning
data = {'value': 1001}
serializer = serializers.GlobalPreferenceSerializer(pref, data=data)
is_valid = serializer.is_valid()
self.assertFalse(is_valid)
self.assertIn('value', serializer.errors)
def test_serializer_includes_additional_data_if_any(self):
user = User(
username="user",
email="[email protected]")
user.set_password('test')
user.save()
manager = user_registry.manager(instance=user)
pref = manager.get_db_pref(
section='user', name='favorite_vegetable')
serializer = UserPreferenceSerializer(pref)
self.assertEqual(
serializer.data['additional_data']['choices'],
pref.preference.choices)
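# Hedged sketch of the manager access pattern the tests above rely on
# (django-dynamic-preferences also supports dict-style "section__name"
# keys; the preference must exist in the project's registry):
#   manager = registry.manager()
#   manager['user__max_users'] = 42
#   assert manager['user__max_users'] == 42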
class TestViewSets(BaseTest, TestCase):
def setUp(self):
self.admin = User(
username="admin",
email="[email protected]",
is_superuser=True,
is_staff=True)
self.admin.set_password('test')
self.admin.save()
def test_global_preference_list_requires_permission(self):
url = reverse('api:global-list')
# anonymous
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
# not authorized
user = User(
username="user",
email="[email protected]")
user.set_password('test')
user.save()
self.client.login(username='user', password='test')
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_can_list_preferences(self):
manager = registry.manager()
url = reverse('api:global-list')
self.client.login(username='admin', password="test")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
payload = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(payload), len(registry.preferences()))
for e in payload:
pref = manager.get_db_pref(section=e['section'], name=e['name'])
serializer = serializers.GlobalPreferenceSerializer(pref)
self.assertEqual(pref.preference.identifier(), e['identifier'])
def test_can_list_preferences_with_section_filter(self):
manager = registry.manager()
url = reverse('api:global-list')
self.client.login(username='admin', password="test")
response = self.client.get(url, {'section': 'user'})
self.assertEqual(response.status_code, 200)
payload = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(payload), len(registry.preferences('user')))
for e in payload:
pref = manager.get_db_pref(section=e['section'], name=e['name'])
serializers.GlobalPreferenceSerializer(pref)
self.assertEqual(pref.preference.identifier(), e['identifier'])
def test_can_detail_preference(self):
manager = registry.manager()
pref = manager.get_db_pref(section='user', name='max_users')
url = reverse(
'api:global-detail',
kwargs={'pk': pref.preference.identifier()})
self.client.login(username='admin', password="test")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
payload = json.loads(response.content.decode('utf-8'))
self.assertEqual(pref.preference.identifier(), payload['identifier'])
self.assertEqual(pref.value, payload['value'])
def test_can_update_preference(self):
manager = registry.manager()
pref = manager.get_db_pref(section='user', name='max_users')
url = reverse(
'api:global-detail',
kwargs={'pk': pref.preference.identifier()})
self.client.login(username='admin', password="test")
response = self.client.patch(
url, json.dumps({'value': 16}), content_type='application/json')
self.assertEqual(response.status_code, 200)
pref = manager.get_db_pref(section='user', name='max_users')
self.assertEqual(pref.value, 16)
def test_can_update_decimal_preference(self):
manager = registry.manager()
pref = manager.get_db_pref(section='type', name='cost')
url = reverse(
'api:global-detail',
kwargs={'pk': pref.preference.identifier()})
self.client.login(username='admin', password="test")
response = self.client.patch(
url, json.dumps({'value': '111.11'}), content_type='application/json')
self.assertEqual(response.status_code, 200)
pref = manager.get_db_pref(section='type', name='cost')
self.assertEqual(pref.value, Decimal('111.11'))
def test_can_update_multiple_preferences(self):
manager = registry.manager()
pref = manager.get_db_pref(section='user', name='max_users')
url = reverse('api:global-bulk')
self.client.login(username='admin', password="test")
payload = {
'user__max_users': 16,
'user__registration_allowed': True,
}
response = self.client.post(
url, json.dumps(payload), content_type='application/json')
self.assertEqual(response.status_code, 200)
pref1 = manager.get_db_pref(section='user', name='max_users')
pref2 = manager.get_db_pref(
section='user', name='registration_allowed')
self.assertEqual(pref1.value, 16)
self.assertEqual(pref2.value, True)
def test_update_preference_returns_validation_error(self):
manager = registry.manager()
pref = manager.get_db_pref(section='user', name='max_users')
url = reverse(
'api:global-detail',
kwargs={'pk': pref.preference.identifier()})
self.client.login(username='admin', password="test")
response = self.client.patch(
url, json.dumps({'value': 1001}), content_type='application/json')
self.assertEqual(response.status_code, 400)
payload = json.loads(response.content.decode('utf-8'))
self.assertEqual(payload['value'], ['Wrong value!'])
def test_update_multiple_preferences_with_validation_errors_rollback(self):
manager = registry.manager()
pref = manager.get_db_pref(section='user', name='max_users')
url = reverse('api:global-bulk')
self.client.login(username='admin', password="test")
payload = {
'user__max_users': 1001,
'user__registration_allowed': True,
}
response = self.client.post(
url, json.dumps(payload), content_type='application/json')
self.assertEqual(response.status_code, 400)
errors = json.loads(response.content.decode('utf-8'))
self.assertEqual(
errors[pref.preference.identifier()]['value'], ['Wrong value!'])
pref1 = manager.get_db_pref(section='user', name='max_users')
pref2 = manager.get_db_pref(
section='user', name='registration_allowed')
self.assertEqual(pref1.value, pref1.preference.default)
self.assertEqual(pref2.value, pref2.preference.default)
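# A minimal usage sketch of the bulk endpoint exercised above, assuming the
# same 'api:global-bulk' URL name and the section__name identifier convention
# (e.g. 'user__max_users') that the tests rely on:
#
#     self.client.login(username='admin', password='test')
#     self.client.post(
#         reverse('api:global-bulk'),
#         json.dumps({'user__max_users': 16, 'user__registration_allowed': True}),
#         content_type='application/json')
#
# A single preference is addressed the same way via
# reverse('api:global-detail', kwargs={'pk': 'user__max_users'}) and PATCH.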
|
|
#! /usr/bin/env python
from __future__ import absolute_import as _absolute_import
'''
Copyright 2015 Tim Nonner
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import time, os, copy, collections
def solve(scenario,time_limit=None,copy_scenario=False,msg=0) :
"""
Integration of the ortools scheduling solver
"""
try :
from ortools.constraint_solver import pywrapcp
except :
raise Exception('ERROR: ortools is not installed')
S = scenario
if copy_scenario :
S = copy.deepcopy(scenario)
ort_solver = pywrapcp.Solver(S.name)
# tasks
task_to_interval = collections.OrderedDict()
    resource_to_intervals = { R : list() for R in S.resources() }
resource_task_to_interval = collections.OrderedDict()
for T in S.tasks() :
I = ort_solver.FixedDurationIntervalVar(0,S.horizon-T.length,T.length,False,T.name)
task_to_interval[T] = I
    # resource requirements
for T in S.tasks():
I = task_to_interval[T]
for RA in T.resources_req:
RA_tasks = list()
for R in RA :
I_ = ort_solver.FixedDurationIntervalVar(0,S.horizon-T.length,T.length,True,T.name+'_'+R.name)
resource_to_intervals[R].append(I_)
RA_tasks.append(I_)
resource_task_to_interval[(R,T)] = I_
ort_solver.Add( I.StaysInSync(I_) )
# if resources are fixed
if T.resources is not None and R in T.resources :
ort_solver.Add( I_.PerformedExpr() == 1 )
# one resource needs to get selected
ort_solver.Add(ort_solver.Sum([ I_.PerformedExpr() for I_ in RA_tasks ]) == 1)
ra_to_tasks = S.resources_req_tasks()
for RA in ra_to_tasks:
tasks = list(ra_to_tasks[RA])
T = tasks[0]
for T_ in tasks[1:] :
for R in RA :
I = resource_task_to_interval[(R,T)]
I_ = resource_task_to_interval[(R,T_)]
ort_solver.Add( I.PerformedExpr() == I_.PerformedExpr() )
# resources
sequences = collections.OrderedDict()
for R in S.resources() :
disj = ort_solver.DisjunctiveConstraint(resource_to_intervals[R],R.name)
sequences[R] = disj.SequenceVar()
ort_solver.Add(disj)
# move objective
# TODO: bug, variables that are not part of the objective might not be finally defined
ort_objective_var = ort_solver.Sum([ task_to_interval[T].EndExpr()*T.delay_cost
for T in S.tasks() if T in task_to_interval
and 'delay_cost' in T ])
ort_objective = ort_solver.Minimize(ort_objective_var, 1)
# precedences lax
for P in S.precs_lax() :
ort_solver.Add( task_to_interval[P.task_right].StartsAfterEnd( task_to_interval[P.task_left] ) )
    # TODO: add offset, but this requires DependencyGraph which is not exposed via swig?
# precedences tight
for P in S.precs_tight() :
ort_solver.Add( task_to_interval[P.task_right].StartsAtEnd( task_to_interval[P.task_left] ) )
    # TODO: add offset, but this requires DependencyGraph which is not exposed via swig?
# bound low
for P in S.bounds_low() :
ort_solver.Add( task_to_interval[P.task].StartsAfter(P.bound) )
# bound up
for P in S.bounds_up() :
ort_solver.Add( task_to_interval[P.task].StartsBefore(P.bound) )
# tight bound low
for P in S.bounds_low_tight() :
ort_solver.Add( task_to_interval[P.task].StartsAt(P.bound) )
# tight bound up
for P in S.bounds_up_tight() :
ort_solver.Add( task_to_interval[P.task].EndsAt(P.bound) )
# capacity lower bounds
for C in S.capacity_low():
# ignore sliced capacity constraints
if C._start != None or C._end != None:
continue
R = C.resource
cap_tasks = [ (resource_task_to_interval[R,T],C.weight(T=T,t=0)) for T in S.tasks() ]
ort_solver.Add( ort_solver.Sum([ I.PerformedExpr()*w for I,w in cap_tasks ]) >= C.bound )
    # capacity upper bounds
    for C in S.capacity_up():
# ignore sliced capacity constraints
if C._start != None or C._end != None:
continue
R = C.resource
cap_tasks = [ (resource_task_to_interval[R,T],C.weight(T=T,t=0)) for T in S.tasks() ]
ort_solver.Add( ort_solver.Sum([ I.PerformedExpr()*w for I,w in cap_tasks ]) <= C.bound )
# creates search phases.
vars_phase = ort_solver.Phase([ort_objective_var],
ort_solver.CHOOSE_FIRST_UNBOUND,
ort_solver.ASSIGN_MIN_VALUE)
sequence_phase = ort_solver.Phase(list(sequences.values()),
ort_solver.SEQUENCE_DEFAULT)
main_phase = ort_solver.Compose([sequence_phase, vars_phase])
# creates the search log.
search_log = ort_solver.SearchLog(100, ort_objective_var)
# collect solution
solution = ort_solver.Assignment()
for T in S.tasks() :
solution.Add(task_to_interval[T])
for R in S.resources() :
for I in resource_to_intervals[R] :
solution.Add(I)
collector = ort_solver.LastSolutionCollector(solution)
    # set limits (time limit in ms)
if time_limit :
ort_time_limit = int(time_limit*1000)
else :
ort_time_limit = 100000000
branch_limit = 100000000
failures_limit = 100000000
solutions_limit = 10000000
limits = (ort_solver.Limit(ort_time_limit, branch_limit, failures_limit, solutions_limit, True))
    # add log if msg is requested
search_params = [limits,collector,ort_objective]
if msg :
search_params.append(search_log)
# solves the problem.
ort_solver.Solve(main_phase,search_params)
# check for a solution
if not collector.SolutionCount():
if msg:
print('ERROR: no solution found')
return 0
solution = collector.Solution(0)
# read last solution
for T in S.tasks() :
T.start_value = int(solution.StartMin(task_to_interval[T])) #collector.StartValue(0, task_to_interval[T])
T.resources = [ R \
for RA in T.resources_req for R in RA \
if collector.PerformedValue(0,resource_task_to_interval[(R,T)]) == 1 ]
return 1
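# A minimal usage sketch, assuming the pyschedule Scenario API that this
# solver is written against (the scenario construction below is illustrative
# and not taken from this file):
#
#     from pyschedule import Scenario
#     S = Scenario('example', horizon=10)
#     R = S.Resource('machine')
#     T1 = S.Task('T1', length=2, delay_cost=1)
#     T2 = S.Task('T2', length=3, delay_cost=1)
#     T1 += R
#     T2 += R
#     solve(S, time_limit=10, msg=1)  # time_limit is given in seconds
#     print([(T.name, T.start_value) for T in S.tasks()])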
|
|
"""
SleekXMPP: The Sleek XMPP Library
Implementation of xeps for Internet of Things
http://wiki.xmpp.org/web/Tech_pages/IoT_systems
Copyright (C) 2013 Sustainable Innovation, [email protected], [email protected]
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import datetime
import logging
class Device(object):
"""
Example implementation of a device readout object.
Is registered in the XEP_0323.register_node call
The device object may be any custom implementation to support
specific devices, but it must implement the functions:
has_field
request_fields
"""
    def __init__(self, nodeId, fields=None):
        self.nodeId = nodeId
        self.fields = fields if fields is not None else {} # see fields described below
# {'type':'numeric',
# 'name':'myname',
# 'value': 42,
# 'unit':'Z'}];
self.timestamp_data = {}
self.momentary_data = {}
self.momentary_timestamp = ""
logging.debug("Device object started nodeId %s",nodeId)
def has_field(self, field):
"""
Returns true if the supplied field name exists in this device.
Arguments:
field -- The field name
"""
if field in self.fields.keys():
return True;
return False;
def refresh(self, fields):
"""
override method to do the refresh work
refresh values from hardware or other
"""
pass
def request_fields(self, fields, flags, session, callback):
"""
Starts a data readout. Verifies the requested fields,
refreshes the data (if needed) and calls the callback
with requested data.
Arguments:
fields -- List of field names to readout
flags -- [optional] data classifier flags for the field, e.g. momentary
Formatted as a dictionary like { "flag name": "flag value" ... }
session -- Session id, only used in the callback as identifier
callback -- Callback function to call when data is available.
The callback function must support the following arguments:
session -- Session id, as supplied in the request_fields call
nodeId -- Identifier for this device
result -- The current result status of the readout. Valid values are:
"error" - Readout failed.
"fields" - Contains readout data.
"done" - Indicates that the readout is complete. May contain
readout data.
timestamp_block -- [optional] Only applies when result != "error"
The readout data. Structured as a dictionary:
{
timestamp: timestamp for this datablock,
fields: list of field dictionary (one per readout field).
readout field dictionary format:
{
type: The field type (numeric, boolean, dateTime, timeSpan, string, enum)
name: The field name
value: The field value
unit: The unit of the field. Only applies to type numeric.
dataType: The datatype of the field. Only applies to type enum.
flags: [optional] data classifier flags for the field, e.g. momentary
Formatted as a dictionary like { "flag name": "flag value" ... }
}
}
error_msg -- [optional] Only applies when result == "error".
Error details when a request failed.
"""
logging.debug("request_fields called looking for fields %s",fields)
if len(fields) > 0:
            # Check availability
for f in fields:
if f not in self.fields.keys():
self._send_reject(session, callback)
return False;
else:
# Request all fields
fields = self.fields.keys();
# Refresh data from device
# ...
logging.debug("about to refresh device fields %s",fields)
self.refresh(fields)
if "momentary" in flags and flags['momentary'] == "true" or \
"all" in flags and flags['all'] == "true":
ts_block = {};
timestamp = "";
if len(self.momentary_timestamp) > 0:
timestamp = self.momentary_timestamp;
else:
timestamp = self._get_timestamp();
field_block = [];
for f in self.momentary_data:
if f in fields:
field_block.append({"name": f,
"type": self.fields[f]["type"],
"unit": self.fields[f]["unit"],
"dataType": self.fields[f]["dataType"],
"value": self.momentary_data[f]["value"],
"flags": self.momentary_data[f]["flags"]});
ts_block["timestamp"] = timestamp;
ts_block["fields"] = field_block;
callback(session, result="done", nodeId=self.nodeId, timestamp_block=ts_block);
return
from_flag = self._datetime_flag_parser(flags, 'from')
to_flag = self._datetime_flag_parser(flags, 'to')
for ts in sorted(self.timestamp_data.keys()):
tsdt = datetime.datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S")
            if from_flag is not None:
if tsdt < from_flag:
#print (str(tsdt) + " < " + str(from_flag))
continue
            if to_flag is not None:
if tsdt > to_flag:
#print (str(tsdt) + " > " + str(to_flag))
continue
ts_block = {};
field_block = [];
for f in self.timestamp_data[ts]:
if f in fields:
field_block.append({"name": f,
"type": self.fields[f]["type"],
"unit": self.fields[f]["unit"],
"dataType": self.fields[f]["dataType"],
"value": self.timestamp_data[ts][f]["value"],
"flags": self.timestamp_data[ts][f]["flags"]});
ts_block["timestamp"] = ts;
ts_block["fields"] = field_block;
callback(session, result="fields", nodeId=self.nodeId, timestamp_block=ts_block);
callback(session, result="done", nodeId=self.nodeId, timestamp_block=None);
def _datetime_flag_parser(self, flags, flagname):
        if flagname not in flags:
return None
dt = None
try:
dt = datetime.datetime.strptime(flags[flagname], "%Y-%m-%dT%H:%M:%S")
except ValueError:
# Badly formatted datetime, ignore it
pass
return dt
def _get_timestamp(self):
"""
Generates a properly formatted timestamp of current time
"""
return datetime.datetime.now().replace(microsecond=0).isoformat()
def _send_reject(self, session, callback):
"""
Sends a reject to the caller
Arguments:
session -- Session id, see definition in request_fields function
callback -- Callback function, see definition in request_fields function
"""
callback(session, result="error", nodeId=self.nodeId, timestamp_block=None, error_msg="Reject");
def _add_field(self, name, typename, unit=None, dataType=None):
"""
Adds a field to the device
Arguments:
name -- Name of the field
typename -- Type of the field (numeric, boolean, dateTime, timeSpan, string, enum)
unit -- [optional] only applies to "numeric". Unit for the field.
dataType -- [optional] only applies to "enum". Datatype for the field.
"""
self.fields[name] = {"type": typename, "unit": unit, "dataType": dataType};
def _add_field_timestamp_data(self, name, timestamp, value, flags=None):
"""
Adds timestamped data to a field
Arguments:
name -- Name of the field
timestamp -- Timestamp for the data (string)
value -- Field value at the timestamp
flags -- [optional] data classifier flags for the field, e.g. momentary
Formatted as a dictionary like { "flag name": "flag value" ... }
"""
        if name not in self.fields:
return False;
        if timestamp not in self.timestamp_data:
self.timestamp_data[timestamp] = {};
self.timestamp_data[timestamp][name] = {"value": value, "flags": flags};
return True;
def _add_field_momentary_data(self, name, value, flags=None):
"""
Sets momentary data to a field
Arguments:
name -- Name of the field
value -- Field value at the timestamp
flags -- [optional] data classifier flags for the field, e.g. momentary
Formatted as a dictionary like { "flag name": "flag value" ... }
"""
if name not in self.fields:
return False;
if flags is None:
flags = {};
flags["momentary"] = "true"
self.momentary_data[name] = {"value": value, "flags": flags};
return True;
def _set_momentary_timestamp(self, timestamp):
"""
This function is only for unit testing to produce predictable results.
"""
self.momentary_timestamp = timestamp;
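if __name__ == '__main__':
    # A small, self-contained readout sketch using only the methods defined
    # above; the field names, values and session id are made up for
    # illustration.
    logging.basicConfig(level=logging.DEBUG)
    device = Device("device01")
    device._add_field(name="temperature", typename="numeric", unit="C")
    device._add_field_timestamp_data(
        name="temperature", timestamp="2013-03-07T16:24:30", value="23.4",
        flags={"automaticReadout": "true"})
    device._add_field_momentary_data(
        name="temperature", value="23.5", flags={"automaticReadout": "true"})
    def print_callback(session, nodeId=None, result=None,
                       timestamp_block=None, error_msg=None):
        # XEP_0323 would normally consume this callback; here we just print.
        print(session, nodeId, result, timestamp_block, error_msg)
    device.request_fields(
        ["temperature"], flags={"momentary": "true"},
        session="session1", callback=print_callback)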
|
|
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
GMP2 in spin-orbital form
E(MP2) = 1/4 <ij||ab><ab||ij>/(ei+ej-ea-eb)
'''
import time
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.mp import mp2
from pyscf import scf
from pyscf import __config__
WITH_T2 = getattr(__config__, 'mp_gmp2_with_t2', True)
def kernel(mp, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2, verbose=None):
if mo_energy is not None or mo_coeff is not None:
# For backward compatibility. In pyscf-1.4 or earlier, mp.frozen is
# not supported when mo_energy or mo_coeff is given.
assert(mp.frozen == 0 or mp.frozen is None)
if eris is None:
eris = mp.ao2mo(mo_coeff)
if mo_energy is None:
mo_energy = eris.mo_energy
nocc = mp.nocc
nvir = mp.nmo - nocc
moidx = mp.get_frozen_mask()
eia = mo_energy[:nocc,None] - mo_energy[None,nocc:]
if with_t2:
t2 = numpy.empty((nocc,nocc,nvir,nvir), dtype=eris.oovv.dtype)
else:
t2 = None
emp2 = 0
for i in range(nocc):
gi = numpy.asarray(eris.oovv[i]).reshape(nocc,nvir,nvir)
t2i = gi.conj()/lib.direct_sum('jb+a->jba', eia, eia[i])
emp2 += numpy.einsum('jab,jab', t2i, gi) * .25
if with_t2:
t2[i] = t2i
return emp2.real, t2
def energy(mp, t2, eris):
'''MP2 energy'''
eris_oovv = numpy.array(eris.oovv)
e = 0.25*numpy.einsum('ijab,ijab', t2, eris_oovv)
if abs(e.imag) > 1e-4:
        logger.warn(mp, 'Non-zero imaginary part found in GMP2 energy %s', e)
return e.real
def update_amps(mp, t2, eris):
'''Update non-canonical MP2 amplitudes'''
#assert(isinstance(eris, _PhysicistsERIs))
nocc, nvir = t2.shape[1:3]
fock = eris.fock
mo_e_o = eris.mo_energy[:nocc]
mo_e_v = eris.mo_energy[nocc:] + mp.level_shift
foo = fock[:nocc,:nocc] - numpy.diag(mo_e_o)
fvv = fock[nocc:,nocc:] - numpy.diag(mo_e_v)
t2new = lib.einsum('ijac,bc->ijab', t2, fvv)
t2new -= lib.einsum('ki,kjab->ijab', foo, t2)
t2new = t2new + t2new.transpose(1,0,3,2)
t2new += numpy.asarray(eris.oovv).conj()
eia = mo_e_o[:,None] - mo_e_v
t2new /= lib.direct_sum('ia,jb->ijab', eia, eia)
return t2new
def make_rdm1(mp, t2=None, ao_repr=False):
r'''
One-particle density matrix in the molecular spin-orbital representation
(the occupied-virtual blocks from the orbital response contribution are
not included).
dm1[p,q] = <q^\dagger p> (p,q are spin-orbitals)
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
from pyscf.cc import gccsd_rdm
if t2 is None: t2 = mp.t2
doo, dvv = _gamma1_intermediates(mp, t2)
nocc, nvir = t2.shape[1:3]
dov = numpy.zeros((nocc,nvir))
d1 = doo, dov, dov.T, dvv
return gccsd_rdm._make_rdm1(mp, d1, with_frozen=True, ao_repr=ao_repr)
def _gamma1_intermediates(mp, t2):
doo = lib.einsum('imef,jmef->ij', t2.conj(), t2) *-.5
dvv = lib.einsum('mnea,mneb->ab', t2, t2.conj()) * .5
return doo, dvv
# spin-orbital rdm2 in Chemist's notation
def make_rdm2(mp, t2=None, ao_repr=False):
r'''
Two-particle density matrix in the molecular spin-orbital representation
dm2[p,q,r,s] = <p^\dagger r^\dagger s q>
where p,q,r,s are spin-orbitals. p,q correspond to one particle and r,s
correspond to another particle. The contraction between ERIs (in
Chemist's notation) and rdm2 is
E = einsum('pqrs,pqrs', eri, rdm2)
'''
if t2 is None: t2 = mp.t2
nmo = nmo0 = mp.nmo
nocc = nocc0 = mp.nocc
if mp.frozen is None:
dm2 = numpy.zeros((nmo0,nmo0,nmo0,nmo0), dtype=t2.dtype) # Chemist's notation
#dm2[:nocc,nocc:,:nocc,nocc:] = t2.transpose(0,2,1,3) * .5 - t2.transpose(0,3,1,2) * .5
# using t2.transpose(0,2,1,3) == -t2.transpose(0,3,1,2)
dm2[:nocc,nocc:,:nocc,nocc:] = t2.transpose(0,2,1,3)
dm2[nocc:,:nocc,nocc:,:nocc] = dm2[:nocc,nocc:,:nocc,nocc:].transpose(1,0,3,2).conj()
else:
nmo0 = mp.mo_occ.size
nocc0 = numpy.count_nonzero(mp.mo_occ > 0)
moidx = mp.get_frozen_mask()
oidx = numpy.where(moidx & (mp.mo_occ > 0))[0]
vidx = numpy.where(moidx & (mp.mo_occ ==0))[0]
dm2 = numpy.zeros((nmo0,nmo0,nmo0,nmo0), dtype=t2.dtype) # Chemist's notation
dm2[oidx[:,None,None,None],vidx[:,None,None],oidx[:,None],vidx] = \
t2.transpose(0,2,1,3)
dm2[nocc0:,:nocc0,nocc0:,:nocc0] = \
dm2[:nocc0,nocc0:,:nocc0,nocc0:].transpose(1,0,3,2).conj()
dm1 = make_rdm1(mp, t2)
dm1[numpy.diag_indices(nocc0)] -= 1
# Be careful with convention of dm1 and dm2
# dm1[q,p] = <p^\dagger q>
# dm2[p,q,r,s] = < p^\dagger r^\dagger s q >
# E = einsum('pq,qp', h1, dm1) + .5 * einsum('pqrs,pqrs', eri, dm2)
# When adding dm1 contribution, dm1 subscripts need to be flipped
for i in range(nocc0):
dm2[i,i,:,:] += dm1.T
dm2[:,:,i,i] += dm1.T
dm2[:,i,i,:] -= dm1.T
dm2[i,:,:,i] -= dm1
for i in range(nocc0):
for j in range(nocc0):
dm2[i,i,j,j] += 1
dm2[i,j,j,i] -= 1
if ao_repr:
from pyscf.cc import ccsd_rdm
dm2 = ccsd_rdm._rdm2_mo2ao(dm2, mp.mo_coeff)
return dm2
class GMP2(mp2.MP2):
def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
assert(isinstance(mf, scf.ghf.GHF))
mp2.MP2.__init__(self, mf, frozen, mo_coeff, mo_occ)
def ao2mo(self, mo_coeff=None):
if mo_coeff is None: mo_coeff = self.mo_coeff
nmo = self.nmo
nocc = self.nocc
nvir = nmo - nocc
mem_incore = nocc**2*nvir**2*3 * 8/1e6
mem_now = lib.current_memory()[0]
if (self._scf._eri is not None and
(mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):
return _make_eris_incore(self, mo_coeff, verbose=self.verbose)
elif getattr(self._scf, 'with_df', None):
raise NotImplementedError
else:
return _make_eris_outcore(self, mo_coeff, self.verbose)
make_rdm1 = make_rdm1
make_rdm2 = make_rdm2
def nuc_grad_method(self):
raise NotImplementedError
# For non-canonical MP2
energy = energy
update_amps = update_amps
def init_amps(self, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2):
return kernel(self, mo_energy, mo_coeff, eris, with_t2)
MP2 = GMP2
from pyscf import scf
scf.ghf.GHF.MP2 = lib.class_as_method(MP2)
#TODO: Merge this _PhysicistsERIs class with gccsd._PhysicistsERIs class
class _PhysicistsERIs:
def __init__(self, mol=None):
self.mol = mol
self.mo_coeff = None
self.nocc = None
self.fock = None
self.e_hf = None
self.orbspin = None
self.oovv = None
def _common_init_(self, mp, mo_coeff=None):
if mo_coeff is None:
mo_coeff = mp.mo_coeff
if mo_coeff is None:
raise RuntimeError('mo_coeff, mo_energy are not initialized.\n'
'You may need to call mf.kernel() to generate them.')
mp_mo_coeff = mo_coeff
self.mol = mp.mol
mo_idx = mp.get_frozen_mask()
if getattr(mo_coeff, 'orbspin', None) is not None:
self.orbspin = mo_coeff.orbspin[mo_idx]
mo_coeff = lib.tag_array(mo_coeff[:,mo_idx], orbspin=self.orbspin)
else:
orbspin = scf.ghf.guess_orbspin(mo_coeff)
mo_coeff = mo_coeff[:,mo_idx]
if not numpy.any(orbspin == -1):
self.orbspin = orbspin[mo_idx]
mo_coeff = lib.tag_array(mo_coeff, orbspin=self.orbspin)
self.mo_coeff = mo_coeff
if mp_mo_coeff is mp._scf.mo_coeff and mp._scf.converged:
self.mo_energy = mp._scf.mo_energy[mo_idx]
self.fock = numpy.diag(self.mo_energy)
self.e_hf = mp._scf.e_tot
else:
dm = mp._scf.make_rdm1(mo_coeff, mp.mo_occ)
vhf = mp._scf.get_veff(mp.mol, dm)
fockao = mp._scf.get_fock(vhf=vhf, dm=dm)
self.fock = self.mo_coeff.conj().T.dot(fockao).dot(self.mo_coeff)
self.e_hf = mp._scf.energy_tot(dm=dm, vhf=vhf)
self.mo_energy = self.fock.diagonal().real
def _make_eris_incore(mp, mo_coeff=None, ao2mofn=None, verbose=None):
eris = _PhysicistsERIs()
eris._common_init_(mp, mo_coeff)
nocc = mp.nocc
nao, nmo = eris.mo_coeff.shape
nvir = nmo - nocc
orbspin = eris.orbspin
if callable(ao2mofn):
orbo = eris.mo_coeff[:,:nocc]
orbv = eris.mo_coeff[:,nocc:]
orbo = lib.tag_array(orbo, orbspin=orbspin)
eri = ao2mofn((orbo,orbv,orbo,orbv)).reshape(nocc,nvir,nocc,nvir)
else:
orboa = eris.mo_coeff[:nao//2,:nocc]
orbob = eris.mo_coeff[nao//2:,:nocc]
orbva = eris.mo_coeff[:nao//2,nocc:]
orbvb = eris.mo_coeff[nao//2:,nocc:]
if orbspin is None:
eri = ao2mo.kernel(mp._scf._eri, (orboa,orbva,orboa,orbva))
eri += ao2mo.kernel(mp._scf._eri, (orbob,orbvb,orbob,orbvb))
eri1 = ao2mo.kernel(mp._scf._eri, (orboa,orbva,orbob,orbvb))
eri += eri1
eri += eri1.T
eri = eri.reshape(nocc,nvir,nocc,nvir)
else:
co = orboa + orbob
cv = orbva + orbvb
eri = ao2mo.kernel(mp._scf._eri, (co,cv,co,cv)).reshape(nocc,nvir,nocc,nvir)
sym_forbid = (orbspin[:nocc,None] != orbspin[nocc:])
eri[sym_forbid,:,:] = 0
eri[:,:,sym_forbid] = 0
eris.oovv = eri.transpose(0,2,1,3) - eri.transpose(0,2,3,1)
return eris
def _make_eris_outcore(mp, mo_coeff=None, verbose=None):
cput0 = (time.clock(), time.time())
log = logger.Logger(mp.stdout, mp.verbose)
eris = _PhysicistsERIs()
eris._common_init_(mp, mo_coeff)
nocc = mp.nocc
nao, nmo = eris.mo_coeff.shape
nvir = nmo - nocc
assert(eris.mo_coeff.dtype == numpy.double)
orboa = eris.mo_coeff[:nao//2,:nocc]
orbob = eris.mo_coeff[nao//2:,:nocc]
orbva = eris.mo_coeff[:nao//2,nocc:]
orbvb = eris.mo_coeff[nao//2:,nocc:]
orbspin = eris.orbspin
feri = eris.feri = lib.H5TmpFile()
dtype = numpy.result_type(eris.mo_coeff).char
eris.oovv = feri.create_dataset('oovv', (nocc,nocc,nvir,nvir), dtype)
if orbspin is None:
max_memory = mp.max_memory-lib.current_memory()[0]
blksize = min(nocc, max(2, int(max_memory*1e6/8/(nocc*nvir**2*2))))
max_memory = max(2000, max_memory)
fswap = lib.H5TmpFile()
ao2mo.kernel(mp.mol, (orboa,orbva,orboa,orbva), fswap, 'aaaa',
max_memory=max_memory, verbose=log)
ao2mo.kernel(mp.mol, (orboa,orbva,orbob,orbvb), fswap, 'aabb',
max_memory=max_memory, verbose=log)
ao2mo.kernel(mp.mol, (orbob,orbvb,orboa,orbva), fswap, 'bbaa',
max_memory=max_memory, verbose=log)
ao2mo.kernel(mp.mol, (orbob,orbvb,orbob,orbvb), fswap, 'bbbb',
max_memory=max_memory, verbose=log)
for p0, p1 in lib.prange(0, nocc, blksize):
tmp = numpy.asarray(fswap['aaaa'][p0*nvir:p1*nvir])
tmp += numpy.asarray(fswap['aabb'][p0*nvir:p1*nvir])
tmp += numpy.asarray(fswap['bbaa'][p0*nvir:p1*nvir])
tmp += numpy.asarray(fswap['bbbb'][p0*nvir:p1*nvir])
tmp = tmp.reshape(p1-p0,nvir,nocc,nvir)
eris.oovv[p0:p1] = tmp.transpose(0,2,1,3) - tmp.transpose(0,2,3,1)
else: # with orbspin
orbo = orboa + orbob
orbv = orbva + orbvb
max_memory = mp.max_memory-lib.current_memory()[0]
blksize = min(nocc, max(2, int(max_memory*1e6/8/(nocc*nvir**2*2))))
max_memory = max(2000, max_memory)
fswap = lib.H5TmpFile()
ao2mo.kernel(mp.mol, (orbo,orbv,orbo,orbv), fswap,
max_memory=max_memory, verbose=log)
sym_forbid = orbspin[:nocc,None] != orbspin[nocc:]
for p0, p1 in lib.prange(0, nocc, blksize):
tmp = numpy.asarray(fswap['eri_mo'][p0*nvir:p1*nvir])
tmp = tmp.reshape(p1-p0,nvir,nocc,nvir)
tmp[sym_forbid[p0:p1]] = 0
tmp[:,:,sym_forbid] = 0
eris.oovv[p0:p1] = tmp.transpose(0,2,1,3) - tmp.transpose(0,2,3,1)
cput0 = log.timer_debug1('transforming oovv', *cput0)
return eris
del(WITH_T2)
if __name__ == '__main__':
from functools import reduce
from pyscf import scf
from pyscf import gto
mol = gto.Mole()
mol.atom = [['O', (0., 0., 0.)],
['O', (1.21, 0., 0.)]]
mol.basis = 'cc-pvdz'
mol.spin = 2
mol.build()
mf = scf.UHF(mol).run()
mf = scf.addons.convert_to_ghf(mf)
frozen = [0,1,2,3]
pt = GMP2(mf, frozen=frozen)
emp2, t2 = pt.kernel()
print(emp2 - -0.345306881488508)
pt.max_memory = 1
emp2, t2 = pt.kernel()
print(emp2 - -0.345306881488508)
dm1 = pt.make_rdm1(t2)
dm2 = pt.make_rdm2(t2)
nao = mol.nao_nr()
mo_a = mf.mo_coeff[:nao]
mo_b = mf.mo_coeff[nao:]
nmo = mo_a.shape[1]
eri = ao2mo.kernel(mf._eri, mo_a+mo_b, compact=False).reshape([nmo]*4)
orbspin = mf.mo_coeff.orbspin
sym_forbid = (orbspin[:,None] != orbspin)
eri[sym_forbid,:,:] = 0
eri[:,:,sym_forbid] = 0
hcore = scf.RHF(mol).get_hcore()
h1 = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))
h1+= reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))
e1 = numpy.einsum('ij,ji', h1, dm1)
e1+= numpy.einsum('ijkl,ijkl', eri, dm2) * .5
e1+= mol.energy_nuc()
print(e1 - pt.e_tot)
mf = scf.UHF(mol).run(max_cycle=1)
mf = scf.addons.convert_to_ghf(mf)
pt = GMP2(mf)
print(pt.kernel()[0] - -0.371240143556976)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities related to layer/model functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.conv_utils import convert_kernel
from tensorflow.python.util.tf_export import tf_export
def get_source_inputs(tensor, layer=None, node_index=None):
"""Returns the list of input tensors necessary to compute `tensor`.
Output will always be a list of tensors
(potentially with 1 element).
Arguments:
tensor: The tensor to start from.
layer: Origin layer of the tensor. Will be
determined via tensor._keras_history if not provided.
node_index: Origin node index of the tensor.
Returns:
List of input tensors.
"""
if not hasattr(tensor, '_keras_history'):
return tensor
if layer is None or node_index:
layer, node_index, _ = tensor._keras_history
if not layer._inbound_nodes:
return [tensor]
else:
node = layer._inbound_nodes[node_index]
if not node.inbound_layers:
# Reached an Input layer, stop recursion.
return node.input_tensors
else:
source_tensors = []
for i in range(len(node.inbound_layers)):
x = node.input_tensors[i]
layer = node.inbound_layers[i]
node_index = node.node_indices[i]
previous_sources = get_source_inputs(x, layer, node_index)
# Avoid input redundancy.
for x in previous_sources:
if x not in source_tensors:
source_tensors.append(x)
return source_tensors
def count_params(weights):
"""Count the total number of scalars composing the weights.
Arguments:
weights: An iterable containing the weights on which to compute params
Returns:
The total number of scalars composing the weights
"""
return int(np.sum([np.prod(p.get_shape().as_list()) for p in set(weights)]))
def print_summary(model, line_length=None, positions=None, print_fn=None):
"""Prints a summary of a model.
Arguments:
model: Keras model instance.
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements in each line.
If not provided, defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
It defaults to `print` (prints to stdout).
"""
if print_fn is None:
print_fn = print
if model.__class__.__name__ == 'Sequential':
sequential_like = True
elif not model._is_graph_network:
# We treat subclassed models as a simple sequence of layers, for logging
# purposes.
sequential_like = True
else:
sequential_like = True
nodes_by_depth = model._nodes_by_depth.values()
nodes = []
for v in nodes_by_depth:
if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):
# if the model has multiple nodes
# or if the nodes have multiple inbound_layers
# the model is no longer sequential
sequential_like = False
break
nodes += v
if sequential_like:
# search for shared layers
for layer in model.layers:
flag = False
for node in layer._inbound_nodes:
if node in nodes:
if flag:
sequential_like = False
break
else:
flag = True
if not sequential_like:
break
if sequential_like:
line_length = line_length or 65
positions = positions or [.45, .85, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #']
else:
line_length = line_length or 98
positions = positions or [.33, .55, .67, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
relevant_nodes = []
for v in model._nodes_by_depth.values():
relevant_nodes += v
def print_row(fields, positions):
line = ''
for i in range(len(fields)):
if i > 0:
line = line[:-1] + ' '
line += str(fields[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print_fn(line)
print_fn('_' * line_length)
print_row(to_display, positions)
print_fn('=' * line_length)
def print_layer_summary(layer):
"""Prints a summary for a single layer.
Arguments:
layer: target layer.
"""
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
except RuntimeError: # output_shape unknown in Eager mode.
output_shape = '?'
name = layer.name
cls_name = layer.__class__.__name__
fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
print_row(fields, positions)
def print_layer_summary_with_connections(layer):
"""Prints a summary for a single layer (including topological connections).
Arguments:
layer: target layer.
"""
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
connections = []
for node in layer._inbound_nodes:
if relevant_nodes and node not in relevant_nodes:
# node is not part of the current network
continue
for i in range(len(node.inbound_layers)):
inbound_layer = node.inbound_layers[i].name
inbound_node_index = node.node_indices[i]
inbound_tensor_index = node.tensor_indices[i]
connections.append(inbound_layer + '[' + str(inbound_node_index) +
'][' + str(inbound_tensor_index) + ']')
name = layer.name
cls_name = layer.__class__.__name__
if not connections:
first_connection = ''
else:
first_connection = connections[0]
fields = [
name + ' (' + cls_name + ')', output_shape,
layer.count_params(), first_connection
]
print_row(fields, positions)
if len(connections) > 1:
for i in range(1, len(connections)):
fields = ['', '', '', connections[i]]
print_row(fields, positions)
layers = model.layers
for i in range(len(layers)):
if sequential_like:
print_layer_summary(layers[i])
else:
print_layer_summary_with_connections(layers[i])
if i == len(layers) - 1:
print_fn('=' * line_length)
else:
print_fn('_' * line_length)
model._check_trainable_weights_consistency()
if hasattr(model, '_collected_trainable_weights'):
trainable_count = count_params(model._collected_trainable_weights)
else:
trainable_count = count_params(model.trainable_weights)
non_trainable_count = count_params(model.non_trainable_weights)
print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
print_fn('Trainable params: {:,}'.format(trainable_count))
print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
print_fn('_' * line_length)
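# A short usage sketch (assuming a built Keras `model` instance): the summary
# can either be printed directly or captured via `print_fn`, e.g.
#
#     print_summary(model, line_length=80)
#     captured = []
#     print_summary(model, print_fn=captured.append)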
def gather_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected trainable weights/variables.
"""
if not trainable:
return []
weights = []
for layer in sub_layers:
weights += layer.trainable_weights
trainable_extra_variables = [
v for v in extra_variables if v.trainable]
return weights + trainable_extra_variables
def gather_non_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the non-trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected non-trainable weights/variables.
"""
trainable_extra_variables = []
non_trainable_extra_variables = []
for v in extra_variables:
if v.trainable:
trainable_extra_variables.append(v)
else:
non_trainable_extra_variables.append(v)
weights = []
for layer in sub_layers:
weights += layer.non_trainable_weights
if not trainable:
trainable_weights = []
for layer in sub_layers:
trainable_weights += layer.trainable_weights
return (trainable_weights + trainable_extra_variables
+ weights + non_trainable_extra_variables)
return weights + non_trainable_extra_variables
@tf_export('keras.utils.convert_all_kernels_in_model')
def convert_all_kernels_in_model(model):
"""Converts all convolution kernels in a model from Theano to TensorFlow.
Also works from TensorFlow to Theano.
Arguments:
model: target model for the conversion.
"""
# Note: SeparableConvolution not included
# since only supported by TF.
conv_classes = {
'Conv1D',
'Conv2D',
'Conv3D',
'Conv2DTranspose',
}
to_assign = []
for layer in model.layers:
if layer.__class__.__name__ in conv_classes:
original_kernel = K.get_value(layer.kernel)
converted_kernel = convert_kernel(original_kernel)
to_assign.append((layer.kernel, converted_kernel))
K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
previous_feature_map_shape,
target_data_format='channels_first'):
"""Utility useful when changing a convnet's `data_format`.
When porting the weights of a convnet from one data format to the other,
if the convnet includes a `Flatten` layer
(applied to the last convolutional feature map)
followed by a `Dense` layer, the weights of that `Dense` layer
should be updated to reflect the new dimension ordering.
Arguments:
dense: The target `Dense` layer.
previous_feature_map_shape: A shape tuple of 3 integers,
e.g. `(512, 7, 7)`. The shape of the convolutional
feature map right before the `Flatten` layer that
came before the target `Dense` layer.
target_data_format: One of "channels_last", "channels_first".
Set it "channels_last"
if converting a "channels_first" model to "channels_last",
or reciprocally.
"""
assert target_data_format in {'channels_last', 'channels_first'}
kernel, bias = dense.get_weights()
for i in range(kernel.shape[1]):
if target_data_format == 'channels_first':
c, h, w = previous_feature_map_shape
original_fm_shape = (h, w, c)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (2, 0, 1)) # last -> first
else:
h, w, c = previous_feature_map_shape
original_fm_shape = (c, h, w)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (1, 2, 0)) # first -> last
kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
dense.set_weights([kernel, bias])
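if __name__ == '__main__':
  # A minimal sketch of converting a `Dense` layer that follows a `Flatten`
  # layer when switching data formats. The toy model below is illustrative
  # and assumes the public tf.keras layer API.
  import tensorflow as tf
  model = tf.keras.Sequential([
      tf.keras.layers.Conv2D(512, 3, padding='same', input_shape=(7, 7, 3)),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(10),
  ])
  dense = model.layers[-1]
  # The feature map right before `Flatten` is 7x7x512; its shape is passed in
  # the target ordering (channels_first), as documented above.
  convert_dense_weights_data_format(
      dense, previous_feature_map_shape=(512, 7, 7),
      target_data_format='channels_first')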
|
|
from common_fixtures import * # NOQA
def test_container_port_create_start(super_client, new_context):
image_uuid = new_context.image_uuid
host = new_context.host
host_ip = new_context.host_ip.address
client = new_context.client
assert host_ip is not None
c = client.create_container(imageUuid=image_uuid,
startOnCreate=False,
ports=[
80,
'8081:81',
'8082:82/udp'])
assert c.state == 'creating'
c = client.wait_success(c)
assert c.state == 'stopped'
c_admin = super_client.update(c, requestedHostId=host.id)
assert c_admin.requestedHostId == host.id
ports = c.ports_link()
assert len(ports) == 3
count = 0
for port in ports:
assert port.kind == 'userPort'
if port.privatePort == 80:
count += 1
assert port.publicPort is None
assert port.protocol == 'tcp'
assert port.instanceId == c.id
assert port.state == 'inactive'
elif port.privatePort == 81:
count += 1
assert port.publicPort == 8081
assert port.protocol == 'tcp'
assert port.instanceId == c.id
assert port.state == 'inactive'
elif port.privatePort == 82:
count += 1
assert port.publicPort == 8082
assert port.protocol == 'udp'
assert port.instanceId == c.id
assert port.state == 'inactive'
assert count == 3
c = client.wait_success(c.start())
assert super_client.reload(c).hosts()[0].id == host.id
for port in c.ports_link():
assert port.state == 'active'
private_ip = port.privateIpAddress()
public_ip = port.publicIpAddress()
assert private_ip.address == c.primaryIpAddress
assert public_ip.address == host_ip
assert port.id in [x.id for x in private_ip.privatePorts()]
assert port.id in [x.id for x in public_ip.publicPorts()]
assert port.id not in [x.id for x in public_ip.privatePorts()]
assert port.id not in [x.id for x in private_ip.publicPorts()]
def test_container_port_start(new_context):
client = new_context.client
image_uuid = new_context.image_uuid
c = client.create_container(imageUuid=image_uuid,
ports=[
80,
'8081:81',
'8082:82/udp'])
assert c.state == 'creating'
c = client.wait_success(c)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 3
count = 0
for port in ports:
if port.privatePort == 80:
count += 1
assert port.protocol == 'tcp'
assert port.instanceId == c.id
assert port.state == 'active'
elif port.privatePort == 81:
count += 1
assert port.publicPort == 8081
assert port.protocol == 'tcp'
assert port.instanceId == c.id
assert port.state == 'active'
elif port.privatePort == 82:
count += 1
assert port.publicPort == 8082
assert port.protocol == 'udp'
assert port.instanceId == c.id
assert port.state == 'active'
assert count == 3
def test_container_port_stop(new_context):
client = new_context.client
image_uuid = new_context.image_uuid
c = client.create_container(imageUuid=image_uuid,
ports=[
80,
'8081:81',
'8082:82/udp'])
assert c.state == 'creating'
c = client.wait_success(c)
assert c.state == 'running'
c = client.wait_success(c.stop())
assert c.state == 'stopped'
ports = c.ports_link()
assert len(ports) == 3
count = 0
for port in ports:
assert port.state == 'inactive'
assert port.publicIpAddressId is not None
assert port.privateIpAddressId is not None
if port.privatePort == 80:
count += 1
elif port.privatePort == 81:
count += 1
assert port.publicPort == 8081
elif port.privatePort == 82:
count += 1
assert port.publicPort == 8082
assert count == 3
def test_container_port_purge(new_context):
client = new_context.client
image_uuid = new_context.image_uuid
c = client.create_container(imageUuid=image_uuid,
ports=[
80,
'8081:81',
'8082:82/udp'])
assert c.state == 'creating'
c = client.wait_success(c)
assert c.state == 'running'
c = client.wait_success(c.stop(remove=True))
assert c.state == 'removed'
# instance.remove will remove the ports_links
assert len(c.ports_link()) == 0
def test_port_validation(client, context):
try:
client.create_container(imageUuid=context.image_uuid,
ports=['a'])
assert False
except cattle.ApiError as e:
assert e.error.code == 'PortWrongFormat'
def test_ports_service(super_client, new_context):
client = new_context.client
c = new_context.create_container(ports=['80'])
agent = super_client.reload(c).hosts()[0].agent()
assert agent is not None
port = c.ports_link()[0]
assert port.publicPort is None
port = client.update(port, publicPort=12345)
assert port.state == 'updating-active'
assert port.publicPort == 12345
port = client.wait_success(port)
assert port.state == 'active'
def test_ports_overlapping(new_context):
port_specs = [
'1234:80/tcp',
'2345:80/tcp',
'1234:80/udp',
'2345:80/udp',
]
c = new_context.create_container(ports=port_specs)
ports = c.ports_link()
assert len(ports) == 4
found = {'{}:{}/{}'.format(x.publicPort, x.privatePort, x.protocol)
for x in ports}
assert set(port_specs) == found
|
|
#
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Specialized {@link Set}{@code <}{@link ATNConfig}{@code >} that can track
# info about the set, with support for combining similar configurations using a
# graph-structured stack.
#/
from io import StringIO
from antlr4.PredictionContext import PredictionContext, merge
from antlr4.Utils import str_list
from antlr4.atn.ATN import ATN
from antlr4.atn.SemanticContext import SemanticContext
from antlr4.error.Errors import UnsupportedOperationException, IllegalStateException
class ATNConfigSet(object):
#
# The reason that we need this is because we don't want the hash map to use
# the standard hash code and equals. We need all configurations with the same
# {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles
# the number of objects associated with ATNConfigs. The other solution is to
# use a hash table that lets us specify the equals/hashcode operation.
def __init__(self, fullCtx=True):
# All configs but hashed by (s, i, _, pi) not including context. Wiped out
# when we go readonly as this set becomes a DFA state.
self.configLookup = set()
# Indicates that this configuration set is part of a full context
# LL prediction. It will be used to determine how to merge $. With SLL
# it's a wildcard whereas it is not for LL context merge.
self.fullCtx = fullCtx
# Indicates that the set of configurations is read-only. Do not
# allow any code to manipulate the set; DFA states will point at
# the sets and they must not change. This does not protect the other
# fields; in particular, conflictingAlts is set after
# we've made this readonly.
self.readonly = False
# Track the elements as they are added to the set; supports get(i)#/
self.configs = []
# TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation
# TODO: can we track conflicts as they are added to save scanning configs later?
self.uniqueAlt = 0
self.conflictingAlts = None
# Used in parser and lexer. In lexer, it indicates we hit a pred
# while computing a closure operation. Don't make a DFA state from this.
self.hasSemanticContext = False
self.dipsIntoOuterContext = False
self.cachedHashCode = -1
def __iter__(self):
return self.configs.__iter__()
# Adding a new config means merging contexts with existing configs for
# {@code (s, i, pi, _)}, where {@code s} is the
# {@link ATNConfig#state}, {@code i} is the {@link ATNConfig#alt}, and
# {@code pi} is the {@link ATNConfig#semanticContext}. We use
# {@code (s,i,pi)} as key.
#
# <p>This method updates {@link #dipsIntoOuterContext} and
# {@link #hasSemanticContext} when necessary.</p>
#/
def add(self, config, mergeCache=None):
if self.readonly:
raise Exception("This set is readonly")
if config.semanticContext is not SemanticContext.NONE:
self.hasSemanticContext = True
if config.reachesIntoOuterContext > 0:
self.dipsIntoOuterContext = True
existing = self.getOrAdd(config)
if existing is config:
self.cachedHashCode = -1
self.configs.append(config) # track order here
return True
# a previous (s,i,pi,_), merge with it and save result
rootIsWildcard = not self.fullCtx
merged = merge(existing.context, config.context, rootIsWildcard, mergeCache)
# no need to check for existing.context, config.context in cache
# since only way to create new graphs is "call rule" and here. We
# cache at both places.
existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext)
# make sure to preserve the precedence filter suppression during the merge
if config.precedenceFilterSuppressed:
existing.precedenceFilterSuppressed = True
existing.context = merged # replace context; no need to alt mapping
return True
def getOrAdd(self, config):
for c in self.configLookup:
if c==config:
return c
self.configLookup.add(config)
return config
def getStates(self):
states = set()
for c in self.configs:
states.add(c.state)
return states
def getPredicates(self):
preds = list()
for c in self.configs:
if c.semanticContext!=SemanticContext.NONE:
preds.append(c.semanticContext)
return preds
def get(self, i):
return self.configs[i]
def optimizeConfigs(self, interpreter):
if self.readonly:
raise IllegalStateException("This set is readonly")
if len(self.configLookup)==0:
return
for config in self.configs:
config.context = interpreter.getCachedContext(config.context)
def addAll(self, coll):
for c in coll:
self.add(c)
return False
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, ATNConfigSet):
return False
same = self.configs is not None and \
self.configs==other.configs and \
self.fullCtx == other.fullCtx and \
self.uniqueAlt == other.uniqueAlt and \
self.conflictingAlts == other.conflictingAlts and \
self.hasSemanticContext == other.hasSemanticContext and \
self.dipsIntoOuterContext == other.dipsIntoOuterContext
return same
def __hash__(self):
if self.readonly:
if self.cachedHashCode == -1:
self.cachedHashCode = self.hashConfigs()
return self.cachedHashCode
return self.hashConfigs()
def hashConfigs(self):
with StringIO() as buf:
for cfg in self.configs:
buf.write(unicode(cfg))
return hash(buf.getvalue())
def __len__(self):
return len(self.configs)
def isEmpty(self):
return len(self.configs)==0
def __contains__(self, item):
if self.configLookup is None:
raise UnsupportedOperationException("This method is not implemented for readonly sets.")
return item in self.configLookup
def containsFast(self, obj):
if self.configLookup is None:
raise UnsupportedOperationException("This method is not implemented for readonly sets.")
        return obj in self.configLookup
def clear(self):
if self.readonly:
raise IllegalStateException("This set is readonly")
self.configs.clear()
self.cachedHashCode = -1
self.configLookup.clear()
def setReadonly(self, readonly):
self.readonly = readonly
self.configLookup = None # can't mod, no need for lookup cache
def __str__(self):
return unicode(self)
def __unicode__(self):
with StringIO() as buf:
buf.write(str_list(self.configs))
if self.hasSemanticContext:
buf.write(u",hasSemanticContext=")
buf.write(unicode(self.hasSemanticContext))
if self.uniqueAlt!=ATN.INVALID_ALT_NUMBER:
buf.write(u",uniqueAlt=")
buf.write(unicode(self.uniqueAlt))
if self.conflictingAlts is not None:
buf.write(u",conflictingAlts=")
buf.write(unicode(self.conflictingAlts))
if self.dipsIntoOuterContext:
buf.write(u",dipsIntoOuterContext")
return buf.getvalue()
class OrderedATNConfigSet(ATNConfigSet):
def __init__(self):
super(OrderedATNConfigSet, self).__init__()
# self.configLookup = set()
|
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of battery_utils.py
"""
# pylint: disable=W0613
import logging
import os
import sys
import unittest
from pylib import constants
from pylib.device import battery_utils
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.device import device_utils_test
from pylib.utils import mock_calls
# RunCommand from third_party/android_testrunner/run_command.py is mocked
# below, so its path needs to be in sys.path.
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=F0401
_DUMPSYS_OUTPUT = [
'9,0,i,uid,1000,test_package1',
'9,0,i,uid,1001,test_package2',
'9,1000,l,pwi,uid,1',
'9,1001,l,pwi,uid,2'
]
class BatteryUtilsTest(mock_calls.TestCase):
_NEXUS_5 = {
'name': 'Nexus 5',
'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
'enable_command': (
'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'echo 1 > /sys/class/power_supply/usb/online'),
'disable_command': (
'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'chmod 644 /sys/class/power_supply/usb/online && '
'echo 0 > /sys/class/power_supply/usb/online'),
'charge_counter': None,
'voltage': None,
'current': None,
}
_NEXUS_6 = {
'name': 'Nexus 6',
'witness_file': None,
'enable_command': None,
'disable_command': None,
'charge_counter': (
'/sys/class/power_supply/max170xx_battery/charge_counter_ext'),
'voltage': '/sys/class/power_supply/max170xx_battery/voltage_now',
'current': '/sys/class/power_supply/max170xx_battery/current_now',
}
_NEXUS_10 = {
'name': 'Nexus 10',
'witness_file': None,
'enable_command': None,
'disable_command': None,
'charge_counter': (
'/sys/class/power_supply/ds2784-fuelgauge/charge_counter_ext'),
'voltage': '/sys/class/power_supply/ds2784-fuelgauge/voltage_now',
'current': '/sys/class/power_supply/ds2784-fuelgauge/current_now',
}
def ShellError(self, output=None, status=1):
def action(cmd, *args, **kwargs):
raise device_errors.AdbShellCommandFailedError(
cmd, output, status, str(self.device))
if output is None:
output = 'Permission denied\n'
return action
def setUp(self):
self.adb = device_utils_test._AdbWrapperMock('0123456789abcdef')
self.device = device_utils.DeviceUtils(
self.adb, default_timeout=10, default_retries=0)
self.watchMethodCalls(self.call.adb, ignore=['GetDeviceSerial'])
self.battery = battery_utils.BatteryUtils(
self.device, default_timeout=10, default_retries=0)
class BatteryUtilsInitTest(unittest.TestCase):
def testInitWithDeviceUtil(self):
serial = '0fedcba987654321'
d = device_utils.DeviceUtils(serial)
b = battery_utils.BatteryUtils(d)
self.assertEqual(d, b._device)
def testInitWithMissing_fails(self):
with self.assertRaises(TypeError):
battery_utils.BatteryUtils(None)
with self.assertRaises(TypeError):
battery_utils.BatteryUtils('')
class BatteryUtilsSetChargingTest(BatteryUtilsTest):
@mock.patch('time.sleep', mock.Mock())
def testSetCharging_enabled(self):
self.battery._cache['profile'] = self._NEXUS_5
with self.assertCalls(
(self.call.device.RunShellCommand(
mock.ANY, check_return=True, as_root=True), []),
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
mock.ANY, check_return=True, as_root=True), []),
(self.call.battery.GetCharging(), True)):
self.battery.SetCharging(True)
def testSetCharging_alreadyEnabled(self):
self.battery._cache['profile'] = self._NEXUS_5
with self.assertCalls(
(self.call.device.RunShellCommand(
mock.ANY, check_return=True, as_root=True), []),
(self.call.battery.GetCharging(), True)):
self.battery.SetCharging(True)
@mock.patch('time.sleep', mock.Mock())
def testSetCharging_disabled(self):
self.battery._cache['profile'] = self._NEXUS_5
with self.assertCalls(
(self.call.device.RunShellCommand(
mock.ANY, check_return=True, as_root=True), []),
(self.call.battery.GetCharging(), True),
(self.call.device.RunShellCommand(
mock.ANY, check_return=True, as_root=True), []),
(self.call.battery.GetCharging(), False)):
self.battery.SetCharging(False)
class BatteryUtilsSetBatteryMeasurementTest(BatteryUtilsTest):
@mock.patch('time.sleep', mock.Mock())
def testBatteryMeasurementWifi(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
mock.ANY, retries=0, single_line=True,
timeout=10, check_return=True), '22'),
(self.call.battery._ClearPowerData(), True),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'ac', '0'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'usb', '0'], check_return=True), []),
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True), []),
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery'], check_return=True), ['UPDATES STOPPED']),
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery'], check_return=True), [])):
with self.battery.BatteryMeasurement():
pass
@mock.patch('time.sleep', mock.Mock())
def testBatteryMeasurementUsb(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
mock.ANY, retries=0, single_line=True,
timeout=10, check_return=True), '22'),
(self.call.battery._ClearPowerData(), True),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'ac', '0'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'usb', '0'], check_return=True), []),
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True), []),
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery'], check_return=True), ['UPDATES STOPPED']),
(self.call.battery.GetCharging(), True)):
with self.battery.BatteryMeasurement():
pass
class BatteryUtilsGetPowerData(BatteryUtilsTest):
def testGetPowerData(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT)):
data = self.battery.GetPowerData()
check = {
'test_package1': {'uid': '1000', 'data': [1.0]},
'test_package2': {'uid': '1001', 'data': [2.0]}
}
self.assertEqual(data, check)
def testGetPowerData_packageCollisionSame(self):
self.battery._cache['uids'] = {'test_package1': '1000'}
with self.assertCall(
self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT):
data = self.battery.GetPowerData()
check = {
'test_package1': {'uid': '1000', 'data': [1.0]},
'test_package2': {'uid': '1001', 'data': [2.0]}
}
self.assertEqual(data, check)
def testGetPowerData_packageCollisionDifferent(self):
self.battery._cache['uids'] = {'test_package1': '1'}
with self.assertCall(
self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT):
with self.assertRaises(device_errors.CommandFailedError):
self.battery.GetPowerData()
def testGetPowerData_cacheCleared(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT)):
self.battery._cache.clear()
data = self.battery.GetPowerData()
check = {
'test_package1': {'uid': '1000', 'data': [1.0]},
'test_package2': {'uid': '1001', 'data': [2.0]}
}
self.assertEqual(data, check)
def testGetPackagePowerData(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT)):
data = self.battery.GetPackagePowerData('test_package2')
self.assertEqual(data, {'uid': '1001', 'data': [2.0]})
def testGetPackagePowerData_badPackage(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT)):
data = self.battery.GetPackagePowerData('not_a_package')
self.assertEqual(data, None)
class BatteryUtilsChargeDevice(BatteryUtilsTest):
@mock.patch('time.sleep', mock.Mock())
def testChargeDeviceToLevel(self):
with self.assertCalls(
(self.call.battery.SetCharging(True)),
(self.call.battery.GetBatteryInfo(), {'level': '50'}),
(self.call.battery.GetBatteryInfo(), {'level': '100'})):
self.battery.ChargeDeviceToLevel(95)
class BatteryUtilsGetBatteryInfoTest(BatteryUtilsTest):
def testGetBatteryInfo_normal(self):
with self.assertCall(
self.call.device.RunShellCommand(
['dumpsys', 'battery'], check_return=True),
[
'Current Battery Service state:',
' AC powered: false',
' USB powered: true',
' level: 100',
' temperature: 321',
]):
self.assertEquals(
{
'AC powered': 'false',
'USB powered': 'true',
'level': '100',
'temperature': '321',
},
self.battery.GetBatteryInfo())
def testGetBatteryInfo_nothing(self):
with self.assertCall(
self.call.device.RunShellCommand(
['dumpsys', 'battery'], check_return=True), []):
self.assertEquals({}, self.battery.GetBatteryInfo())
class BatteryUtilsGetChargingTest(BatteryUtilsTest):
def testGetCharging_usb(self):
with self.assertCall(
self.call.battery.GetBatteryInfo(), {'USB powered': 'true'}):
self.assertTrue(self.battery.GetCharging())
def testGetCharging_usbFalse(self):
with self.assertCall(
self.call.battery.GetBatteryInfo(), {'USB powered': 'false'}):
self.assertFalse(self.battery.GetCharging())
def testGetCharging_ac(self):
with self.assertCall(
self.call.battery.GetBatteryInfo(), {'AC powered': 'true'}):
self.assertTrue(self.battery.GetCharging())
def testGetCharging_wireless(self):
with self.assertCall(
self.call.battery.GetBatteryInfo(), {'Wireless powered': 'true'}):
self.assertTrue(self.battery.GetCharging())
def testGetCharging_unknown(self):
with self.assertCall(
self.call.battery.GetBatteryInfo(), {'level': '42'}):
self.assertFalse(self.battery.GetCharging())
class BatteryUtilsGetNetworkDataTest(BatteryUtilsTest):
def testGetNetworkData_noDataUsage(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT),
(self.call.device.ReadFile('/proc/uid_stat/1000/tcp_snd'),
self.ShellError()),
(self.call.device.ReadFile('/proc/uid_stat/1000/tcp_rcv'),
self.ShellError())):
self.assertEquals(self.battery.GetNetworkData('test_package1'), (0, 0))
def testGetNetworkData_badPackage(self):
with self.assertCall(
self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT):
self.assertEqual(self.battery.GetNetworkData('asdf'), None)
def testGetNetworkData_packageNotCached(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT),
(self.call.device.ReadFile('/proc/uid_stat/1000/tcp_snd'), 1),
(self.call.device.ReadFile('/proc/uid_stat/1000/tcp_rcv'), 2)):
self.assertEqual(self.battery.GetNetworkData('test_package1'), (1,2))
def testGetNetworkData_packageCached(self):
self.battery._cache['uids'] = {'test_package1': '1000'}
with self.assertCalls(
(self.call.device.ReadFile('/proc/uid_stat/1000/tcp_snd'), 1),
(self.call.device.ReadFile('/proc/uid_stat/1000/tcp_rcv'), 2)):
self.assertEqual(self.battery.GetNetworkData('test_package1'), (1,2))
def testGetNetworkData_clearedCache(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True),
_DUMPSYS_OUTPUT),
(self.call.device.ReadFile('/proc/uid_stat/1000/tcp_snd'), 1),
(self.call.device.ReadFile('/proc/uid_stat/1000/tcp_rcv'), 2)):
self.battery._cache.clear()
self.assertEqual(self.battery.GetNetworkData('test_package1'), (1,2))
class BatteryUtilsLetBatteryCoolToTemperatureTest(BatteryUtilsTest):
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_startUnder(self):
with self.assertCalls(
(self.call.battery.EnableBatteryUpdates(), []),
(self.call.battery.GetBatteryInfo(), {'temperature': '500'})):
self.battery.LetBatteryCoolToTemperature(600)
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_startOver(self):
with self.assertCalls(
(self.call.battery.EnableBatteryUpdates(), []),
(self.call.battery.GetBatteryInfo(), {'temperature': '500'}),
(self.call.battery.GetBatteryInfo(), {'temperature': '400'})):
self.battery.LetBatteryCoolToTemperature(400)
class BatteryUtilsSupportsFuelGaugeTest(BatteryUtilsTest):
def testSupportsFuelGauge_false(self):
self.battery._cache['profile'] = self._NEXUS_5
self.assertFalse(self.battery.SupportsFuelGauge())
def testSupportsFuelGauge_trueMax(self):
self.battery._cache['profile'] = self._NEXUS_6
# TODO(rnephew): Change this to assertTrue when we have support for
# disabling hardware charging on nexus 6.
self.assertFalse(self.battery.SupportsFuelGauge())
def testSupportsFuelGauge_trueDS(self):
self.battery._cache['profile'] = self._NEXUS_10
# TODO(rnephew): Change this to assertTrue when we have support for
# disabling hardware charging on nexus 10.
self.assertFalse(self.battery.SupportsFuelGauge())
class BatteryUtilsGetFuelGaugeChargeCounterTest(BatteryUtilsTest):
def testGetFuelGaugeChargeCounter_noFuelGauge(self):
self.battery._cache['profile'] = self._NEXUS_5
with self.assertRaises(device_errors.CommandFailedError):
self.battery.GetFuelGaugeChargeCounter()
def testGetFuelGaugeChargeCounter_fuelGaugePresent(self):
self.battery._cache['profile'] = self._NEXUS_6
with self.assertCalls(
(self.call.battery.SupportsFuelGauge(), True),
(self.call.device.ReadFile(mock.ANY), '123')):
self.assertEqual(self.battery.GetFuelGaugeChargeCounter(), 123)
class BatteryUtilsTieredSetCharging(BatteryUtilsTest):
@mock.patch('time.sleep', mock.Mock())
def testTieredSetCharging_softwareSetTrue(self):
self.battery._cache['profile'] = self._NEXUS_6
with self.assertCalls(
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True), []),
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery'], check_return=True), ['UPDATES STOPPED']),
(self.call.battery.GetCharging(), True)):
self.battery.TieredSetCharging(True)
@mock.patch('time.sleep', mock.Mock())
def testTieredSetCharging_softwareSetFalse(self):
self.battery._cache['profile'] = self._NEXUS_6
with self.assertCalls(
(self.call.battery.GetCharging(), True),
(self.call.battery._ClearPowerData(), True),
(self.call.battery._ClearPowerData(), True),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'ac', '0'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'usb', '0'], check_return=True), []),
(self.call.battery.GetCharging(), False)):
self.battery.TieredSetCharging(False)
@mock.patch('time.sleep', mock.Mock())
def testTieredSetCharging_hardwareSetTrue(self):
self.battery._cache['profile'] = self._NEXUS_5
with self.assertCalls(
(self.call.battery.GetCharging(), False),
(self.call.battery.SetCharging(True))):
self.battery.TieredSetCharging(True)
@mock.patch('time.sleep', mock.Mock())
def testTieredSetCharging_hardwareSetFalse(self):
self.battery._cache['profile'] = self._NEXUS_5
with self.assertCalls(
(self.call.battery.GetCharging(), True),
(self.call.battery._ClearPowerData(), True),
(self.call.battery.SetCharging(False))):
self.battery.TieredSetCharging(False)
def testTieredSetCharging_expectedStateAlreadyTrue(self):
with self.assertCalls((self.call.battery.GetCharging(), True)):
self.battery.TieredSetCharging(True)
def testTieredSetCharging_expectedStateAlreadyFalse(self):
with self.assertCalls((self.call.battery.GetCharging(), False)):
self.battery.TieredSetCharging(False)
class BatteryUtilsPowerMeasurement(BatteryUtilsTest):
def testPowerMeasurement_hardware(self):
self.battery._cache['profile'] = self._NEXUS_5
with self.assertCalls(
(self.call.battery.GetCharging(), True),
(self.call.battery._ClearPowerData(), True),
(self.call.battery.SetCharging(False)),
(self.call.battery.GetCharging(), False),
(self.call.battery.SetCharging(True))):
with self.battery.PowerMeasurement():
pass
@mock.patch('time.sleep', mock.Mock())
def testPowerMeasurement_software(self):
self.battery._cache['profile'] = self._NEXUS_6
with self.assertCalls(
(self.call.battery.GetCharging(), True),
(self.call.battery._ClearPowerData(), True),
(self.call.battery._ClearPowerData(), True),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'ac', '0'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'usb', '0'], check_return=True), []),
(self.call.battery.GetCharging(), False),
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True), []),
(self.call.battery.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery'], check_return=True), ['UPDATES STOPPED']),
(self.call.battery.GetCharging(), True)):
with self.battery.PowerMeasurement():
pass
class BatteryUtilsDiscoverDeviceProfile(BatteryUtilsTest):
def testDiscoverDeviceProfile_known(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.product.model'), "Nexus 4")):
self.battery._DiscoverDeviceProfile()
self.assertEqual(self.battery._cache['profile']['name'], "Nexus 4")
def testDiscoverDeviceProfile_unknown(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.product.model'), "Other")):
self.battery._DiscoverDeviceProfile()
self.assertEqual(self.battery._cache['profile']['name'], None)
class BatteryUtilsClearPowerData(BatteryUtilsTest):
def testClearPowerData_preL(self):
with self.assertCalls(
(self.call.device.RunShellCommand(mock.ANY, retries=0,
single_line=True, timeout=10, check_return=True), '20')):
self.assertFalse(self.battery._ClearPowerData())
def testClearPowerData_clearedL(self):
with self.assertCalls(
(self.call.device.RunShellCommand(mock.ANY, retries=0,
single_line=True, timeout=10, check_return=True), '22'),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'usb', '1'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'ac', '1'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '--reset'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '--charged', '--checkin'],
check_return=True, large_output=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True), [])):
self.assertTrue(self.battery._ClearPowerData())
def testClearPowerData_notClearedL(self):
with self.assertCalls(
(self.call.device.RunShellCommand(mock.ANY, retries=0,
single_line=True, timeout=10, check_return=True), '22'),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'usb', '1'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'ac', '1'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '--reset'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '--charged', '--checkin'],
check_return=True, large_output=True),
['9,1000,l,pwi,uid,0.0327']),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True), [])):
with self.assertRaises(device_errors.CommandFailedError):
self.battery._ClearPowerData()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import ftplib
import os.path
from typing import Any, List, Optional, Tuple
from airflow.hooks.base import BaseHook
class FTPHook(BaseHook):
"""
Interact with FTP.
Errors that may occur throughout are passed on and should be handled downstream.
You can specify mode for data transfers in the extra field of your
connection as ``{"passive": "true"}``.
:param ftp_conn_id: The :ref:`ftp connection id <howto/connection:ftp>`
reference.
"""
conn_name_attr = 'ftp_conn_id'
default_conn_name = 'ftp_default'
conn_type = 'ftp'
hook_name = 'FTP'
def __init__(self, ftp_conn_id: str = default_conn_name) -> None:
super().__init__()
self.ftp_conn_id = ftp_conn_id
self.conn: Optional[ftplib.FTP] = None
def __enter__(self):
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
if self.conn is not None:
self.close_conn()
def get_conn(self) -> ftplib.FTP:
"""Returns a FTP connection object"""
if self.conn is None:
params = self.get_connection(self.ftp_conn_id)
pasv = params.extra_dejson.get("passive", True)
self.conn = ftplib.FTP(params.host, params.login, params.password)
self.conn.set_pasv(pasv)
return self.conn
def close_conn(self):
"""
Closes the connection. An error will occur if the
connection wasn't ever opened.
"""
conn = self.conn
conn.quit()
self.conn = None
def describe_directory(self, path: str) -> dict:
"""
Returns a dictionary of {filename: {attributes}} for all files
on the remote system (where the MLSD command is supported).
:param path: full path to the remote directory
"""
conn = self.get_conn()
conn.cwd(path)
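# conn.mlsd() yields (name, facts) tuples, e.g.
# ("report.csv", {"type": "file", "size": "1024", "modify": "20240101120000"})
# (example values hypothetical), so dict() turns the listing into
# {filename: facts}.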
files = dict(conn.mlsd())
return files
def list_directory(self, path: str) -> List[str]:
"""
Returns a list of files on the remote system.
:param path: full path to the remote directory to list
"""
conn = self.get_conn()
conn.cwd(path)
files = conn.nlst()
return files
def create_directory(self, path: str) -> None:
"""
Creates a directory on the remote system.
:param path: full path to the remote directory to create
"""
conn = self.get_conn()
conn.mkd(path)
def delete_directory(self, path: str) -> None:
"""
Deletes a directory on the remote system.
:param path: full path to the remote directory to delete
"""
conn = self.get_conn()
conn.rmd(path)
def retrieve_file(self, remote_full_path, local_full_path_or_buffer, callback=None):
"""
Transfers the remote file to a local location.
If local_full_path_or_buffer is a string path, the file will be put
at that location; if it is a file-like buffer, the file will
be written to the buffer but not closed.
:param remote_full_path: full path to the remote file
:param local_full_path_or_buffer: full path to the local file or a
file-like buffer
:param callback: callback which is called each time a block of data
is read. If you do not use a callback, these blocks will be written
to the file or buffer passed in. If you do pass in a callback, note
that writing to a file or buffer will need to be handled inside the
callback.
[default: output_handle.write()]
.. code-block:: python
hook = FTPHook(ftp_conn_id="my_conn")
remote_path = "/path/to/remote/file"
local_path = "/path/to/local/file"
# with a custom callback (in this case displaying progress on each read)
def print_progress(percent_progress):
self.log.info("Percent Downloaded: %s%%" % percent_progress)
total_downloaded = 0
total_file_size = hook.get_size(remote_path)
output_handle = open(local_path, "wb")
def write_to_file_with_progress(data):
global total_downloaded
total_downloaded += len(data)
output_handle.write(data)
percent_progress = (total_downloaded / total_file_size) * 100
print_progress(percent_progress)
hook.retrieve_file(remote_path, None, callback=write_to_file_with_progress)
# without a custom callback data is written to the local_path
hook.retrieve_file(remote_path, local_path)
"""
conn = self.get_conn()
is_path = isinstance(local_full_path_or_buffer, str)
# without a callback, default to writing to a user-provided file or
# file-like buffer
if not callback:
if is_path:
output_handle = open(local_full_path_or_buffer, 'wb')
else:
output_handle = local_full_path_or_buffer
callback = output_handle.write
else:
output_handle = None
remote_path, remote_file_name = os.path.split(remote_full_path)
conn.cwd(remote_path)
self.log.info('Retrieving file from FTP: %s', remote_full_path)
conn.retrbinary(f'RETR {remote_file_name}', callback)
self.log.info('Finished retrieving file from FTP: %s', remote_full_path)
if is_path and output_handle:
output_handle.close()
def store_file(self, remote_full_path: str, local_full_path_or_buffer: Any) -> None:
"""
Transfers a local file to the remote location.
If local_full_path_or_buffer is a string path, the file will be read
from that location; if it is a file-like buffer, the file will
be read from the buffer but not closed.
:param remote_full_path: full path to the remote file
:param local_full_path_or_buffer: full path to the local file or a
file-like buffer
"""
conn = self.get_conn()
is_path = isinstance(local_full_path_or_buffer, str)
if is_path:
input_handle = open(local_full_path_or_buffer, 'rb')
else:
input_handle = local_full_path_or_buffer
remote_path, remote_file_name = os.path.split(remote_full_path)
conn.cwd(remote_path)
conn.storbinary(f'STOR {remote_file_name}', input_handle)
if is_path:
input_handle.close()
def delete_file(self, path: str) -> None:
"""
Removes a file on the FTP Server.
:param path: full path to the remote file
"""
conn = self.get_conn()
conn.delete(path)
def rename(self, from_name: str, to_name: str) -> str:
"""
Rename a file.
:param from_name: rename file from name
:param to_name: rename file to name
"""
conn = self.get_conn()
return conn.rename(from_name, to_name)
def get_mod_time(self, path: str) -> datetime.datetime:
"""
Returns a datetime object representing the last time the file was modified
:param path: remote file path
"""
conn = self.get_conn()
ftp_mdtm = conn.sendcmd('MDTM ' + path)
time_val = ftp_mdtm[4:]
# time_val optionally has microseconds
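# (an MDTM success reply looks like "213 YYYYMMDDHHMMSS[.sss]", so the
# "213 " status prefix is stripped by the slice above)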
try:
return datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S.%f")
except ValueError:
return datetime.datetime.strptime(time_val, '%Y%m%d%H%M%S')
def get_size(self, path: str) -> Optional[int]:
"""
Returns the size of a file (in bytes)
:param path: remote file path
"""
conn = self.get_conn()
size = conn.size(path)
return int(size) if size else None
def test_connection(self) -> Tuple[bool, str]:
"""Test the FTP connection by calling path with directory"""
try:
conn = self.get_conn()
conn.pwd()
return True, "Connection successfully tested"
except Exception as e:
return False, str(e)
class FTPSHook(FTPHook):
"""Interact with FTPS."""
def get_conn(self) -> ftplib.FTP:
"""Returns a FTPS connection object."""
if self.conn is None:
params = self.get_connection(self.ftp_conn_id)
pasv = params.extra_dejson.get("passive", True)
if params.port:
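# Note: ftplib reads the port from a class attribute, so assigning
# FTP_TLS.port here changes the default port for every FTP_TLS instance
# in this process, not just this hook's connection.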
ftplib.FTP_TLS.port = params.port
self.conn = ftplib.FTP_TLS(params.host, params.login, params.password)
self.conn.set_pasv(pasv)
return self.conn
|
|
import MySQLdb
import CFGAPI
import DecisionTree
import socketAPI
import os
class DataConverter:
def matchData(self, src_index, dest_index):
if self.extractedlist[src_index]:
self.requiredlist[dest_index] = self.extractedlist[src_index]
self.ismatched[src_index] = True
self.xmlhead = '<?xml version="1.0" encoding="UTF-8"?>\n<hello xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">\n<capabilities>\n<capability>urn:ietf:params:netconf:base:1.0</capability>\n</capabilities>\n</hello>\n]]>]]>\n<?xml version="1.0" encoding="UTF-8"?>\n<rpc message-id="1" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">\n<edit-config>\n <target>\n<running />\n</target>\n<config>\n<i2nsf-security-policy xmlns="urn:ietf:params:xml:ns:yang:ietf-i2nsf-policy-rule-for-nsf" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">'
self.xmltail = '</i2nsf-security-policy>\n</config>\n</edit-config>\n</rpc>\n]]>]]>\n<?xml version="1.0" encoding="UTF-8"?>\n<rpc message-id="2" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">\n<close-session />\n</rpc>\n]]>]]>'
def setData(self, data, dest_index):
if not self.requiredlist[dest_index]:
self.requiredlist[dest_index] = data
else:
self.requiredlist[dest_index].append(data[0])
def initializeDB(self):
nsfdb = MySQLdb.connect(host="localhost", user="root", passwd="secu", db="nsfdb")
nsfcur = nsfdb.cursor()
nsfcur.execute("DROP TABLE nsftable")
nsfcur.execute("CREATE TABLE nsftable (nname VARCHAR(255), processing VARCHAR(30), outbound VARCHAR(30), inbound VARCHAR(30), initiated VARCHAR(5))")
"""
nsfcur.execute("INSERT INTO nsftable VALUES ('general_firewall', '1000,5000', '1000,5000', '1000,5000', 'False')")
nsfcur.execute("INSERT INTO nsftable VALUES ('time_based_firewall', '1000,5000', '1000,5000', '1000,5000', 'False')")
nsfcur.execute("INSERT INTO nsftable VALUES ('voip_volte_filter', '1000,5000', '1000,5000', '1000,5000', 'False')")
nsfcur.execute("INSERT INTO nsftable VALUES ('web_filter', '1000,5000', '1000,5000', '1000,5000', 'False')")
nsfcur.execute("INSERT INTO nsftable VALUES ('http_and_https_flood_mitigation', '1000,5000', '1000,5000', '1000,5000', 'False')")
"""
nsfcur.execute("DROP TABLE capabilitytable")
nsfcur.execute("CREATE TABLE capabilitytable (nname VARCHAR(255), cname VARCHAR(255))")
"""
nsfcur.execute("INSERT INTO capabilitytable VALUES ('general_firewall', 'ipv4-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('general_firewall', 'tcp-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('general_firewall', 'ingress-action-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('general_firewall', 'egress-action-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('time_based_firewall', 'time-capabilities')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('time_based_firewall', 'ipv4-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('time_based_firewall', 'ingress-action-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('time_based_firewall', 'egress-action-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('voip_volte_filter', 'voip-volte-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('voip_volte_filter', 'ingress-action-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('voip_volte_filter', 'egress-action-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('web_filter', 'http-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('web_filter', 'ingress-action-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('web_filter', 'egress-action-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('http_and_https_flood_mitigation', 'antiddos-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('http_and_https_flood_mitigation', 'ingress-action-capa')")
nsfcur.execute("INSERT INTO capabilitytable VALUES ('http_and_https_flood_mitigation', 'egress-action-capa')")
"""
nsfcur.execute("DROP TABLE fieldtable")
nsfcur.execute("CREATE TABLE fieldtable (cname VARCHAR(255), fieldname VARCHAR(255))")
"""
nsfcur.execute("INSERT INTO fieldtable VALUES ('ipv4-capa', 'range-ipv4-address')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('ipv4-capa', 'exact-ipv4-address')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('ipv4-capa', 'ipv4-protocol')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('tcp-capa', 'exact-tcp-port-num')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('tcp-capa', 'range-tcp-port-num')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('ingress-capa', 'alert')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('ingress-capa', 'drop')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('ingress-capa', 'pass')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('egress-capa', 'alert')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('egress-capa', 'drop')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('egress-capa', 'pass')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('time-capabilities', 'absolute-time')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('time-capabilities', 'periodic-time')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('voip-volte-capa', 'voice-id')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('http-capa', 'url')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('antiddos-capa', 'http-flood-action')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('antiddos-capa', 'https-flood-action')")
nsfcur.execute("INSERT INTO fieldtable VALUES ('antiddos-capa', 'mitigation')")
"""
nsfcur.execute("DROP TABLE endpointtable")
nsfcur.execute("CREATE TABLE endpointtable (ename VARCHAR(255), id INT(1), data VARCHAR(255))")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('sns-websites', 114, 'facebook,instagram')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('sns-websites', 123, 'url-filtering')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('employees', 50, '10.0.0.2')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('employees', 51, '10.0.0.10')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('employees', 43, '10.0.0.2')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('employees', 44, '10.0.0.10')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('malicious-id', 84, '5060,5061')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('malicious-id', 115, '[email protected],[email protected]')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('malicious-id', 123, 'voip-volte')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('webservers', 46, '221.159.112.95')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('webservers', 84, '80,443')")
nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('webservers', 124, 'http_and_https_flood')")
#nsfcur.execute("INSERT INTO endpointtable (ename, id, data) VALUES ('webservers', 124, 'https_flood')")
nsfdb.commit()
nsfcur.close()
nsfdb.close()
print('NSF Database is constructed')
def registerNSF(self, data):
nsfdb = MySQLdb.connect(host="localhost", user="root", passwd="secu", db="nsfdb")
nsfcur = nsfdb.cursor()
nsf_name, processing, outbound, inbound = '', '', '', ''
print(data)
linelist = data.split('\n')
for line in linelist:
if line == '': continue
linetemp = line.split(': ')
if len(linetemp) < 2: continue
capa = linetemp[0]
field = linetemp[1]
#print(capa)
#print(field)
if 'nsf-name' in capa: nsf_name = field
elif 'processing' in capa: processing = field
elif 'Bandwidth Outbound' in capa: outbound = field
elif 'Bandwidth Inbound' in capa: inbound = field
else:
nsfcur.execute("INSERT INTO capabilitytable VALUES ('"+nsf_name+"', '"+capa+"')")
fieldlist = field.split(',')
for field in fieldlist:
nsfcur.execute("INSERT INTO fieldtable VALUES ('"+capa+"', '"+field+"')")
nsfcur.execute("INSERT INTO nsftable VALUES ('"+nsf_name+"', '"+processing+"', '"+outbound+"', '"+inbound+"', 'False')")
nsfdb.commit()
nsfcur.close()
nsfdb.close()
print(nsf_name+": NSF and Capabilities are Registered")
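# Example of the registration payload parsed above (hypothetical values;
# the key names match the branches in the loop):
#   nsf-name: general_firewall
#   processing: 1000,5000
#   Bandwidth Outbound: 1000,5000
#   Bandwidth Inbound: 1000,5000
#   ipv4-capa: range-ipv4-address,exact-ipv4-address,ipv4-protocol
# Every line that is not one of the four known keys is treated as a
# capability name with a comma-separated list of its fields.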
# convert data
def convertData(self):
self.matchData(0, 0) # match policy-name
self.matchData(1, 4) # match rule-name
self.matchData(15, 118) # match packet-per-second
self.matchData(20, 120) # match ingress-action
self.matchData(20, 121) # match egress-action
# match start-time
if self.extractedlist[7]:
self.requiredlist[8].append(self.extractedlist[7][0]+':00Z')
# match end-time
if self.extractedlist[8]:
self.requiredlist[9].append(self.extractedlist[8][0]+':00Z')
# register endpoint information to NSF Database
# need to fill
# match via NSF Database
nsfdb = MySQLdb.connect(host="localhost", user="root", passwd="secu", db="nsfdb")
nsfcur = nsfdb.cursor()
infolen = len(self.extractedinfo)
for i in range(infolen):
if self.ismatched[i]: continue
if self.extractedlist[i]:
nsfcur.execute("SELECT id, data FROM endpointtable WHERE ename='"+self.extractedlist[i][0]+"'")
rows = nsfcur.fetchall()
for ptr in rows:
self.setData(ptr[1].split(','), ptr[0])
nsfcur.close()
nsfdb.close()
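# Illustration (using the rows seeded in initializeDB): an extracted endpoint
# name such as 'employees' is looked up in endpointtable and expands into
# requiredlist[50] = ['10.0.0.2'], requiredlist[51] = ['10.0.0.10'], etc.,
# replacing the abstract endpoint with concrete field values keyed by id.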
def constructDecisionTree(self):
# initialize
nsfdb = MySQLdb.connect(host="localhost", user="root", passwd="secu", db="nsfdb")
nsfcur = nsfdb.cursor()
self.capabilitylist = []
self.learning_input = []
self.learning_output = []
self.nsflist = []
self.nsf_capability = []
# find all registered capabilities
nsfcur.execute("SELECT DISTINCT cname FROM capabilitytable")
rows = nsfcur.fetchall()
rowlen = len(rows)
for ptr in rows:
self.capabilitylist.append(ptr[0])
self.learning_output.append([])
# construct database for decision tree
nsfcur.execute("SELECT DISTINCT nname FROM nsftable")
rows = nsfcur.fetchall()
for ptr in rows:
self.nsflist.append(ptr[0])
for nsf in self.nsflist:
temp_capalist = []
nsfcur.execute("SELECT cname FROM capabilitytable WHERE nname='"+nsf+"'")
rows = nsfcur.fetchall()
for ptr in rows:
temp_capalist.append(ptr[0])
self.nsf_capability.append(temp_capalist)
"""
learning_temp = []
for i in range(len(self.capabilitylist)):
learning_temp.append(False)
self.learning_input.append(learning_temp)
for i in range(len(self.nsf_capability)):
self.learning_output[i].append(False)
"""
for x in range(len(self.nsf_capability)):
learning_temp = []
for i in range(len(self.capabilitylist)):
if self.capabilitylist[i] in self.nsf_capability[x]:
learning_temp.append(True)
else:
learning_temp.append(False)
self.learning_input.append(learning_temp)
for y in range(len(self.nsf_capability)):
self.learning_output[y].append(x==y)
for i in range(len(self.nsf_capability)):
capa_temp = []
for j in range(len(self.nsf_capability[i])):
capa_temp.append(self.nsf_capability[i][j])
for j in range(len(self.nsf_capability)):
if i!=j:
for k in range(len(self.nsf_capability[j])):
if self.nsf_capability[j][k] in capa_temp:
capa_temp.remove(self.nsf_capability[j][k])
learning_temp = []
for j in range(len(self.capabilitylist)):
if self.capabilitylist[j] in capa_temp:
learning_temp.append(True)
else:
learning_temp.append(False)
self.learning_input.append(learning_temp)
for y in range(len(self.nsf_capability)):
self.learning_output[y].append((i==y and len(capa_temp)>0))
# construct Decision Tree
self.dtlist = []
for i in range(len(self.nsf_capability)):
self.dtlist.append(DecisionTree.DecisionTreeNode(self.learning_input, self.learning_output[i], 0.005, self.capabilitylist))
nsfcur.close()
nsfdb.close()
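# Illustrative sketch of the training data built above (hypothetical setup
# with two NSFs and three registered capabilities):
#   capabilitylist     = ['ipv4-capa', 'tcp-capa', 'http-capa']
#   nsf_capability     = [['ipv4-capa', 'tcp-capa'], ['http-capa', 'ipv4-capa']]
#   learning_input     = [[True, True, False],   # full capability row for NSF 0
#                         [True, False, True],   # full capability row for NSF 1
#                         [False, True, False],  # capabilities unique to NSF 0
#                         [False, False, True]]  # capabilities unique to NSF 1
#   learning_output[0] = [True, False, True, False]   # "is NSF 0 a match?"
#   learning_output[1] = [False, True, False, True]
# One DecisionTreeNode is then trained per NSF on (learning_input, learning_output[i]).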
def policyprovisioning(self, cfglist, requestIP, requestPORT):
nsfdb = MySQLdb.connect(host="localhost", user="root", passwd="secu", db="nsfdb")
nsfcur = nsfdb.cursor()
capalen = len(self.capabilitylist)
infolen = len(self.requiredinfo)
nsflen = len(self.nsf_capability)
# vector for investigating policy provisioning
test_input = []
# vector generation
for i in range(capalen):
isExist = False
for j in range(infolen):
if self.requiredlist[j]:
if cfglist[j].isAncestor(self.capabilitylist[i]):
isExist = True
break
else:
nsfcur.execute("SELECT fieldname FROM fieldtable WHERE cname='"+self.capabilitylist[i]+"'")
rows = nsfcur.fetchall()
for ptr in rows:
if cfglist[j].isAncestor(ptr[0]):
isExist = True
break
if isExist: break
test_input.append(isExist)
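# test_input is now a boolean vector aligned with capabilitylist: entry i is
# True when the policy requires capability i, either directly or through one
# of its fieldtable fields; it is fed to every NSF's decision tree below.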
# endpoint information: exception
if not self.requiredlist[0]:
test_input = []
for i in range(capalen):
test_input.append(False)
# policy provisioning
selectednsfstring = ''
for i in range(nsflen):
isSelected = self.dtlist[i].running(test_input)
if isSelected:
nsfcur.execute("SELECT initiated FROM nsftable WHERE nname='"+self.nsflist[i]+"'")
rows = nsfcur.fetchall()
if rows[0][0] == 'False':
# initiate
print('Initiate NSF: '+self.nsflist[i]+'\n')
selectednsfstring += (self.nsflist[i]+',')
#socketAPI.request_nsf('10.0.0.12', 55560, self.nsflist[i])
nsfcur.execute("UPDATE nsftable SET initiated = 'True' WHERE nname='"+self.nsflist[i]+"'")
nsfdb.commit()
# provide data for required capabilities
requiredlist = []
for j in range(infolen):
isExist = False
for capa in self.nsf_capability[i]:
if cfglist[j].isAncestor(capa):
isExist = True
break
else:
nsfcur.execute("SELECT fieldname FROM fieldtable WHERE cname='"+capa+"'")
rows = nsfcur.fetchall()
for ptr in rows:
if cfglist[j].isAncestor(ptr[0]):
isExist = True
break
if isExist: break
if isExist:
if (j == 120 or j == 121) and ('firewall' in self.nsflist[i]): requiredlist.append(['pass']) # action exception for firewall
elif (j == 50 or j == 51) and (not self.requiredlist[115]): requiredlist.append([]) # dest-ip exception for firewall
elif (j == 43 or j == 44) and (not self.requiredlist[114]): requiredlist.append([]) # src-ip exception for firewall
elif (j == 123 or j == 124) and (not 'firewall' in self.nsflist[i]): requiredlist.append([]) # advanced-security exception for filter
else: requiredlist.append(self.requiredlist[j])
else:
if j == 0 or j == 4: requiredlist.append(self.requiredlist[j]) # name exception for all NSFs
elif (j == 123 or j == 124) and ('firewall' in self.nsflist[i]): requiredlist.append(self.requiredlist[j]) # advanced-security exception for firewall
elif (j == 118) and (not 'firewall' in self.nsflist[i]): requiredlist.append(self.requiredlist[j]) # packet-per-second exception for firewall
else: requiredlist.append([])
# generate and provide low-level policy to NSF
print('Low-level policy for '+self.nsflist[i])
lowlevelpolicy = self.xmlhead+CFGAPI.generating_policy(cfglist, self.requiredinfo, requiredlist).rstrip()+self.xmltail
print(lowlevelpolicy)
fo = open('./LowLevelPolicy/'+self.nsflist[i]+'.txt', 'w')
fo.write(lowlevelpolicy)
fo.close()
fo = open('./LowLevelPolicy/'+self.nsflist[i]+'.xml', 'w')
fo.write(lowlevelpolicy)
fo.close()
print('')
if selectednsfstring != '':
socketAPI.request_nsf(requestIP, requestPORT, selectednsfstring[:-1])
#socketAPI.request_nsf('10.0.0.12', 55560, selectednsfstring[:-1])
nsfcur.close()
nsfdb.close()
def __init__(self, extractedinfo, requiredinfo):
self.extractedinfo = extractedinfo
self.requiredinfo = requiredinfo
def inputExtractedData(self, extractedlist):
self.extractedlist = extractedlist
self.requiredlist = []
self.ismatched = []
infolen = len(self.requiredinfo)
for i in range(infolen):
self.requiredlist.append([])
infolen = len(self.extractedinfo)
for i in range(infolen):
self.ismatched.append(False)
|
|
import json, os, datetime, time
from django.shortcuts import render_to_response, get_object_or_404, render
from django.template.loader import render_to_string
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.core.cache import cache, caches, get_cache
import StringIO
from PIL import Image, ImageEnhance
import umemcache
from tilejetutil.base import webmercator_bbox
from tilejetutil.tilemath import flip_y, tms_to_bbox, quadkey_to_tms, tms_to_quadkey, tms_to_geojson
from tilejetutil.nav import getNearbyTiles, getChildrenTiles, getParentTiles
from tilejetutil.tilefactory import blankTile, redTile, solidTile
from tilejetlogs.mongodb import clearLogs, reloadLogs
from tilejetstats.mongodb import clearStats, reloadStats
from tilejetcache.cache import getTileFromCache, get_from_cache, check_cache_availability
from .models import TileService
from tilejetserver.utils import bbox_intersects_source, getYValues, TYPE_TMS, TYPE_TMS_FLIPPED, TYPE_BING, TYPE_WMS, getIPAddress, getValue, url_to_pattern, string_to_list, get_from_file
from tilejetserver.source.utils import getTileOrigins, reloadTileOrigins, getTileSources, reloadTileSources, getTileServices, reloadTileServices, requestTileFromSource
from tilejetserver.utils import logs_tilerequest, formatMemorySize
from tilejetserver.stats import stats_cache, stats_tilerequest
from tilejetserver.logs import logTileRequest, logTileRequestError
from tilejetserver.source.models import TileOrigin,TileSource
from tilejetserver.cache.tasks import taskRequestTile, taskWriteBackTile, taskUpdateStats
from tilejetserver.cache.forms import TileOriginForm, TileSourceForm, TileServiceForm
from tilejetserver.geowatch import provision_client_logs_requests
import json
from bson.json_util import dumps
from geojson import Polygon, Feature, FeatureCollection, GeometryCollection
import time
from geowatchdjango.utils import provision_geowatch_client
def render(request, template='capabilities/services.html', ctx=None, contentType=None):
if contentType is not None:
return render_to_response(template, RequestContext(request, ctx), content_type=contentType)
else:
return render_to_response(template, RequestContext(request, ctx))
def capabilities_all_xml(request, template='capabilities/capabilities_1_0_0.xml'):
return capabilities_all(request,template,'xml')
def capabilities_all(request, template=None, extension=None):
ctx = {'tileservices': TileService.objects.filter(type__in=[TYPE_TMS,TYPE_TMS_FLIPPED]),'title':'All Tile Services', 'SITEURL': settings.SITEURL,}
if extension=="xml":
if template is None:
template = 'capabilities/capabilities_1_0_0.xml'
return render(request,template,ctx,'text/xml')
else:
if template is None:
template = 'capabilities/services.html'
return render(request,template,ctx)
def capabilities_service(request, template='capabilities/capabilities_service_1_0_0.xml', slug=None):
print settings.SITEURL
ctx = {'tileservice': TileService.objects.get(slug=slug), 'SITEURL': settings.SITEURL, }
return render(request,template,ctx,'text/xml')
@login_required
def flush(request):
# Using raw umemcache flush_all function
#defaultcache = umemcache.Client(settings.CACHES['default']['LOCATION'])
#defaultcache.connect()
#defaultcache.flush_all()
#tilecache = umemcache.Client(settings.CACHES['tiles']['LOCATION'])
#tilecache.connect()
#tilecache.flush_all()
#resultscache = umemcache.Client(settings.CACHES['tiles']['LOCATION'])
#resultscache.connect()
#resultscache.flush_all()
#==#
# Using custom clear function from https://github.com/mozilla/django-memcached-pool/blob/master/memcachepool/cache.py
if(check_cache_availability(settings.CACHES['default']['LOCATION'], settings.CACHES['default'])):
defaultcache = caches['default']
defaultcache.clear()
if(check_cache_availability(settings.CACHES['tiles']['LOCATION'], settings.CACHES['tiles'])):
tilecache = caches['tiles']
tilecache.clear()
if(check_cache_availability(settings.CACHES['celery_results']['LOCATION'], settings.CACHES['celery_results'])):
resultscache = caches['celery_results']
resultscache.clear()
return HttpResponse("Tile cache flushed.",
content_type="text/plain"
)
@login_required
def logs_json(request):
logs = logs_tilerequest()
return HttpResponse(dumps(logs),
content_type="application/json"
)
@login_required
def logs_clear(request):
clearLogs(
host = settings.TILEJET_DBHOST,
port = settings.TILEJET_DBPORT,
dbname = settings.TILEJET_DBNAME,
GEVENT_MONKEY_PATCH = True)
return HttpResponse("Logs cleared.",
content_type="text/plain"
)
@login_required
def logs_reload(request):
clearLogs(
host = settings.TILEJET_DBHOST,
port = settings.TILEJET_DBPORT,
dbname = settings.TILEJET_DBNAME,
GEVENT_MONKEY_PATCH = True)
reloadLogs(
settings.TILEJET_LOGS_REQUEST_ROOT,
host = settings.TILEJET_DBHOST,
port = settings.TILEJET_DBPORT,
dbname = settings.TILEJET_DBNAME,
GEVENT_MONKEY_PATCH = True)
return HttpResponse("Logs reloaded from disk.",
content_type="text/plain"
)
def stats_clear(request):
clearStats(
settings.TILEJET_LIST_STATS,
host = settings.TILEJET_DBHOST,
port = settings.TILEJET_DBPORT,
dbname = settings.TILEJET_DBNAME,
GEVENT_MONKEY_PATCH = True)
return HttpResponse("Tile stats cleared.",
content_type="text/plain"
)
def stats_reload(request):
reloadStats(
settings.TILEJET_LIST_STATS,
host = settings.TILEJET_DBHOST,
port = settings.TILEJET_DBPORT,
dbname = settings.TILEJET_DBNAME,
collection_logs = settings.TILEJET_COLLECTION_LOGS,
MONGO_AGG_FLAG = settings.MONGO_AGG_FLAG,
GEVENT_MONKEY_PATCH = True)
taskUpdateStats.apply_async(
args=[],
kwargs=None,
queue="default")
return HttpResponse("Stats updating from MongoDB Logs.",
content_type="text/plain"
)
@login_required
def stats_json(request):
stats = None
if settings.STATS_SAVE_MEMORY:
cache, stats = get_from_cache(
settings.CACHES['default']['LOCATION'],
settings.CACHES['default'],
'default',
'stats_tilerequests',
GEVENT_MONKEY_PATCH=settings.TILEJET_GEVENT_MONKEY_PATCH)
if settings.STATS_SAVE_FILE and not stats:
stats = get_from_file(settings.STATS_REQUEST_FILE, filetype='json')
if not stats:
stats = {}
return HttpResponse(json.dumps(stats),
content_type="application/json"
)
@login_required
def stats_cache_json(request):
stats = {}
target = settings.TILEJET['cache']['memory']['target']
if(check_cache_availability(settings.CACHES[target]['LOCATION'], settings.CACHES[target])):
location = settings.CACHES[target]['LOCATION']
tilecache = umemcache.Client(location)
tilecache.connect()
stats = tilecache.stats()
return HttpResponse(json.dumps(stats),
content_type="application/json"
)
@login_required
def stats_tms(request, t=None, stat=None, z=None, x=None, y=None, u=None, ext=None):
#==#
verbose = True
ix = None
iy = None
ify = None
iz = None
if u:
iz, ix, iy = quadkey_to_tms(u)
elif x and y and z:
ix = int(x)
iy = int(y)
iz = int(z)
if t == "regular":
ify = flip_y(ix,iy,iz,256,webmercator_bbox)
else:
ify = iy
iy = flip_y(ix,ify,iz,256,webmercator_bbox)
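# flip_y presumably converts between XYZ (top-left origin) and TMS
# (bottom-left origin) row numbering, i.e. y_flipped = 2**z - 1 - y for
# 256px tiles, so the lookup key below matches how the stats were logged.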
#stats = stats_tilerequest()
cache, stats = get_from_cache('default','stats_tilerequests')
key = z+"/"+x+"/"+y
if not stats:
return None
if not stat:
return None
image = None
if key in stats['global'][stat]:
blue = (256.0 * stats['global'][stat][key]) / stats['tile']['max']
image = solidTile(width=256, height=256, b=int(blue), a=128)
else:
image = blankTile(width=256, height=256)
if image:
response = HttpResponse(content_type="image/png")
image.save(response, "PNG")
return response
else:
return None
def stats_dashboard(request, origin=None, source=None, date=None):
stats = None
if settings.STATS_SAVE_MEMORY:
cache, stats = get_from_cache('default','stats_tilerequests')
if settings.STATS_SAVE_FILE and not stats:
stats = get_from_file(settings.STATS_REQUEST_FILE, filetype='json')
dates = []
if stats:
if 'by_date' in stats:
dates = stats['by_date'].keys()
context_dict = {
'date': date,
'origins': getTileOrigins(),
'sources': getTileSources(),
'dates': dates
}
try:
context_dict['origin'] = TileOrigin.objects.get(name=origin)
except:
context_dict['origin'] = None
try:
context_dict['source'] = TileSource.objects.get(name=source)
except:
context_dict['source'] = None
return render_to_response(
"cache/stats_dashboard.html",
RequestContext(request, context_dict))
@login_required
def stats_map(request, origin=None, source=None, date=None):
stats = None
if settings.STATS_SAVE_MEMORY:
cache, stats = get_from_cache('default','stats_tilerequests')
if settings.STATS_SAVE_FILE and not stats:
stats = get_from_file(settings.STATS_REQUEST_FILE, filetype='json')
dates = []
if stats:
if 'by_date' in stats:
dates = stats['by_date'].keys()
#print stats['by_date_location'].keys()
context_dict = {
'date': date,
'origins': getTileOrigins(),
'sources': getTileSources(),
'dates': dates
}
try:
context_dict['origin'] = TileOrigin.objects.get(name=origin)
except:
context_dict['origin'] = None
try:
context_dict['source'] = TileSource.objects.get(name=source)
except:
context_dict['source'] = None
return render_to_response(
"cache/stats_map_3.html",
RequestContext(request, context_dict))
@login_required
def stats_geojson_source(request, z=None, source=None):
return stats_geojson(request, z=z, source=source)
@login_required
def stats_geojson(request, z=None, origin=None, source=None, date=None):
iz = int(z)
features = []
stats = stats_tilerequest()
root = None
if origin and date:
root = getValue(getValue(stats['by_origin_date_location'],origin),date)
elif source and date:
root = getValue(getValue(stats['by_source_date_location'],source),date)
elif origin:
root = stats['by_origin_location'][origin]
elif source:
root = stats['by_source_location'][source]
elif date:
root = stats['by_date_location'][date]
else:
root = stats['by_location']
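# Each key in the selected stats bucket is a "z/x/y" tile address; only tiles
# at the requested zoom level are converted into GeoJSON features below.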
i = 0
for key in root:
i = i + 1
t = key.split("/")
tz = int(t[0])
tx = int(t[1])
ty = int(t[2])
if iz == tz:
#count = stats['global'][stat][key]
count = root[key]
geom = tms_to_geojson(tx,ty,tz)
props = {"x":tx, "y":ty, "z":tz, "location": key, "count": count}
features.append( Feature(geometry=geom, id=i, properties=props) )
geojson = FeatureCollection( features )
return HttpResponse(json.dumps(geojson),
content_type="application/json"
)
@login_required
def info(request):
#stats_tr = stats_tilerequest()
#cache, stats_tr = get_from_cache(
# settings.CACHES['default']['LOCATION'],
# settings.CACHES['default'],
# 'default',
# 'stats_tilerequests',
# GEVENT_MONKEY_PATCH=settings.TILEJET_GEVENT_MONKEY_PATCH)
caches = []
c = settings.TILEJET['cache']['memory']
stats_c = stats_cache()
if stats_c:
size = int(stats_c['bytes'])
maxsize = int(stats_c['limit_maxbytes'])
size_percentage = format(((100.0 * size) / maxsize),'.4f')+"%"
items = int(stats_c['curr_items'])
caches.append({
'name': 'memory',
'enabled': c['enabled'],
'description': c['description'],
'type': c['type'],
'size': formatMemorySize(size, original='B'),
'maxsize': formatMemorySize(maxsize, original='B'),
'size_percentage': size_percentage,
'items': items,
'minzoom': c['minZoom'],
'maxzoom': c['maxZoom'],
'expiration': c['expiration'],
'link_memcached': '/cache/stats/export/cache.json'
})
heuristics = []
h = settings.TILEJET['heuristic']['down']
heuristics.append({
'name': 'down',
'enabled': h['enabled'],
'description': h['description']
})
h = settings.TILEJET['heuristic']['up']
heuristics.append({
'name': 'up',
'enabled': h['enabled'],
'description': h['description']
})
h = settings.TILEJET['heuristic']['nearby']
heuristics.append({
'name': 'nearby',
'enabled': h['enabled'],
'description': h['description']
})
# Build Queues List
queues = []
try:
import celery
for key, raw_queues in celery.current_app.control.inspect().active_queues().items():
for q in raw_queues:
queues.append({
'name': getValue(q, u'name', fallback=''),
'routing_key': getValue(q, u'routing_key', fallback=''),
'durable': getValue(q, u'durable', fallback=False),
'ttl': getValue(q[u'queue_arguments'], u'x-message-ttl', fallback=-1)
})
#import pyrabbit.api
#pyrabbit_client = pyrabbit.api.Client(settings.BROKER_DOMAIN+':'+settings.BROKER_PORT, settings.BROKER_USER, settings.BROKER_PASSWORD)
for q in queues:
q['messages'] = 0
except:
print "Could not generate queues. Is celery or RabbitMQ offline?"
# Build Schedules Tasks
scheduled = []
try:
import celery
from celery import beat
s = beat.Scheduler(app=celery.current_app)
scheduled = s.schedule.keys()
except:
print "Could not build scheduled tasks. Is celery beat running?"
#topics = []
#try:
# from kafka import KafkaClient
# kafka = KafkaClient(settings.TILEJET_GEOWATCH_HOST)
# for topic in kafka.topics:
# topic2 = {
# 'name': topic,
# 'partitions': len(kafka.topic_partitions.get(topic, []))
# }
# topics.append(topic2)
#except:
# print "Could not generate topics. Is Kafka offline?"
client = provision_geowatch_client()
topics = client.list_topics()
context_dict = {
'origins': getTileOrigins(),
'sources': getTileSources(),
'caches': caches,
'heuristics': heuristics,
'queues': queues,
'topics': topics,
'scheduled': scheduled,
'stats': settings.TILEJET_LIST_STATS,
'hosts': settings.PROXY_ALLOWED_HOSTS
}
return render_to_response(
"cache/info.html",
RequestContext(request, context_dict))
@login_required
def origins_list(request):
#cache, stats = get_from_cache('default','stats_tilerequests')
context_dict = {
'origins': getTileOrigins()
}
return render_to_response(
"cache/origins_list.html",
RequestContext(request, context_dict))
@login_required
def sources_list(request):
#cache, stats = get_from_cache('default','stats_tilerequests')
context_dict = {
'sources': getTileSources()
}
return render_to_response(
"cache/sources_list.html",
RequestContext(request, context_dict))
@login_required
def services_list(request):
#cache, stats = get_from_cache('default','stats_tilerequests')
context_dict = {
'services': TileService.objects.all().order_by('name','type'),
}
return render_to_response(
"cache/services_list.html",
RequestContext(request, context_dict))
@login_required
def origins_new(request, template="cache/origins_edit.html"):
if request.method == "POST":
origin_form = TileOriginForm(request.POST)
if origin_form.is_valid():
origin_form.save()
reloadTileOrigins(proxy=False)
reloadTileOrigins(proxy=True)
###
context_dict = {
'origin_form': TileOriginForm()
}
return HttpResponseRedirect(reverse('origins_list',args=()))
else:
context_dict = {
'origin_form': TileOriginForm()
}
return render_to_response(
template,
RequestContext(request, context_dict))
@login_required
def origins_edit(request, origin=None, template="cache/origins_edit.html"):
if request.method == "POST":
instance = TileOrigin.objects.get(name=origin)
origin_form = TileOriginForm(request.POST,instance=instance)
if origin_form.is_valid():
origin_form.save()
reloadTileOrigins(proxy=False)
reloadTileOrigins(proxy=True)
###
context_dict = {
'origin': instance,
'origin_form': TileOriginForm(instance=instance)
}
return HttpResponseRedirect(reverse('origins_list',args=()))
else:
instance = TileOrigin.objects.get(name=origin)
context_dict = {
'origin': instance,
'origin_form': TileOriginForm(instance=instance)
}
return render_to_response(
template,
RequestContext(request, context_dict))
@login_required
def sources_new(request, origin=None, template="cache/sources_edit.html"):
if request.method == "POST":
source_form = TileSourceForm(request.POST)
if source_form.is_valid():
source_form.save()
reloadTileSources(proxy=False)
reloadTileSources(proxy=True)
###
context_dict = {
'source_form': TileSourceForm()
}
return HttpResponseRedirect(reverse('sources_list',args=()))
else:
return HttpResponse(
'An unknown error has occurred.'+json.dumps(source_form.errors),
content_type="text/plain",
status=401
)
else:
source_form = None
if origin:
origin_object = TileOrigin.objects.get(name=origin)
source_form = TileSourceForm(initial={'origin': origin_object, 'auto': False, 'type': origin_object.type, 'url': origin_object.url, 'extensions': [u'png']})
else:
source_form = TileSourceForm()
context_dict = {
'source_form': source_form
}
return render_to_response(
template,
RequestContext(request, context_dict))
@login_required
def sources_edit(request, source=None, template="cache/sources_edit.html"):
if request.method == "POST":
instance = TileSource.objects.get(name=source)
source_form = TileSourceForm(request.POST,instance=instance)
if source_form.is_valid():
source_form.save()
reloadTileSources(proxy=False)
reloadTileSources(proxy=True)
###
context_dict = {
'source': instance,
'source_form': TileSourceForm(instance=instance)
}
return HttpResponseRedirect(reverse('sources_list',args=()))
else:
return HttpResponse(
'An unknown error has occurred.',
content_type="text/plain",
status=401
)
else:
instance = TileSource.objects.get(name=source)
context_dict = {
'source': instance,
'source_form': TileSourceForm(instance=instance)
}
return render_to_response(
template,
RequestContext(request, context_dict))
def sources_delete(request, source=None, template="cache/sources_delete.html"):
if request.method == "POST":
instance = TileSource.objects.get(name=source)
if instance:
instance.delete()
return HttpResponseRedirect(reverse('sources_list',args=()))
else:
return HttpResponse(
'Could not find source with name '+source,
content_type="text/plain",
status=401
)
else:
instance = TileSource.objects.get(name=source)
context_dict = {
'source': instance
}
return render_to_response(
template,
RequestContext(request, context_dict))
@login_required
def services_new(request, source=None, template="cache/services_edit.html"):
if request.method == "POST":
service_form = TileServiceForm(request.POST)
if service_form.is_valid():
service_form.save()
###
context_dict = {
'service_form': TileServiceForm()
}
return HttpResponseRedirect(reverse('services_list',args=()))
else:
service_form = None
if source:
source_object = TileSource.objects.get(name=source)
service_form = TileServiceForm(initial={'source': source_object, 'name': source_object.name, 'description': source_object.description, 'type': source_object.type, 'url': '/cache/tms/', 'extensions': [u'png']})
else:
service_form = TileServiceForm()
context_dict = {
'service_form': service_form
}
return render_to_response(
template,
RequestContext(request, context_dict))
@login_required
def services_edit(request, service=None, template="cache/services_edit.html"):
if request.method == "POST":
instance = TileService.objects.get(name=service)
service_form = TileServiceForm(request.POST,instance=instance)
if service_form.is_valid():
service_form.save()
###
context_dict = {
'service': instance,
'service_form': TileServiceForm(instance=instance)
}
return HttpResponseRedirect(reverse('services_list',args=()))
else:
return HttpResponse(
'An unknown error has occurred.',
content_type="text/plain",
status=401
)
else:
instance = TileService.objects.get(name=service)
context_dict = {
'service': instance,
'service_form': TileServiceForm(instance=instance)
}
return render_to_response(
template,
RequestContext(request, context_dict))
def services_delete(request, service=None, template="cache/services_delete.html"):
if request.method == "POST":
instance = TileService.objects.get(name=service)
if instance:
instance.delete()
return HttpResponseRedirect(reverse('services_list',args=()))
else:
return HttpResponse(
'Could not find service with name '+service,
content_type="text/plain",
status=401
)
else:
instance = TileService.objects.get(name=service)
context_dict = {
'service': instance
}
return render_to_response(
template,
RequestContext(request, context_dict))
@login_required
def origins_json(request):
now = datetime.datetime.now()
dt = now
#######
#stats = stats_tilerequest()
cache, stats = get_from_cache(
settings.CACHES['default']['LOCATION'],
settings.CACHES['default'],
'default',
'stats_tilerequests',
GEVENT_MONKEY_PATCH=settings.TILEJET_GEVENT_MONKEY_PATCH)
origins = []
for origin in TileOrigin.objects.all().order_by('name','type'):
link_geojson = settings.SITEURL+'cache/stats/export/geojson/15/origin/'+origin.name+'.geojson'
if stats:
origins.append({
'name': origin.name,
'description': origin.description,
'type': origin.type_title(),
'multiple': origin.multiple,
'auto': origin.auto,
'url': origin.url,
'requests_all': getValue(stats['by_origin'], origin.name,0),
'requests_year': getValue(getValue(stats['by_year_origin'],dt.strftime('%Y')),origin.name, 0),
'requests_month': getValue(getValue(stats['by_month_origin'],dt.strftime('%Y-%m')),origin.name, 0),
'requests_today': getValue(getValue(stats['by_date_origin'],dt.strftime('%Y-%m-%d')),origin.name, 0),
'link_geojson': link_geojson,
'link_geojsonio': 'http://geojson.io/#data=data:text/x-url,'+link_geojson
})
else:
origins.append({
'name': origin.name,
'description': origin.description,
'type': origin.type_title(),
'multiple': origin.multiple,
'auto': origin.auto,
'url': origin.url,
'requests_all': 0,
'requests_year': 0,
'requests_month': 0,
'requests_today': 0,
'link_geojson': link_geojson,
'link_geojsonio': 'http://geojson.io/#data=data:text/x-url,'+link_geojson
})
return HttpResponse(json.dumps(origins),
content_type="application/json"
)
@login_required
def sources_json(request):
now = datetime.datetime.now()
dt = now
#######
stats = None
if settings.STATS_SAVE_MEMORY:
cache, stats = get_from_cache(
settings.CACHES['default']['LOCATION'],
settings.CACHES['default'],
'default',
'stats_tilerequests',
GEVENT_MONKEY_PATCH=settings.TILEJET_GEVENT_MONKEY_PATCH)
if settings.STATS_SAVE_FILE and not stats:
stats = get_from_file(settings.STATS_REQUEST_FILE, filetype='json')
sources = []
#for source in TileSource.objects.all().order_by('name'):
for source in getTileSources():
link_geojson = settings.SITEURL+'cache/stats/export/geojson/15/source/'+source['name']+'.geojson'
link_proxy_internal = settings.SITEURL+'proxy/?url='+(source['url']).replace("{ext}","png")
link_proxy_external = ""
if source['type'] in [TYPE_TMS, TYPE_TMS_FLIPPED]:
link_proxy_external = settings.SITEURL+'cache/proxy/tms/origin/'+source['origin']+'/source/'+source['name']+'/{z}/{x}/{y}.png'
elif source['type'] == TYPE_BING:
link_proxy_external = settings.SITEURL+'cache/proxy/bing/origin/'+source['origin']+'/source/'+source['name']+'{u}.png'
if stats:
sources.append({
'name': source['name'],
'type': source['type_title'],
'origin': source['origin'],
'url': source['url'],
'requests_all': getValue(stats['by_source'], source['name'],0),
'requests_year': getValue(getValue(stats['by_year_source'],dt.strftime('%Y')),source['name'], 0),
'requests_month': getValue(getValue(stats['by_month_source'],dt.strftime('%Y-%m')),source['name'], 0),
'requests_today': getValue(getValue(stats['by_date_source'],dt.strftime('%Y-%m-%d')),source['name'], 0),
'link_proxy': link_proxy_internal,
'link_id': 'http://www.openstreetmap.org/edit#?background=custom:'+link_proxy_external,
'link_geojson': link_geojson,
'link_geojsonio': 'http://geojson.io/#data=data:text/x-url,'+link_geojson
})
else:
sources.append({
'name': source['name'],
'type': source['type_title'],
'origin': source['origin'],
'url': source['url'],
'requests_all': -1,
'requests_year': -1,
'requests_month': -1,
'requests_today': -1,
'link_proxy': link_proxy_internal,
'link_id': 'http://www.openstreetmap.org/edit#?background=custom:'+link_proxy_external,
'link_geojson': link_geojson,
'link_geojsonio': 'http://geojson.io/#data=data:text/x-url,'+link_geojson
})
return HttpResponse(json.dumps(sources),
content_type="application/json"
)
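# Illustrative note (not part of the original module): the *_json views above walk the
# nested stats dictionaries with getValue(). Assuming getValue(d, key, default=None)
# behaves like a None-safe dict lookup, an expression such as
#     getValue(getValue(stats['by_year_source'], dt.strftime('%Y')), source['name'], 0)
# is roughly equivalent to
#     stats.get('by_year_source', {}).get(dt.strftime('%Y'), {}).get(source['name'], 0)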
@login_required
def services_json(request):
now = datetime.datetime.now()
dt = now
#######
#stats = stats_tilerequest()
services = []
for service in TileService.objects.all().order_by('name'):
# link_geojson = settings.SITEURL+'cache/stats/export/geojson/15/source/'+source.name+'.geojson'
#link_proxy = settings.SITEURL+'cache/tms/proxy/?url='+(source.url).replace("{ext}","png")
link_proxy = service.url
services.append({
'name': service.name,
'type': service.type_title(),
'source': service.source.name,
'url': service.url,
# 'requests_all': getValue(stats['by_source'], source.name,0),
# 'requests_year': getValue(getValue(stats['by_year_source'],dt.strftime('%Y')),source.name, 0),
# 'requests_month': getValue(getValue(stats['by_month_source'],dt.strftime('%Y-%m')),source.name, 0),
# 'requests_today': getValue(getValue(stats['by_date_source'],dt.strftime('%Y-%m-%d')),source.name, 0),\
'link_proxy': link_proxy,
'link_id': 'http://www.openstreetmap.org/edit#?background=custom:'+link_proxy,
# 'link_geojson': link_geojson,
# 'link_geojsonio': 'http://geojson.io/#data=data:text/x-url,'+link_geojson
})
return HttpResponse(json.dumps(services),
content_type="application/json"
)
@login_required
def tile_tms(request, slug=None, z=None, x=None, y=None, u=None, ext=None):
match_tileservice = None
tileservices = getTileServices()
for tileservice in tileservices:
if tileservice['name'] == slug:
match_tileservice = tileservice
break
if match_tileservice:
match_tilesource = None
tilesources = getTileSources()
for tilesource in tilesources:
            if tilesource['name'] == match_tileservice['source']:
match_tilesource = tilesource
break
if match_tilesource:
return _requestTile(request,tileservice=match_tileservice,tilesource=match_tilesource,z=z,x=x,y=y,u=u,ext=ext)
else:
return HttpResponse(RequestContext(request, {}), status=404)
else:
return HttpResponse(RequestContext(request, {}), status=404)
def requestIndirectTiles(tilesource, ext, tiles, now):
if tiles:
for t in tiles:
tx, ty, tz = t
#taskRequestTile.delay(tilesource.id, tz, tx, ty, ext)
args = [tilesource['id'], tz, tx, ty, ext]
#Expires handled by global queue setting
try:
taskRequestTile.apply_async(args=args, kwargs=None, queue="requests")
except:
print "Error: Could not connect to indirect request queue."
line = "Error: Could not connect to indirect request queue."
logTileRequestError(line, now)
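# Illustrative note (not part of the original module): requestIndirectTiles() expects
# ``tiles`` as an iterable of (x, y, z) tuples (each entry is unpacked as tx, ty, tz) and
# enqueues one taskRequestTile job per tuple on the "requests" queue, e.g.:
#
#     requestIndirectTiles(tilesource, 'png', [(3, 5, 4), (4, 5, 4)], datetime.datetime.now())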
def _requestTile(request, tileservice=None, tilesource=None, tileorigin=None, z=None, x=None, y=None, u=None, ext=None):
print "_requestTile"
now = datetime.datetime.now()
ip = getIPAddress(request)
#==#
if not tileorigin:
tileorigin = tilesource['origin']
#==#
verbose = True
ix = None
iy = None
iyf = None
iz = None
indirectTiles = None
nearbyTiles = None
parentTiles = None
childrenTiles = None
gw_client, gw_logs, gw_requests = None, None, None
if settings.GEOWATCH_ENABLED:
gw_client, gw_logs, gw_requests = provision_client_logs_requests()
if u:
iz, ix, iy = quadkey_to_tms(u)
elif x and y and z:
ix = int(x)
iy = int(y)
iz = int(z)
if tilesource['type'] == TYPE_BING:
u = tms_to_quadkey(ix, iy, iz)
iy, iyf = getYValues(tileservice,tilesource,ix,iy,iz)
tile_bbox = tms_to_bbox(ix,iy,iz)
if tilesource['cacheable']:
indirectTiles = []
if settings.TILEJET['heuristic']['nearby']['enabled']:
ir = settings.TILEJET['heuristic']['nearby']['radius']
nearbyTiles = getNearbyTiles(ix, iy, iz, ir)
indirectTiles.extend(nearbyTiles)
#print "Nearby Tiles", nearbyTiles
#print "Indirect Tiles", indirectTiles
if settings.TILEJET['heuristic']['up']['enabled']:
iDepth = getValue(settings.TILEJET['heuristic']['up'],'depth')
if iDepth:
parentTiles = getParentTiles(ix, iy, iz, depth=iDepth)
else:
parentTiles = getParentTiles(ix, iy, iz)
indirectTiles.extend(parentTiles)
#print "Parent Tiles"
#print parentTiles
heuristic_down = settings.TILEJET['heuristic']['down']
if heuristic_down['enabled']:
depth = heuristic_down['depth']
minZoom = heuristic_down['minZoom']
maxZoom = heuristic_down['maxZoom']
childrenTiles = getChildrenTiles(ix, iy, iz, depth, minZoom, maxZoom)
indirectTiles.extend(childrenTiles)
#print "Children Tiles: "+str(len(childrenTiles))
#print childrenTiles
#print "indirectTiles: ", indirectTiles
if gw_requests and indirectTiles:
start = time.time()
gw_requests.send_tile_requests(
str(tilesource['id']),
indirectTiles,
extension=ext,
now=now)
print "Duration Q: ", (time.time() - start)
#Check if requested tile is within source's extents
returnBlankTile = False
returnErrorTile = False
intersects = True
if tilesource['extents']:
intersects = bbox_intersects_source(tilesource,ix,iyf,iz)
if not intersects:
returnBlankTile = True
validZoom = 0
#Check if inside source zoom levels
if tilesource['minZoom'] or tilesource['maxZoom']:
if (tilesource['minZoom'] and iz < tilesource['minZoom']):
validZoom = -1
elif (tilesource['maxZoom'] and iz > tilesource['maxZoom']):
validZoom = 1
if validZoom != 0:
#returnBlank = True
returnErrorTile = True
if returnBlankTile:
print "responding with blank image"
image = blankTile(width=256, height=256)
response = HttpResponse(content_type="image/png")
image.save(response, "PNG")
return response
if returnErrorTile:
print "responding with a red image"
image = redTile(width=256, height=256)
response = HttpResponse(content_type="image/png")
image.save(response, "PNG")
return response
tile = None
if tilesource['cacheable'] and iz >= settings.TILEJET['cache']['memory']['minZoom'] and iz <= settings.TILEJET['cache']['memory']['maxZoom']:
#key = "{layer},{z},{x},{y},{ext}".format(layer=tilesource.name,x=ix,y=iy,z=iz,ext=ext)
key = ",".join([tilesource['name'],str(iz),str(ix),str(iy),ext])
tilecache, tile = getTileFromCache(
settings.CACHES['tiles']['LOCATION'],
settings.CACHES['tiles'],
'tiles',
key,
True,
GEVENT_MONKEY_PATCH=True)
if not tilecache:
print "Error: Could not connect to cache (tiles)."
line = "Error: Could not connect to cache (tiles)."
logTileRequestError(line, now)
if tile:
if verbose:
print "cache hit for "+key
logTileRequest(tileorigin, tilesource['name'], x, y, z, ext, 'hit', now, ip, gw_logs=gw_logs)
else:
if tilecache and verbose:
print "cache miss for "+key
logTileRequest(tileorigin, tilesource['name'], x, y, z, ext, 'miss', now, ip, gw_logs=gw_logs)
if tilesource['type'] == TYPE_TMS:
tile = requestTileFromSource(tilesource=tilesource,x=ix,y=iy,z=iz,ext=ext,verbose=True)
elif tilesource['type'] == TYPE_TMS_FLIPPED:
tile = requestTileFromSource(tilesource=tilesource,x=ix,y=iyf,z=iz,ext=ext,verbose=True)
elif tilesource['type'] == TYPE_BING:
tile = requestTileFromSource(tilesource=tilesource,u=u,ext=ext,verbose=True)
if settings.ASYNC_WRITEBACK:
from base64 import b64encode
try:
taskWriteBackTile.apply_async(
args=[key, json.dumps(tile['headers']), b64encode(tile['data'])],
kwargs=None,
queue="writeback")
except:
print "Error: Could not connect to writeback queue."
line = "Error: Could not connect to writeback queue."
logTileRequestError(line, now)
else:
try:
tilecache.set(key, tile)
except:
print "Error: Could not write back tile synchronously."
line = "Error: Could not write back tile synchronously."
logTileRequestError(line, now)
else:
if verbose:
print "cache bypass for "+tilesource['name']+"/"+str(iz)+"/"+str(ix)+"/"+str(iy)
logTileRequest(tileorigin, tilesource['name'], x, y, z, ext, 'bypass', now, ip, gw_logs=gw_logs)
if tilesource['type'] == TYPE_TMS:
tile = requestTileFromSource(tilesource=tilesource,x=ix,y=iy,z=iz,ext=ext,verbose=True)
elif tilesource['type'] == TYPE_TMS_FLIPPED:
tile = requestTileFromSource(tilesource=tilesource,x=ix,y=iyf,z=iz,ext=ext,verbose=True)
elif tilesource['type'] == TYPE_BING:
tile = requestTileFromSource(tilesource=tilesource,u=u,ext=ext,verbose=True)
if not tile:
print "responding with a red image"
image = redTile(width=256, height=256)
response = HttpResponse(content_type="image/png")
image.save(response, "PNG")
return response
#print "Headers:"
#print tile['headers']
image = Image.open(StringIO.StringIO(tile['data']))
    # If the tile is blank, band.getextrema() should return (0, 0) for band 4
#Tile Cache watermarking is messing up bands
#bands = image.split()
#for band in bands:
# print band.getextrema()
response = HttpResponse(content_type="image/png")
image.save(response, "PNG")
return response
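# Illustrative sketch (not part of the original module): _requestTile() keys the in-memory
# tile cache by joining the source name, zoom, x, y and extension with commas. The helper
# below (a hypothetical name) only documents that format.
def _example_tile_cache_key(tilesource_name, iz, ix, iy, ext):
    # Mirrors the key composition used inside _requestTile(), e.g. "osm,4,3,5,png".
    return ",".join([tilesource_name, str(iz), str(ix), str(iy), ext])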
def proxy_tms(request, origin=None, slug=None, z=None, x=None, y=None, u=None, ext=None):
#starttime = time.clock()
    # Check Existing Tile Sources
match_tilesource = None
tilesources = getTileSources(proxy=True)
for tilesource in tilesources:
if tilesource['name'] == slug:
match_tilesource = tilesource
break
if match_tilesource:
if match_tilesource['origin'] != origin:
print "Origin is not correct. Tilesource is unique, but origin need to match too."
print tilesource['origin']
return None
else:
tile = _requestTile(
request,
tileservice=None,
tilesource=match_tilesource,
tileorigin=match_tilesource['origin'],
z=z,x=x,y=y,u=u,ext=ext)
#print "Time Elapsed: "+str(time.clock()-starttime)
return tile
# Check Existing Tile Origins to see if we need to create a new tile source
match_tileorigin = None
if origin:
tileorigins = getTileOrigins(proxy=True)
for tileorigin in tileorigins:
if tileorigin.name == origin:
match_tileorigin = tileorigin
break
if match_tileorigin:
to = match_tileorigin
if to.multiple:
ts_url = to.url.replace('{slug}', slug)
if TileSource.objects.filter(url=ts_url).count() > 0:
print "Error: This souldn't happen. You should have matched the tilesource earlier so you don't duplicate"
return None
exts = string_to_list(to.extensions)
ts_pattern = url_to_pattern(ts_url, extensions=exts)
ts = TileSource(auto=True,url=ts_url,pattern=ts_pattern,name=slug,type=to.type,extensions=exts,origin=to)
ts.save()
reloadTileSources(proxy=False)
reloadTileSources(proxy=True)
            new_tilesource = next((s for s in getTileSources(proxy=True) if s['name'] == ts.name), None)
            return _requestTile(request,tileservice=None,tilesource=new_tilesource,z=z,x=x,y=y,u=u,ext=ext)
else:
ts = TileSource(auto=True,url=to.url,pattern=to.pattern,name=to.name,type=to.type,extensions=to.extensions)
ts.save()
reloadTileSources(proxy=False)
reloadTileSources(proxy=True)
            new_tilesource = next((s for s in getTileSources(proxy=True) if s['name'] == ts.name), None)
            return _requestTile(request,tileservice=None,tilesource=new_tilesource,z=z,x=x,y=y,u=u,ext=ext)
else:
return None
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import urlparse
from keystoneclient import exceptions
from keystoneclient import httpclient
_logger = logging.getLogger(__name__)
class Client(httpclient.HTTPClient):
"""Client for the OpenStack Keystone pre-version calls API.
:param string endpoint: A user-supplied endpoint URL for the keystone
service.
:param integer timeout: Allows customization of the timeout for client
http requests. (optional)
Example::
>>> from keystoneclient.generic import client
>>> root = client.Client(auth_url=KEYSTONE_URL)
>>> versions = root.discover()
...
>>> from keystoneclient.v2_0 import client as v2client
>>> keystone = v2client.Client(auth_url=versions['v2.0']['url'])
...
>>> user = keystone.users.get(USER_ID)
>>> user.delete()
"""
def __init__(self, endpoint=None, **kwargs):
"""Initialize a new client for the Keystone v2.0 API."""
super(Client, self).__init__(endpoint=endpoint, **kwargs)
self.endpoint = endpoint
def discover(self, url=None):
"""Discover Keystone servers and return API versions supported.
:param url: optional url to test (without version)
Returns::
{
'message': 'Keystone found at http://127.0.0.1:5000/',
'v2.0': {
'status': 'beta',
'url': 'http://127.0.0.1:5000/v2.0/',
'id': 'v2.0'
},
}
"""
if url:
return self._check_keystone_versions(url)
else:
return self._local_keystone_exists()
def _local_keystone_exists(self):
"""Checks if Keystone is available on default local port 35357."""
return self._check_keystone_versions("http://localhost:35357")
def _check_keystone_versions(self, url):
"""Calls Keystone URL and detects the available API versions."""
try:
client = httpclient.HTTPClient()
resp, body = client.request(url, "GET",
headers={'Accept':
'application/json'})
# Multiple Choices status code is returned by the root
# identity endpoint, with references to one or more
# Identity API versions -- v3 spec
# some cases we get No Content
if resp.status_code in (200, 204, 300):
try:
results = {}
if 'version' in body:
results['message'] = "Keystone found at %s" % url
version = body['version']
# Stable/diablo incorrect format
id, status, version_url = \
self._get_version_info(version, url)
results[str(id)] = {"id": id,
"status": status,
"url": version_url}
return results
elif 'versions' in body:
# Correct format
results['message'] = "Keystone found at %s" % url
for version in body['versions']['values']:
id, status, version_url = \
self._get_version_info(version, url)
results[str(id)] = {"id": id,
"status": status,
"url": version_url}
return results
else:
results['message'] = ("Unrecognized response from %s"
% url)
return results
except KeyError:
raise exceptions.AuthorizationFailure()
elif resp.status_code == 305:
                return self._check_keystone_versions(resp.headers['location'])
else:
raise exceptions.from_response(resp, "GET", url)
except Exception as e:
_logger.exception(e)
def discover_extensions(self, url=None):
"""Discover Keystone extensions supported.
:param url: optional url to test (should have a version in it)
Returns::
{
'message': 'Keystone extensions at http://127.0.0.1:35357/v2',
'OS-KSEC2': 'OpenStack EC2 Credentials Extension',
}
"""
if url:
return self._check_keystone_extensions(url)
def _check_keystone_extensions(self, url):
"""Calls Keystone URL and detects the available extensions."""
try:
client = httpclient.HTTPClient()
if not url.endswith("/"):
url += '/'
resp, body = client.request("%sextensions" % url, "GET",
headers={'Accept':
'application/json'})
if resp.status_code in (200, 204): # some cases we get No Content
try:
results = {}
if 'extensions' in body:
if 'values' in body['extensions']:
# Parse correct format (per contract)
for extension in body['extensions']['values']:
alias, name = \
self._get_extension_info(
extension['extension']
)
results[alias] = name
return results
else:
# Support incorrect, but prevalent format
for extension in body['extensions']:
alias, name = \
self._get_extension_info(extension)
results[alias] = name
return results
else:
results['message'] = ("Unrecognized extensions "
"response from %s" % url)
return results
except KeyError:
raise exceptions.AuthorizationFailure()
elif resp.status_code == 305:
                return self._check_keystone_extensions(resp.headers['location'])
else:
raise exceptions.from_response(
resp, "GET", "%sextensions" % url)
except Exception as e:
_logger.exception(e)
@staticmethod
def _get_version_info(version, root_url):
"""Parses version information.
:param version: a dict of a Keystone version response
:param root_url: string url used to construct
the version if no URL is provided.
        :returns: tuple - (versionId, versionStatus, versionUrl)
"""
id = version['id']
status = version['status']
ref = urlparse.urljoin(root_url, id)
if 'links' in version:
for link in version['links']:
if link['rel'] == 'self':
ref = link['href']
break
return (id, status, ref)
@staticmethod
def _get_extension_info(extension):
"""Parses extension information.
:param extension: a dict of a Keystone extension response
:returns: tuple - (alias, name)
"""
alias = extension['alias']
name = extension['name']
return (alias, name)
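# Illustrative sketch (not part of the original module): chaining discover() and
# discover_extensions() against a local Keystone; the endpoint URL is a placeholder.
#
#     root = Client(endpoint="http://127.0.0.1:5000/")
#     versions = root.discover("http://127.0.0.1:5000/")
#     if versions and 'v2.0' in versions:
#         extensions = root.discover_extensions(versions['v2.0']['url'])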
|
|
import copy
import functools
import json
import logging
import random
import tempfile
from collections import namedtuple
from ruamel.yaml.comments import CommentedSeq, CommentedMap
import schema_salad.validate as validate
from schema_salad.sourceline import SourceLine, cmap
from typing import Any, Callable, cast, Dict, Generator, Iterable, List, Text, Union
from . import draft2tool
from . import expression
from .errors import WorkflowException
from .load_tool import load_tool
from .process import Process, shortname, uniquename
from .utils import aslist
_logger = logging.getLogger("cwltool")
WorkflowStateItem = namedtuple('WorkflowStateItem', ['parameter', 'value', 'success'])
def defaultMakeTool(toolpath_object, # type: Dict[Text, Any]
**kwargs # type: Any
):
# type: (...) -> Process
if not isinstance(toolpath_object, dict):
raise WorkflowException(u"Not a dict: `%s`" % toolpath_object)
if "class" in toolpath_object:
if toolpath_object["class"] == "CommandLineTool":
return draft2tool.CommandLineTool(toolpath_object, **kwargs)
elif toolpath_object["class"] == "ExpressionTool":
return draft2tool.ExpressionTool(toolpath_object, **kwargs)
elif toolpath_object["class"] == "Workflow":
return Workflow(toolpath_object, **kwargs)
raise WorkflowException(
u"Missing or invalid 'class' field in %s, expecting one of: CommandLineTool, ExpressionTool, Workflow" %
toolpath_object["id"])
def findfiles(wo, fn=None): # type: (Any, List) -> List[Dict[Text, Any]]
if fn is None:
fn = []
if isinstance(wo, dict):
if wo.get("class") == "File":
fn.append(wo)
findfiles(wo.get("secondaryFiles", None), fn)
else:
for w in wo.values():
findfiles(w, fn)
elif isinstance(wo, list):
for w in wo:
findfiles(w, fn)
return fn
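# Illustrative note (not part of the original module): findfiles() collects every
# {"class": "File", ...} object from arbitrarily nested dicts/lists, e.g.
#
#     findfiles({"out": [{"class": "File", "path": "a.txt"}]})
#     # -> [{"class": "File", "path": "a.txt"}]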
def match_types(sinktype, src, iid, inputobj, linkMerge, valueFrom):
# type: (Union[List[Text],Text], WorkflowStateItem, Text, Dict[Text, Any], Text, Text) -> bool
if isinstance(sinktype, list):
# Sink is union type
for st in sinktype:
if match_types(st, src, iid, inputobj, linkMerge, valueFrom):
return True
elif isinstance(src.parameter["type"], list):
# Source is union type
# Check that every source type is compatible with the sink.
for st in src.parameter["type"]:
srccopy = copy.deepcopy(src)
srccopy.parameter["type"] = st
if not match_types(st, srccopy, iid, inputobj, linkMerge, valueFrom):
return False
return True
elif linkMerge:
if iid not in inputobj:
inputobj[iid] = []
if linkMerge == "merge_nested":
inputobj[iid].append(src.value)
elif linkMerge == "merge_flattened":
if isinstance(src.value, list):
inputobj[iid].extend(src.value)
else:
inputobj[iid].append(src.value)
else:
raise WorkflowException(u"Unrecognized linkMerge enum '%s'" % linkMerge)
return True
elif valueFrom is not None or can_assign_src_to_sink(src.parameter["type"], sinktype) or sinktype == "Any":
# simply assign the value from state to input
inputobj[iid] = copy.deepcopy(src.value)
return True
return False
def check_types(srctype, sinktype, linkMerge, valueFrom):
# type: (Union[List[Text],Text], Union[List[Text],Text], Text, Text) -> Text
"""Check if the source and sink types are "pass", "warning", or "exception".
"""
if valueFrom:
return "pass"
elif not linkMerge:
if can_assign_src_to_sink(srctype, sinktype, strict=True):
return "pass"
elif can_assign_src_to_sink(srctype, sinktype, strict=False):
return "warning"
else:
return "exception"
else:
if not isinstance(sinktype, dict):
return "exception"
elif linkMerge == "merge_nested":
return check_types(srctype, sinktype["items"], None, None)
elif linkMerge == "merge_flattened":
if not isinstance(srctype, dict):
return check_types(srctype, sinktype["items"], None, None)
else:
return check_types(srctype, sinktype, None, None)
else:
raise WorkflowException(u"Unrecognized linkMerge enum '%s'" % linkMerge)
def can_assign_src_to_sink(src, sink, strict=False): # type: (Any, Any, bool) -> bool
"""Check for identical type specifications, ignoring extra keys like inputBinding.
src: admissible source types
sink: admissible sink types
In non-strict comparison, at least one source type must match one sink type.
In strict comparison, all source types must match at least one sink type.
"""
if sink == "Any":
return True
if isinstance(src, dict) and isinstance(sink, dict):
if src["type"] == "array" and sink["type"] == "array":
return can_assign_src_to_sink(src["items"], sink["items"], strict)
elif src["type"] == "record" and sink["type"] == "record":
return _compare_records(src, sink, strict)
return False
elif isinstance(src, list):
if strict:
for t in src:
if not can_assign_src_to_sink(t, sink):
return False
return True
else:
for t in src:
if can_assign_src_to_sink(t, sink):
return True
return False
elif isinstance(sink, list):
for t in sink:
if can_assign_src_to_sink(src, t):
return True
return False
else:
return src == sink
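# Illustrative sketch (not part of the original module): how strict and non-strict
# comparison differ for a union source type, using only the function defined above.
def _example_union_source_assignability():
    # Non-strict: at least one source type ("File") matches the sink, so this is True.
    relaxed = can_assign_src_to_sink(["null", "File"], "File", strict=False)
    # Strict: "null" does not match "File", so not every source type matches; this is False.
    strict = can_assign_src_to_sink(["null", "File"], "File", strict=True)
    return relaxed, strict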
def _compare_records(src, sink, strict=False):
# type: (Dict[Text, Any], Dict[Text, Any], bool) -> bool
"""Compare two records, ensuring they have compatible fields.
This handles normalizing record names, which will be relative to workflow
step, so that they can be compared.
"""
def _rec_fields(rec): # type: (Dict[Text, Any]) -> Dict[Text, Any]
out = {}
for field in rec["fields"]:
name = shortname(field["name"])
out[name] = field["type"]
return out
srcfields = _rec_fields(src)
sinkfields = _rec_fields(sink)
for key in sinkfields.iterkeys():
if (not can_assign_src_to_sink(
srcfields.get(key, "null"), sinkfields.get(key, "null"), strict)
and sinkfields.get(key) is not None):
_logger.info("Record comparison failure for %s and %s\n"
"Did not match fields for %s: %s and %s" %
(src["name"], sink["name"], key, srcfields.get(key),
sinkfields.get(key)))
return False
return True
def object_from_state(state, parms, frag_only, supportsMultipleInput, sourceField, incomplete=False):
# type: (Dict[Text, WorkflowStateItem], List[Dict[Text, Any]], bool, bool, Text, bool) -> Dict[Text, Any]
inputobj = {} # type: Dict[Text, Any]
for inp in parms:
iid = inp["id"]
if frag_only:
iid = shortname(iid)
if sourceField in inp:
if (isinstance(inp[sourceField], list) and not
supportsMultipleInput):
raise WorkflowException(
"Workflow contains multiple inbound links to a single "
"parameter but MultipleInputFeatureRequirement is not "
"declared.")
connections = aslist(inp[sourceField])
for src in connections:
if src in state and state[src] is not None and (state[src].success == "success" or incomplete):
if not match_types(
inp["type"], state[src], iid, inputobj,
inp.get("linkMerge", ("merge_nested"
if len(connections) > 1 else None)),
valueFrom=inp.get("valueFrom")):
raise WorkflowException(
u"Type mismatch between source '%s' (%s) and "
"sink '%s' (%s)" % (src,
state[src].parameter["type"], inp["id"],
inp["type"]))
elif src not in state:
raise WorkflowException(
u"Connect source '%s' on parameter '%s' does not "
"exist" % (src, inp["id"]))
elif not incomplete:
return None
elif "default" in inp:
inputobj[iid] = inp["default"]
elif "valueFrom" in inp:
inputobj[iid] = None
else:
raise WorkflowException(u"Value for %s not specified" % (inp["id"]))
return inputobj
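# Illustrative note (not part of the original module): object_from_state() maps workflow
# state onto a step's inputs via their "source" links. With hypothetical parameter ids:
#
#     state = {"#main/infile": WorkflowStateItem({"type": "File"},
#                                                {"class": "File", "path": "a.txt"}, "success")}
#     parms = [{"id": "#main/step/in1", "type": "File", "source": "#main/infile"}]
#     object_from_state(state, parms, True, False, "source")
#     # -> {"in1": {"class": "File", "path": "a.txt"}} (frag_only=True shortens the id)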
class WorkflowJobStep(object):
def __init__(self, step): # type: (Any) -> None
self.step = step
self.tool = step.tool
self.id = step.id
self.submitted = False
self.completed = False
self.iterable = None # type: Iterable
self.name = uniquename(u"step %s" % shortname(self.id))
def job(self, joborder, output_callback, **kwargs):
# type: (Dict[Text, Text], functools.partial[None], **Any) -> Generator
kwargs["part_of"] = self.name
kwargs["name"] = shortname(self.id)
for j in self.step.job(joborder, output_callback, **kwargs):
yield j
class WorkflowJob(object):
def __init__(self, workflow, **kwargs):
# type: (Workflow, **Any) -> None
self.workflow = workflow
self.tool = workflow.tool
self.steps = [WorkflowJobStep(s) for s in workflow.steps]
self.state = None # type: Dict[Text, WorkflowStateItem]
self.processStatus = None # type: Text
if "outdir" in kwargs:
self.outdir = kwargs["outdir"]
elif "tmp_outdir_prefix" in kwargs:
self.outdir = tempfile.mkdtemp(prefix=kwargs["tmp_outdir_prefix"])
else:
# tmp_outdir_prefix defaults to tmp, so this is unlikely to be used
self.outdir = tempfile.mkdtemp()
self.name = uniquename(u"workflow %s" % kwargs.get("name", shortname(self.workflow.tool.get("id", "embedded"))))
_logger.debug(u"[%s] initialized from %s", self.name,
self.tool.get("id", "workflow embedded in %s" % kwargs.get("part_of")))
def receive_output(self, step, outputparms, jobout, processStatus):
# type: (WorkflowJobStep, List[Dict[Text,Text]], Dict[Text,Text], Text) -> None
for i in outputparms:
if "id" in i:
if i["id"] in jobout:
self.state[i["id"]] = WorkflowStateItem(i, jobout[i["id"]], processStatus)
else:
_logger.error(u"[%s] Output is missing expected field %s", step.name, i["id"])
processStatus = "permanentFail"
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(u"[%s] produced output %s", step.name, json.dumps(jobout, indent=4))
if processStatus != "success":
if self.processStatus != "permanentFail":
self.processStatus = processStatus
_logger.warn(u"[%s] completed %s", step.name, processStatus)
else:
_logger.info(u"[%s] completed %s", step.name, processStatus)
step.completed = True
self.made_progress = True
def try_make_job(self, step, **kwargs):
# type: (WorkflowJobStep, **Any) -> Generator
inputparms = step.tool["inputs"]
outputparms = step.tool["outputs"]
supportsMultipleInput = bool(self.workflow.get_requirement(
"MultipleInputFeatureRequirement")[0])
try:
inputobj = object_from_state(
self.state, inputparms, False, supportsMultipleInput, "source")
if inputobj is None:
_logger.debug(u"[%s] job step %s not ready", self.name, step.id)
return
if step.submitted:
return
_logger.debug(u"[%s] starting %s", self.name, step.name)
callback = functools.partial(self.receive_output, step, outputparms)
valueFrom = {
i["id"]: i["valueFrom"] for i in step.tool["inputs"]
if "valueFrom" in i}
if len(valueFrom) > 0 and not bool(self.workflow.get_requirement("StepInputExpressionRequirement")[0]):
raise WorkflowException(
"Workflow step contains valueFrom but StepInputExpressionRequirement not in requirements")
vfinputs = {shortname(k): v for k, v in inputobj.iteritems()}
def postScatterEval(io):
# type: (Dict[Text, Any]) -> Dict[Text, Any]
shortio = {shortname(k): v for k, v in io.iteritems()}
def valueFromFunc(k, v): # type: (Any, Any) -> Any
if k in valueFrom:
return expression.do_eval(
valueFrom[k], shortio, self.workflow.requirements,
None, None, {}, context=v)
else:
return v
return {k: valueFromFunc(k, v) for k, v in io.items()}
if "scatter" in step.tool:
scatter = aslist(step.tool["scatter"])
method = step.tool.get("scatterMethod")
if method is None and len(scatter) != 1:
raise WorkflowException("Must specify scatterMethod when scattering over multiple inputs")
kwargs["postScatterEval"] = postScatterEval
tot = 1
emptyscatter = [shortname(s) for s in scatter if len(inputobj[s]) == 0]
if emptyscatter:
_logger.warn(u"[job %s] Notice: scattering over empty input in '%s'. All outputs will be empty.", step.name, "', '".join(emptyscatter))
if method == "dotproduct" or method is None:
jobs = dotproduct_scatter(step, inputobj, scatter,
cast( # known bug with mypy
# https://github.com/python/mypy/issues/797
Callable[[Any], Any], callback), **kwargs)
elif method == "nested_crossproduct":
jobs = nested_crossproduct_scatter(step, inputobj,
scatter, cast(Callable[[Any], Any], callback),
# known bug in mypy
# https://github.com/python/mypy/issues/797
**kwargs)
elif method == "flat_crossproduct":
jobs = cast(Generator,
flat_crossproduct_scatter(step, inputobj,
scatter,
cast(Callable[[Any], Any],
# known bug in mypy
# https://github.com/python/mypy/issues/797
callback), 0, **kwargs))
else:
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(u"[job %s] job input %s", step.name, json.dumps(inputobj, indent=4))
inputobj = postScatterEval(inputobj)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(u"[job %s] evaluated job input to %s", step.name, json.dumps(inputobj, indent=4))
jobs = step.job(inputobj, callback, **kwargs)
step.submitted = True
for j in jobs:
yield j
except WorkflowException:
raise
except Exception:
_logger.exception("Unhandled exception")
self.processStatus = "permanentFail"
step.completed = True
def run(self, **kwargs):
_logger.debug(u"[%s] workflow starting", self.name)
def job(self, joborder, output_callback, **kwargs):
# type: (Dict[Text, Any], Callable[[Any, Any], Any], **Any) -> Generator
self.state = {}
self.processStatus = "success"
if "outdir" in kwargs:
del kwargs["outdir"]
for e, i in enumerate(self.tool["inputs"]):
with SourceLine(self.tool["inputs"], e, WorkflowException):
iid = shortname(i["id"])
if iid in joborder:
self.state[i["id"]] = WorkflowStateItem(i, copy.deepcopy(joborder[iid]), "success")
elif "default" in i:
self.state[i["id"]] = WorkflowStateItem(i, copy.deepcopy(i["default"]), "success")
else:
raise WorkflowException(
u"Input '%s' not in input object and does not have a default value." % (i["id"]))
for s in self.steps:
for out in s.tool["outputs"]:
self.state[out["id"]] = None
completed = 0
while completed < len(self.steps):
self.made_progress = False
for step in self.steps:
if kwargs.get("on_error", "stop") == "stop" and self.processStatus != "success":
break
if not step.submitted:
try:
step.iterable = self.try_make_job(step, **kwargs)
except WorkflowException as e:
_logger.error(u"[%s] Cannot make job: %s", step.name, e)
_logger.debug("", exc_info=True)
self.processStatus = "permanentFail"
if step.iterable:
try:
for newjob in step.iterable:
if kwargs.get("on_error", "stop") == "stop" and self.processStatus != "success":
break
if newjob:
self.made_progress = True
yield newjob
else:
break
except WorkflowException as e:
_logger.error(u"[%s] Cannot make job: %s", step.name, e)
_logger.debug("", exc_info=True)
self.processStatus = "permanentFail"
completed = sum(1 for s in self.steps if s.completed)
if not self.made_progress and completed < len(self.steps):
if self.processStatus != "success":
break
else:
yield None
supportsMultipleInput = bool(self.workflow.get_requirement("MultipleInputFeatureRequirement")[0])
try:
wo = object_from_state(self.state, self.tool["outputs"], True, supportsMultipleInput, "outputSource",
incomplete=True)
except WorkflowException as e:
_logger.error(u"[%s] Cannot collect workflow output: %s", self.name, e)
wo = {}
self.processStatus = "permanentFail"
_logger.info(u"[%s] outdir is %s", self.name, self.outdir)
output_callback(wo, self.processStatus)
class Workflow(Process):
def __init__(self, toolpath_object, **kwargs):
# type: (Dict[Text, Any], **Any) -> None
super(Workflow, self).__init__(toolpath_object, **kwargs)
kwargs["requirements"] = self.requirements
kwargs["hints"] = self.hints
makeTool = kwargs.get("makeTool")
self.steps = [] # type: List[WorkflowStep]
validation_errors = []
for n, step in enumerate(self.tool.get("steps", [])):
try:
self.steps.append(WorkflowStep(step, n, **kwargs))
except validate.ValidationException as v:
validation_errors.append(v)
if validation_errors:
raise validate.ValidationException("\n".join(str(v) for v in validation_errors))
random.shuffle(self.steps)
# statically validate data links instead of doing it at runtime.
workflow_inputs = self.tool["inputs"]
workflow_outputs = self.tool["outputs"]
step_inputs = [] # type: List[Any]
step_outputs = [] # type: List[Any]
for step in self.steps:
step_inputs.extend(step.tool["inputs"])
step_outputs.extend(step.tool["outputs"])
static_checker(workflow_inputs, workflow_outputs, step_inputs, step_outputs)
def job(self,
job_order, # type: Dict[Text, Text]
output_callbacks, # type: Callable[[Any, Any], Any]
**kwargs # type: Any
):
# type: (...) -> Generator[Any, None, None]
builder = self._init_job(job_order, **kwargs)
wj = WorkflowJob(self, **kwargs)
yield wj
kwargs["part_of"] = u"workflow %s" % wj.name
for w in wj.job(builder.job, output_callbacks, **kwargs):
yield w
def visit(self, op):
op(self.tool)
for s in self.steps:
s.visit(op)
def static_checker(workflow_inputs, workflow_outputs, step_inputs, step_outputs):
# type: (List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]]) -> None
"""Check if all source and sink types of a workflow are compatible before run time.
"""
# source parameters: workflow_inputs and step_outputs
# sink parameters: step_inputs and workflow_outputs
# make a dictionary of source parameters, indexed by the "id" field
src_parms = workflow_inputs + step_outputs
src_dict = {}
for parm in src_parms:
src_dict[parm["id"]] = parm
step_inputs_val = check_all_types(src_dict, step_inputs, "source")
workflow_outputs_val = check_all_types(src_dict, workflow_outputs, "outputSource")
warnings = step_inputs_val["warning"] + workflow_outputs_val["warning"]
exceptions = step_inputs_val["exception"] + workflow_outputs_val["exception"]
warning_msgs = []
exception_msgs = []
for warning in warnings:
src = warning.src
sink = warning.sink
linkMerge = warning.linkMerge
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s is partially incompatible"
% (shortname(src["id"]), json.dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json.dumps(sink["type"])))
if linkMerge:
msg += "\n" + SourceLine(sink).makeError(" sink has linkMerge method %s" % linkMerge)
warning_msgs.append(msg)
for exception in exceptions:
src = exception.src
sink = exception.sink
linkMerge = exception.linkMerge
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s is incompatible"
% (shortname(src["id"]), json.dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json.dumps(sink["type"])))
if linkMerge:
msg += "\n" + SourceLine(sink).makeError(" sink has linkMerge method %s" % linkMerge)
exception_msgs.append(msg)
for sink in step_inputs:
if ('null' != sink["type"] and 'null' not in sink["type"]
and "source" not in sink and "default" not in sink and "valueFrom" not in sink):
msg = SourceLine(sink).makeError(
"Required parameter '%s' does not have source, default, or valueFrom expression"
% shortname(sink["id"]))
exception_msgs.append(msg)
all_warning_msg = "\n".join(warning_msgs)
all_exception_msg = "\n".join(exception_msgs)
if warnings:
_logger.warn("Workflow checker warning:")
_logger.warn(all_warning_msg)
if exceptions:
raise validate.ValidationException(all_exception_msg)
SrcSink = namedtuple("SrcSink", ["src", "sink", "linkMerge"])
def check_all_types(src_dict, sinks, sourceField):
# type: (Dict[Text, Any], List[Dict[Text, Any]], Text) -> Dict[Text, List[SrcSink]]
    # sourceField is either "source" or "outputSource"
"""Given a list of sinks, check if their types match with the types of their sources.
"""
validation = {"warning": [], "exception": []} # type: Dict[Text, List[SrcSink]]
for sink in sinks:
if sourceField in sink:
valueFrom = sink.get("valueFrom")
if isinstance(sink[sourceField], list):
srcs_of_sink = [src_dict[parm_id] for parm_id in sink[sourceField]]
linkMerge = sink.get("linkMerge", ("merge_nested"
if len(sink[sourceField]) > 1 else None))
else:
parm_id = sink[sourceField]
srcs_of_sink = [src_dict[parm_id]]
linkMerge = None
for src in srcs_of_sink:
check_result = check_types(src["type"], sink["type"], linkMerge, valueFrom)
if check_result == "warning":
validation["warning"].append(SrcSink(src, sink, linkMerge))
elif check_result == "exception":
validation["exception"].append(SrcSink(src, sink, linkMerge))
return validation
class WorkflowStep(Process):
def __init__(self, toolpath_object, pos, **kwargs):
# type: (Dict[Text, Any], int, **Any) -> None
if "id" in toolpath_object:
self.id = toolpath_object["id"]
else:
self.id = "#step" + Text(pos)
kwargs["requirements"] = kwargs.get("requirements", []) + toolpath_object.get("requirements", [])
kwargs["hints"] = kwargs.get("hints", []) + toolpath_object.get("hints", [])
try:
if isinstance(toolpath_object["run"], dict):
self.embedded_tool = kwargs.get("makeTool")(toolpath_object["run"], **kwargs)
else:
self.embedded_tool = load_tool(
toolpath_object["run"], kwargs.get("makeTool"), kwargs,
enable_dev=kwargs.get("enable_dev"),
strict=kwargs.get("strict"),
fetcher_constructor=kwargs.get("fetcher_constructor"))
except validate.ValidationException as v:
raise WorkflowException(
u"Tool definition %s failed validation:\n%s" %
(toolpath_object["run"], validate.indent(str(v))))
validation_errors = []
self.tool = toolpath_object = copy.deepcopy(toolpath_object)
bound = set()
for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
toolpath_object[toolfield] = []
for n, step_entry in enumerate(toolpath_object[stepfield]):
if isinstance(step_entry, (str, unicode)):
param = CommentedMap() # type: CommentedMap
inputid = step_entry
else:
param = CommentedMap(step_entry.iteritems())
inputid = step_entry["id"]
shortinputid = shortname(inputid)
found = False
for tool_entry in self.embedded_tool.tool[toolfield]:
frag = shortname(tool_entry["id"])
if frag == shortinputid:
param.update(tool_entry) # type: ignore
found = True
bound.add(frag)
break
if not found:
if stepfield == "in":
param["type"] = "Any"
else:
validation_errors.append(
SourceLine(self.tool["out"], n).makeError(
"Workflow step output '%s' does not correspond to" % shortname(step_entry))
+ "\n" + SourceLine(self.embedded_tool.tool, "outputs").makeError(
" tool output (expected '%s')" % (
"', '".join(
[shortname(tool_entry["id"]) for tool_entry in
self.embedded_tool.tool[toolfield]]))))
param["id"] = inputid
param.lc.line = toolpath_object[stepfield].lc.data[n][0]
param.lc.col = toolpath_object[stepfield].lc.data[n][1]
param.lc.filename = toolpath_object[stepfield].lc.filename
toolpath_object[toolfield].append(param)
missing = []
for i, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
if shortname(tool_entry["id"]) not in bound:
if "null" not in tool_entry["type"] and "default" not in tool_entry:
missing.append(shortname(tool_entry["id"]))
if missing:
validation_errors.append(SourceLine(self.tool, "in").makeError(
"Step is missing required parameter%s '%s'" % ("s" if len(missing) > 1 else "", "', '".join(missing))))
if validation_errors:
raise validate.ValidationException("\n".join(validation_errors))
super(WorkflowStep, self).__init__(toolpath_object, **kwargs)
if self.embedded_tool.tool["class"] == "Workflow":
(feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
if not feature:
raise WorkflowException(
"Workflow contains embedded workflow but SubworkflowFeatureRequirement not in requirements")
if "scatter" in self.tool:
(feature, _) = self.get_requirement("ScatterFeatureRequirement")
if not feature:
raise WorkflowException("Workflow contains scatter but ScatterFeatureRequirement not in requirements")
inputparms = copy.deepcopy(self.tool["inputs"])
outputparms = copy.deepcopy(self.tool["outputs"])
scatter = aslist(self.tool["scatter"])
method = self.tool.get("scatterMethod")
if method is None and len(scatter) != 1:
raise validate.ValidationException("Must specify scatterMethod when scattering over multiple inputs")
inp_map = {i["id"]: i for i in inputparms}
for s in scatter:
if s not in inp_map:
raise validate.ValidationException(
SourceLine(self.tool, "scatter").makeError(u"Scatter parameter '%s' does not correspond to an input parameter of this "
u"step, expecting '%s'" % (shortname(s), "', '".join(shortname(k) for k in inp_map.keys()))))
inp_map[s]["type"] = {"type": "array", "items": inp_map[s]["type"]}
if self.tool.get("scatterMethod") == "nested_crossproduct":
nesting = len(scatter)
else:
nesting = 1
for r in xrange(0, nesting):
for op in outputparms:
op["type"] = {"type": "array", "items": op["type"]}
self.tool["inputs"] = inputparms
self.tool["outputs"] = outputparms
def receive_output(self, output_callback, jobout, processStatus):
# type: (Callable[...,Any], Dict[Text, Text], Text) -> None
# _logger.debug("WorkflowStep output from run is %s", jobout)
output = {}
for i in self.tool["outputs"]:
field = shortname(i["id"])
if field in jobout:
output[i["id"]] = jobout[field]
else:
processStatus = "permanentFail"
output_callback(output, processStatus)
def job(self,
job_order, # type: Dict[Text, Text]
output_callbacks, # type: Callable[[Any, Any], Any]
**kwargs # type: Any
):
# type: (...) -> Generator[Any, None, None]
for i in self.tool["inputs"]:
p = i["id"]
field = shortname(p)
job_order[field] = job_order[i["id"]]
del job_order[i["id"]]
try:
for t in self.embedded_tool.job(job_order,
functools.partial(
self.receive_output,
output_callbacks),
**kwargs):
yield t
except WorkflowException:
_logger.error(u"Exception on step '%s'", kwargs.get("name"))
raise
except Exception as e:
_logger.exception("Unexpected exception")
raise WorkflowException(Text(e))
def visit(self, op):
self.embedded_tool.visit(op)
class ReceiveScatterOutput(object):
def __init__(self, output_callback, dest):
# type: (Callable[..., Any], Dict[Text,List[Text]]) -> None
self.dest = dest
self.completed = 0
self.processStatus = u"success"
self.total = None # type: int
self.output_callback = output_callback
def receive_scatter_output(self, index, jobout, processStatus):
# type: (int, Dict[Text, Text], Text) -> None
for k, v in jobout.items():
self.dest[k][index] = v
if processStatus != "success":
if self.processStatus != "permanentFail":
self.processStatus = processStatus
self.completed += 1
if self.completed == self.total:
self.output_callback(self.dest, self.processStatus)
def setTotal(self, total): # type: (int) -> None
self.total = total
if self.completed == self.total:
self.output_callback(self.dest, self.processStatus)
def parallel_steps(steps, rc, kwargs): # type: (List[Generator], ReceiveScatterOutput, Dict[str, Any]) -> Generator
while rc.completed < rc.total:
made_progress = False
for index in xrange(len(steps)):
step = steps[index]
if kwargs.get("on_error", "stop") == "stop" and rc.processStatus != "success":
break
try:
for j in step:
if kwargs.get("on_error", "stop") == "stop" and rc.processStatus != "success":
break
if j:
made_progress = True
yield j
else:
break
except WorkflowException as e:
_logger.error(u"Cannot make scatter job: %s", e)
_logger.debug("", exc_info=True)
rc.receive_scatter_output(index, {}, "permanentFail")
if not made_progress and rc.completed < rc.total:
yield None
def dotproduct_scatter(process, joborder, scatter_keys, output_callback, **kwargs):
# type: (WorkflowJobStep, Dict[Text, Any], List[Text], Callable[..., Any], **Any) -> Generator
l = None
for s in scatter_keys:
if l is None:
l = len(joborder[s])
elif l != len(joborder[s]):
raise WorkflowException("Length of input arrays must be equal when performing dotproduct scatter.")
output = {} # type: Dict[Text,List[Text]]
for i in process.tool["outputs"]:
output[i["id"]] = [None] * l
rc = ReceiveScatterOutput(output_callback, output)
steps = []
for n in range(0, l):
jo = copy.copy(joborder)
for s in scatter_keys:
jo[s] = joborder[s][n]
jo = kwargs["postScatterEval"](jo)
steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, n), **kwargs))
rc.setTotal(l)
return parallel_steps(steps, rc, kwargs)
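# Illustrative note (not part of the original module): dotproduct_scatter() pairs the i-th
# elements of every scattered input, so a job order of {"a": [1, 2], "b": ["x", "y"]}
# scattered over ["a", "b"] yields two jobs with inputs {"a": 1, "b": "x"} and
# {"a": 2, "b": "y"}; arrays of different lengths raise a WorkflowException.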
def nested_crossproduct_scatter(process, joborder, scatter_keys, output_callback, **kwargs):
# type: (WorkflowJobStep, Dict[Text, Any], List[Text], Callable[..., Any], **Any) -> Generator
scatter_key = scatter_keys[0]
l = len(joborder[scatter_key])
output = {} # type: Dict[Text,List[Text]]
for i in process.tool["outputs"]:
output[i["id"]] = [None] * l
rc = ReceiveScatterOutput(output_callback, output)
steps = []
for n in range(0, l):
jo = copy.copy(joborder)
jo[scatter_key] = joborder[scatter_key][n]
if len(scatter_keys) == 1:
jo = kwargs["postScatterEval"](jo)
steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, n), **kwargs))
else:
# known bug with mypy, https://github.com/python/mypy/issues/797
casted = cast(Callable[[Any], Any], functools.partial(rc.receive_scatter_output, n))
steps.append(nested_crossproduct_scatter(process, jo,
scatter_keys[1:],
casted, **kwargs))
rc.setTotal(l)
return parallel_steps(steps, rc, kwargs)
def crossproduct_size(joborder, scatter_keys):
# type: (Dict[Text, Any], List[Text]) -> int
scatter_key = scatter_keys[0]
if len(scatter_keys) == 1:
sum = len(joborder[scatter_key])
else:
sum = 0
for n in range(0, len(joborder[scatter_key])):
jo = copy.copy(joborder)
jo[scatter_key] = joborder[scatter_key][n]
sum += crossproduct_size(joborder, scatter_keys[1:])
return sum
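# Illustrative note (not part of the original module): crossproduct_size() multiplies the
# scattered array lengths, e.g. for joborder = {"a": [1, 2], "b": ["x", "y", "z"]} and
# scatter_keys = ["a", "b"] it returns 2 * 3 = 6, which flat_crossproduct_scatter() uses
# to pre-size its output arrays.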
def flat_crossproduct_scatter(process, joborder, scatter_keys, output_callback, startindex, **kwargs):
# type: (WorkflowJobStep, Dict[Text, Any], List[Text], Union[ReceiveScatterOutput,Callable[..., Any]], int, **Any) -> Union[List[Generator], Generator]
scatter_key = scatter_keys[0]
l = len(joborder[scatter_key])
rc = None # type: ReceiveScatterOutput
if startindex == 0 and not isinstance(output_callback, ReceiveScatterOutput):
output = {} # type: Dict[Text,List[Text]]
for i in process.tool["outputs"]:
output[i["id"]] = [None] * crossproduct_size(joborder, scatter_keys)
rc = ReceiveScatterOutput(output_callback, output)
elif isinstance(output_callback, ReceiveScatterOutput):
rc = output_callback
else:
raise Exception("Unhandled code path. Please report this.")
steps = []
put = startindex
for n in range(0, l):
jo = copy.copy(joborder)
jo[scatter_key] = joborder[scatter_key][n]
if len(scatter_keys) == 1:
jo = kwargs["postScatterEval"](jo)
steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, put), **kwargs))
put += 1
else:
add = flat_crossproduct_scatter(process, jo, scatter_keys[1:], rc, put, **kwargs)
put += len(cast(List[Generator], add))
steps.extend(add)
if startindex == 0 and not isinstance(output_callback, ReceiveScatterOutput):
rc.setTotal(put)
return parallel_steps(steps, rc, kwargs)
else:
return steps
|
|
import graphene
import graphene_django_optimizer as gql_optimizer
from django.core.exceptions import ValidationError
from graphene import relay
from ...order import models
from ...order.models import FulfillmentStatus
from ...product.templatetags.product_images import get_product_image_thumbnail
from ..account.types import User
from ..core.connection import CountableDjangoObjectType
from ..core.types.common import Image
from ..core.types.money import Money, TaxedMoney
from ..payment.types import OrderAction, Payment, PaymentChargeStatusEnum
from ..product.types import ProductVariant
from ..shipping.types import ShippingMethod
from .enums import OrderEventsEmailsEnum, OrderEventsEnum
from .utils import applicable_shipping_methods, validate_draft_order
class OrderEvent(CountableDjangoObjectType):
date = graphene.types.datetime.DateTime(
        description='Date when the event happened, in ISO 8601 format.')
type = OrderEventsEnum(description='Order event type')
user = graphene.Field(
User, id=graphene.Argument(graphene.ID),
description='User who performed the action.')
message = graphene.String(
description='Content of a note added to the order.')
email = graphene.String(description='Email of the customer')
email_type = OrderEventsEmailsEnum(
description='Type of an email sent to the customer')
amount = graphene.Float(description='Amount of money.')
quantity = graphene.Int(description='Number of items.')
composed_id = graphene.String(
description='Composed id of the Fulfillment.')
order_number = graphene.String(
description='User-friendly number of an order.')
oversold_items = graphene.List(
graphene.String, description='List of oversold lines names.')
class Meta:
description = 'History log of the order.'
model = models.OrderEvent
interfaces = [relay.Node]
only_fields = ['id']
def resolve_email(self, _info):
return self.parameters.get('email', None)
def resolve_email_type(self, _info):
return self.parameters.get('email_type', None)
def resolve_amount(self, _info):
amount = self.parameters.get('amount', None)
return float(amount) if amount else None
def resolve_quantity(self, _info):
quantity = self.parameters.get('quantity', None)
return int(quantity) if quantity else None
def resolve_message(self, _info):
return self.parameters.get('message', None)
def resolve_composed_id(self, _info):
return self.parameters.get('composed_id', None)
def resolve_oversold_items(self, _info):
return self.parameters.get('oversold_items', None)
def resolve_order_number(self, _info):
return self.order_id
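# Illustrative note (not part of the original schema module): the resolve_* methods above
# read from the ``parameters`` mapping on the underlying OrderEvent model, so an event
# stored with parameters={'amount': '10.00', 'quantity': 3} resolves ``amount`` to 10.0
# and ``quantity`` to 3, while missing keys resolve to None.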
class FulfillmentLine(CountableDjangoObjectType):
order_line = graphene.Field(lambda: OrderLine)
class Meta:
description = 'Represents line of the fulfillment.'
interfaces = [relay.Node]
model = models.FulfillmentLine
only_fields = ['id', 'quantity']
@gql_optimizer.resolver_hints(prefetch_related='order_line')
def resolve_order_line(self, _info):
return self.order_line
class Fulfillment(CountableDjangoObjectType):
lines = gql_optimizer.field(
graphene.List(
FulfillmentLine,
description='List of lines for the fulfillment'),
model_field='lines')
status_display = graphene.String(
description='User-friendly fulfillment status.')
class Meta:
description = 'Represents order fulfillment.'
interfaces = [relay.Node]
model = models.Fulfillment
only_fields = [
'fulfillment_order', 'id', 'shipping_date', 'status',
'tracking_number']
def resolve_lines(self, _info):
return self.lines.all()
def resolve_status_display(self, _info):
return self.get_status_display()
class OrderLine(CountableDjangoObjectType):
thumbnail_url = graphene.String(
description='The URL of a main thumbnail for the ordered product.',
size=graphene.Int(description='Size of the image'),
deprecation_reason=(
'thumbnailUrl is deprecated, use thumbnail instead'))
thumbnail = graphene.Field(
Image, description='The main thumbnail for the ordered product.',
size=graphene.Argument(graphene.Int, description='Size of thumbnail'))
unit_price = graphene.Field(
TaxedMoney, description='Price of the single item in the order line.')
variant = graphene.Field(
ProductVariant,
required=False,
description='''
A purchased product variant. Note: this field may be null if the
            variant has been removed from stock.''')
class Meta:
description = 'Represents order line of particular order.'
model = models.OrderLine
interfaces = [relay.Node]
only_fields = [
'digital_content_url', 'id', 'is_shipping_required',
'product_name', 'product_sku', 'quantity', 'quantity_fulfilled',
'tax_rate', 'translated_product_name']
@gql_optimizer.resolver_hints(
prefetch_related=['variant__images', 'variant__product__images'])
def resolve_thumbnail_url(self, info, size=None):
if not self.variant_id:
return None
if not size:
size = 255
url = get_product_image_thumbnail(
self.variant.get_first_image(), size, method='thumbnail')
return info.context.build_absolute_uri(url)
@gql_optimizer.resolver_hints(
prefetch_related=['variant__images', 'variant__product__images'])
def resolve_thumbnail(self, info, *, size=None):
if not self.variant_id:
return None
if not size:
size = 255
image = self.variant.get_first_image()
url = get_product_image_thumbnail(image, size, method='thumbnail')
alt = image.alt if image else None
return Image(alt=alt, url=info.context.build_absolute_uri(url))
def resolve_unit_price(self, _info):
return self.unit_price
class Order(CountableDjangoObjectType):
fulfillments = gql_optimizer.field(
graphene.List(
Fulfillment, required=True,
description='List of shipments for the order.'),
model_field='fulfillments')
lines = gql_optimizer.field(
graphene.List(
lambda: OrderLine, required=True,
description='List of order lines.'),
model_field='lines')
actions = graphene.List(
OrderAction, description='''List of actions that can be performed in
the current state of an order.''', required=True)
available_shipping_methods = graphene.List(
ShippingMethod, required=False,
description='Shipping methods that can be used with this order.')
number = graphene.String(description='User-friendly number of an order.')
is_paid = graphene.Boolean(
description='Informs if an order is fully paid.')
payment_status = PaymentChargeStatusEnum(
description='Internal payment status.')
payment_status_display = graphene.String(
description='User-friendly payment status.')
payments = gql_optimizer.field(
graphene.List(
Payment, description='List of payments for the order'),
model_field='payments')
total = graphene.Field(
TaxedMoney, description='Total amount of the order.')
shipping_price = graphene.Field(
TaxedMoney, description='Total price of shipping.')
subtotal = graphene.Field(
TaxedMoney,
description='The sum of line prices not including shipping.')
status_display = graphene.String(description='User-friendly order status.')
can_finalize = graphene.Boolean(
description=(
            'Informs whether a draft order can be finalized '
'(turned into a regular order).'), required=True)
total_authorized = graphene.Field(
Money, description='Amount authorized for the order.')
total_captured = graphene.Field(
Money, description='Amount captured by payment.')
events = gql_optimizer.field(
graphene.List(
OrderEvent,
description='List of events associated with the order.'),
model_field='events')
total_balance = graphene.Field(
Money,
description='''The difference between the paid and the order total
amount.''', required=True)
user_email = graphene.String(
required=False, description='Email address of the customer.')
is_shipping_required = graphene.Boolean(
description='Returns True, if order requires shipping.',
required=True)
class Meta:
description = 'Represents an order in the shop.'
interfaces = [relay.Node]
model = models.Order
only_fields = [
'billing_address', 'created', 'customer_note', 'discount_amount',
'discount_name', 'display_gross_prices', 'id', 'language_code',
'shipping_address', 'shipping_method', 'shipping_method_name',
'shipping_price', 'status', 'token', 'tracking_client_id',
'translated_discount_name', 'user', 'voucher', 'weight']
def resolve_shipping_price(self, _info):
return self.shipping_price
@gql_optimizer.resolver_hints(prefetch_related='payments__transactions')
def resolve_actions(self, _info):
actions = []
payment = self.get_last_payment()
if self.can_capture(payment):
actions.append(OrderAction.CAPTURE)
if self.can_mark_as_paid():
actions.append(OrderAction.MARK_AS_PAID)
if self.can_refund(payment):
actions.append(OrderAction.REFUND)
if self.can_void(payment):
actions.append(OrderAction.VOID)
return actions
def resolve_subtotal(self, _info):
return self.get_subtotal()
def resolve_total(self, _info):
return self.total
@gql_optimizer.resolver_hints(prefetch_related='payments__transactions')
def resolve_total_authorized(self, _info):
# FIXME adjust to multiple payments in the future
return self.total_authorized
@gql_optimizer.resolver_hints(prefetch_related='payments')
def resolve_total_captured(self, _info):
# FIXME adjust to multiple payments in the future
return self.total_captured
def resolve_total_balance(self, _info):
return self.total_balance
def resolve_fulfillments(self, info):
user = info.context.user
if user.is_staff:
qs = self.fulfillments.all()
else:
qs = self.fulfillments.exclude(status=FulfillmentStatus.CANCELED)
return qs.order_by('pk')
def resolve_lines(self, _info):
return self.lines.all().order_by('pk')
def resolve_events(self, _info):
return self.events.all().order_by('pk')
@gql_optimizer.resolver_hints(prefetch_related='payments')
def resolve_is_paid(self, _info):
return self.is_fully_paid()
def resolve_number(self, _info):
return str(self.pk)
@gql_optimizer.resolver_hints(prefetch_related='payments')
def resolve_payment_status(self, _info):
return self.get_payment_status()
@gql_optimizer.resolver_hints(prefetch_related='payments')
def resolve_payment_status_display(self, _info):
return self.get_payment_status_display()
def resolve_payments(self, _info):
return self.payments.all()
def resolve_status_display(self, _info):
return self.get_status_display()
    def resolve_can_finalize(self, _info):
try:
validate_draft_order(self)
except ValidationError:
return False
return True
@gql_optimizer.resolver_hints(select_related='user')
def resolve_user_email(self, _info):
if self.user_email:
return self.user_email
if self.user_id:
return self.user.email
return None
def resolve_available_shipping_methods(self, _info):
return applicable_shipping_methods(
self, self.get_subtotal().gross.amount)
def resolve_is_shipping_required(self, _info):
return self.is_shipping_required()
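
# A minimal, hypothetical sketch (not part of the original module) of a GraphQL
# query that the Order/OrderLine types above can serve. The camelCase field
# names follow graphene's default conversion of the attributes defined above;
# the `order(id: ...)` entry point and the example ID are assumptions about the
# surrounding schema, not definitions from this file.
EXAMPLE_ORDER_QUERY = '''
    query {
      order(id: "T3JkZXI6MQ==") {
        number
        isPaid
        paymentStatus
        total { gross { amount currency } }
        lines {
          productName
          quantity
          unitPrice { gross { amount } }
        }
      }
    }
'''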
|
|
import datetime
import logging
from django.conf import settings
from django.contrib.gis.db import models
from django.core.cache import cache
from django.dispatch import receiver
from django_mailbox.signals import message_received
from jsonfield.fields import JSONField
from location.settings import SETTINGS
logger = logging.getLogger('location.models')
try:
from census_places.models import PlaceBoundary
except ImportError:
logger.warning(
"django-census-places is not installed, locations will not "
"be populated with city information."
)
PlaceBoundary = None
try:
from neighborhoods.models import Neighborhood
except ImportError:
logger.warning(
"django-neighborhoods is not installed, locations will not "
"be populated with neighborhood information."
)
Neighborhood = None
class LocationConsumerSettings(models.Model):
user = models.OneToOneField(
getattr(
settings,
'AUTH_USER_MODEL',
'auth.User'
),
related_name='location_consumer_settings'
)
icloud_enabled = models.BooleanField(default=False)
icloud_timezone = models.CharField(
max_length=255,
blank=True,
null=True,
default='US/Pacific',
)
icloud_username = models.CharField(
max_length=255,
blank=True,
null=True,
)
icloud_password = models.CharField(
max_length=255,
blank=True,
null=True,
)
icloud_device_id = models.CharField(
max_length=255,
blank=True,
null=True,
help_text=(
"Device ID of the iCloud device from which to gather periodic"
"location updates"
)
)
runmeter_enabled = models.BooleanField(default=False)
runmeter_email = models.EmailField(
max_length=255,
help_text=(
"E-mail address of the device from which RunMeter will be sending"
"location updates"
)
)
def __unicode__(self):
return "Location Consumer Settings for %s" % (
self.user.get_username()
)
class Meta:
verbose_name = 'Location Consumer Settings'
verbose_name_plural = 'Location Consumer Settings'
class LocationSourceType(models.Model):
name = models.CharField(max_length=255)
icon = models.ImageField(
null=True,
blank=True,
upload_to='source_type_icons/'
)
def __unicode__(self):
return self.name
class LocationSource(models.Model):
name = models.CharField(max_length=255)
user = models.ForeignKey(
getattr(
settings,
'AUTH_USER_MODEL',
'auth.User'
),
related_name='location_sources',
null=True,
default=None,
)
type = models.ForeignKey(LocationSourceType)
data = JSONField()
created = models.DateTimeField(
auto_now_add=True
)
updated = models.DateTimeField(
auto_now=True
)
active = models.BooleanField(
default=False
)
def __unicode__(self):
return "%s: %s" % (
self.type.name,
self.name,
)
class LocationSnapshot(models.Model):
location = models.PointField(
geography=True,
spatial_index=True
)
source = models.ForeignKey(
LocationSource,
related_name='points',
null=True,
blank=True
)
date = models.DateTimeField(
default=datetime.datetime.now
)
created = models.DateTimeField(
auto_now_add=True
)
objects = models.GeoManager()
def get_cache_key(self, name):
return '%s:%s:%s:%s' % (
SETTINGS['cache_prefix'],
self.__class__.__name__,
self.pk,
name
)
def get_cached(self, name):
return cache.get(self.get_cache_key(name))
def set_cached(self, name, value):
cache.set(self.get_cache_key(name), value, 60 * 60 * 24)
@property
def city(self):
if PlaceBoundary:
cached = self.get_cached('city')
if cached:
return cached
try:
result = PlaceBoundary.get_containing(self.location)
self.set_cached('city', result)
return result
except PlaceBoundary.DoesNotExist:
pass
return None
@property
def neighborhood(self):
if Neighborhood:
cached = self.get_cached('neighborhood')
if cached:
return cached
try:
result = Neighborhood.get_containing(self.location)
self.set_cached('neighborhood', result)
return result
except Neighborhood.DoesNotExist:
pass
return None
def find_nearest_city(self):
if PlaceBoundary:
cached = self.get_cached('nearest_city')
if cached:
return cached
try:
result = PlaceBoundary.get_nearest_to(self.location)
self.set_cached('nearest_city', result)
return result
except PlaceBoundary.DoesNotExist:
pass
return None
def __unicode__(self):
return u"%s's location at %s" % (
self.source.user,
self.date
)
@receiver(message_received, dispatch_uid='process_incoming_runmeter_msg')
def process_incoming_runmeter_message(sender, message, **kwargs):
from location.consumers.runmeter import RunmeterConsumer
if message.mailbox.name == SETTINGS['runmeter_mailbox']:
try:
RunmeterConsumer.process_message(message)
except LocationConsumerSettings.DoesNotExist:
            logger.warning(
                'Unable to process message from %s: '
                'no user is currently assigned to this from_address',
                message.from_address
            )
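
# A minimal usage sketch (an assumption, not part of the original app): store a
# single location fix for a user and read back the cached `city` property. It
# presumes Django is configured with these models migrated; the 'gps' and
# 'manual' names below are illustrative only.
def record_example_snapshot(user, longitude, latitude):
    from django.contrib.gis.geos import Point

    source_type, _ = LocationSourceType.objects.get_or_create(name='gps')
    source, _ = LocationSource.objects.get_or_create(
        name='manual',
        user=user,
        type=source_type,
        defaults={'data': {}},
    )
    snapshot = LocationSnapshot.objects.create(
        source=source,
        location=Point(longitude, latitude),  # Point takes (x=lon, y=lat)
    )
    # Returns None unless django-census-places is installed and has data
    # covering the point; the lookup result is cached for 24 hours.
    return snapshot.city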
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Coordinator to help multiple threads stop when requested."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import sys
import threading
import time
import six
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
class Coordinator(object):
"""A coordinator for threads.
This class implements a simple mechanism to coordinate the termination of a
set of threads.
#### Usage:
```python
# Create a coordinator.
coord = Coordinator()
# Start a number of threads, passing the coordinator to each of them.
...start thread 1...(coord, ...)
...start thread N...(coord, ...)
# Wait for all the threads to terminate.
coord.join(threads)
```
Any of the threads can call `coord.request_stop()` to ask for all the threads
to stop. To cooperate with the requests, each thread must check for
`coord.should_stop()` on a regular basis. `coord.should_stop()` returns
`True` as soon as `coord.request_stop()` has been called.
A typical thread running with a coordinator will do something like:
```python
while not coord.should_stop():
...do some work...
```
#### Exception handling:
A thread can report an exception to the coordinator as part of the
`should_stop()` call. The exception will be re-raised from the
`coord.join()` call.
Thread code:
```python
try:
while not coord.should_stop():
...do some work...
except Exception as e:
coord.request_stop(e)
```
Main code:
```python
try:
...
coord = Coordinator()
# Start a number of threads, passing the coordinator to each of them.
...start thread 1...(coord, ...)
...start thread N...(coord, ...)
# Wait for all the threads to terminate.
coord.join(threads)
except Exception as e:
...exception that was passed to coord.request_stop()
```
To simplify the thread implementation, the Coordinator provides a
context handler `stop_on_exception()` that automatically requests a stop if
an exception is raised. Using the context handler the thread code above
can be written as:
```python
with coord.stop_on_exception():
while not coord.should_stop():
...do some work...
```
#### Grace period for stopping:
After a thread has called `coord.request_stop()` the other threads have a
fixed time to stop, this is called the 'stop grace period' and defaults to 2
minutes. If any of the threads is still alive after the grace period expires
  `coord.join()` raises a RuntimeError reporting the laggards.
```python
try:
...
coord = Coordinator()
# Start a number of threads, passing the coordinator to each of them.
...start thread 1...(coord, ...)
...start thread N...(coord, ...)
# Wait for all the threads to terminate, give them 10s grace period
coord.join(threads, stop_grace_period_secs=10)
  except RuntimeError:
...one of the threads took more than 10s to stop after request_stop()
...was called.
except Exception:
...exception that was passed to coord.request_stop()
```
"""
def __init__(self, clean_stop_exception_types=None):
"""Create a new Coordinator.
Args:
clean_stop_exception_types: Optional tuple of Exception types that should
cause a clean stop of the coordinator. If an exception of one of these
types is reported to `request_stop(ex)` the coordinator will behave as
if `request_stop(None)` was called. Defaults to
`(tf.errors.OutOfRangeError,)` which is used by input queues to signal
the end of input. When feeding training data from a Python iterator it
is common to add `StopIteration` to this list.
"""
if clean_stop_exception_types is None:
clean_stop_exception_types = (errors.OutOfRangeError,)
self._clean_stop_exception_types = tuple(clean_stop_exception_types)
# Protects all attributes.
self._lock = threading.Lock()
# Event set when threads must stop.
self._stop_event = threading.Event()
# Python exc_info to report.
# If not None, it should hold the returned value of sys.exc_info(), which is
# a tuple containing exception (type, value, traceback).
self._exc_info_to_raise = None
# True if we have called join() already.
self._joined = False
# Set of threads registered for joining when join() is called. These
# threads will be joined in addition to the threads passed to the join()
# call. It's ok if threads are both registered and passed to the join()
# call.
self._registered_threads = set()
def _filter_exception(self, ex):
"""Check if the exception indicated in 'ex' should be ignored.
This method examines `ex` to check if it is an exception that should be
reported to the users. If yes, it returns `ex` as is, otherwise it returns
None.
The code returns None for exception types listed in
`_clean_stop_exception_types`.
Args:
ex: None, an `Exception`, or a Python `exc_info` tuple as returned by
`sys.exc_info()`.
Returns:
ex or None.
"""
if isinstance(ex, tuple):
ex2 = ex[1]
else:
ex2 = ex
if isinstance(ex2, self._clean_stop_exception_types):
# Ignore the exception.
ex = None
return ex
def request_stop(self, ex=None):
"""Request that the threads stop.
After this is called, calls to `should_stop()` will return `True`.
    Note: If an exception is being passed in, it must be in the context of
handling the exception (i.e. `try: ... except Exception as ex: ...`) and not
a newly created one.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
with self._lock:
ex = self._filter_exception(ex)
# If we have already joined the coordinator the exception will not have a
# chance to be reported, so just raise it normally. This can happen if
      # you continue to use a session after having stopped and joined the
# coordinator threads.
if self._joined:
if isinstance(ex, tuple):
six.reraise(*ex)
elif ex is not None:
# NOTE(touts): This is bogus if request_stop() is not called
# from the exception handler that raised ex.
six.reraise(*sys.exc_info())
if not self._stop_event.is_set():
if ex and self._exc_info_to_raise is None:
if isinstance(ex, tuple):
logging.info("Error reported to Coordinator: %s, %s",
type(ex[1]),
compat.as_str_any(ex[1]))
self._exc_info_to_raise = ex
else:
logging.info("Error reported to Coordinator: %s, %s",
type(ex),
compat.as_str_any(ex))
self._exc_info_to_raise = sys.exc_info()
# self._exc_info_to_raise should contain a tuple containing exception
# (type, value, traceback)
if (len(self._exc_info_to_raise) != 3 or
not self._exc_info_to_raise[0] or
not self._exc_info_to_raise[1]):
# Raise, catch and record the exception here so that error happens
# where expected.
try:
raise ValueError(
"ex must be a tuple or sys.exc_info must return the current "
"exception: %s"
% self._exc_info_to_raise)
except ValueError:
# Record this error so it kills the coordinator properly.
# NOTE(touts): As above, this is bogus if request_stop() is not
# called from the exception handler that raised ex.
self._exc_info_to_raise = sys.exc_info()
self._stop_event.set()
def clear_stop(self):
"""Clears the stop flag.
After this is called, calls to `should_stop()` will return `False`.
"""
with self._lock:
self._joined = False
self._exc_info_to_raise = None
if self._stop_event.is_set():
self._stop_event.clear()
def should_stop(self):
"""Check if stop was requested.
Returns:
True if a stop was requested.
"""
return self._stop_event.is_set()
@contextlib.contextmanager
def stop_on_exception(self):
"""Context manager to request stop when an Exception is raised.
Code that uses a coordinator must catch exceptions and pass
them to the `request_stop()` method to stop the other threads
managed by the coordinator.
This context handler simplifies the exception handling.
Use it as follows:
```python
with coord.stop_on_exception():
# Any exception raised in the body of the with
# clause is reported to the coordinator before terminating
# the execution of the body.
...body...
```
This is completely equivalent to the slightly longer code:
```python
try:
...body...
    except Exception as ex:
coord.request_stop(ex)
```
Yields:
nothing.
"""
# pylint: disable=broad-except
try:
yield
except Exception as ex:
self.request_stop(ex)
# pylint: enable=broad-except
def wait_for_stop(self, timeout=None):
"""Wait till the Coordinator is told to stop.
Args:
timeout: Float. Sleep for up to that many seconds waiting for
should_stop() to become True.
Returns:
      True if the Coordinator is told to stop, False if the timeout expired.
"""
return self._stop_event.wait(timeout)
def register_thread(self, thread):
"""Register a thread to join.
Args:
thread: A Python thread to join.
"""
with self._lock:
self._registered_threads.add(thread)
def join(self, threads=None, stop_grace_period_secs=120):
"""Wait for threads to terminate.
    This call blocks until a set of threads have terminated. The set of threads
is the union of the threads passed in the `threads` argument and the list
of threads that registered with the coordinator by calling
`Coordinator.register_thread()`.
After the threads stop, if an `exc_info` was passed to `request_stop`, that
exception is re-raised.
Grace period handling: When `request_stop()` is called, threads are given
'stop_grace_period_secs' seconds to terminate. If any of them is still
alive after that period expires, a `RuntimeError` is raised. Note that if
an `exc_info` was passed to `request_stop()` then it is raised instead of
that `RuntimeError`.
Args:
threads: List of `threading.Threads`. The started threads to join in
addition to the registered threads.
stop_grace_period_secs: Number of seconds given to threads to stop after
`request_stop()` has been called.
Raises:
RuntimeError: If any thread is still alive after `request_stop()`
is called and the grace period expires.
"""
# Threads registered after this call will not be joined.
with self._lock:
if threads is None:
threads = self._registered_threads
else:
threads = self._registered_threads.union(set(threads))
# Wait for all threads to stop or for request_stop() to be called.
while any(t.is_alive() for t in threads) and not self.wait_for_stop(1.0):
pass
# If any thread is still alive, wait for the grace period to expire.
# By the time this check is executed, threads may still be shutting down,
# so we add a sleep of increasing duration to give them a chance to shut
      # down without losing too many cycles.
# The sleep duration is limited to the remaining grace duration.
stop_wait_secs = 0.001
while any(t.is_alive() for t in threads) and stop_grace_period_secs >= 0.0:
time.sleep(stop_wait_secs)
stop_grace_period_secs -= stop_wait_secs
stop_wait_secs = 2 * stop_wait_secs
# Keep the waiting period within sane bounds.
# The minimum value is to avoid decreasing stop_wait_secs to a value
# that could cause stop_grace_period_secs to remain unchanged.
stop_wait_secs = max(min(stop_wait_secs, stop_grace_period_secs), 0.001)
# List the threads still alive after the grace period.
stragglers = [t.name for t in threads if t.is_alive()]
# Terminate with an exception if appropriate.
with self._lock:
self._joined = True
self._registered_threads = set()
if self._exc_info_to_raise:
six.reraise(*self._exc_info_to_raise)
elif stragglers:
raise RuntimeError(
"Coordinator stopped with threads still running: %s" %
" ".join(stragglers))
@property
def joined(self):
return self._joined
# Threads for the standard services.
class LooperThread(threading.Thread):
"""A thread that runs code repeatedly, optionally on a timer.
This thread class is intended to be used with a `Coordinator`. It repeatedly
runs code specified either as `target` and `args` or by the `run_loop()`
method.
Before each run the thread checks if the coordinator has requested stop. In
that case the looper thread terminates immediately.
If the code being run raises an exception, that exception is reported to the
coordinator and the thread terminates. The coordinator will then request all
the other threads it coordinates to stop.
You typically pass looper threads to the supervisor `Join()` method.
"""
def __init__(self, coord, timer_interval_secs, target=None, args=None,
kwargs=None):
"""Create a LooperThread.
Args:
coord: A Coordinator.
timer_interval_secs: Time boundaries at which to call Run(), or None
if it should be called back to back.
target: Optional callable object that will be executed in the thread.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Raises:
ValueError: If one of the arguments is invalid.
"""
if not isinstance(coord, Coordinator):
raise ValueError("'coord' argument must be a Coordinator: %s" % coord)
super(LooperThread, self).__init__()
self.daemon = True
self._coord = coord
self._timer_interval_secs = timer_interval_secs
self._target = target
if self._target:
self._args = args or ()
self._kwargs = kwargs or {}
elif args or kwargs:
raise ValueError("'args' and 'kwargs' argument require that you also "
"pass 'target'")
self._coord.register_thread(self)
@staticmethod
def loop(coord, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(args)`
repeatedly. Otherwise `target(args)` is called every `timer_interval_secs`
seconds. The thread terminates when a stop of the coordinator is
requested.
Args:
coord: A Coordinator.
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = LooperThread(coord, timer_interval_secs, target=target, args=args,
kwargs=kwargs)
looper.start()
return looper
def run(self):
with self._coord.stop_on_exception():
self.start_loop()
if self._timer_interval_secs is None:
# Call back-to-back.
while not self._coord.should_stop():
self.run_loop()
else:
# Next time at which to call run_loop(), starts as 'now'.
next_timer_time = time.time()
while not self._coord.wait_for_stop(next_timer_time - time.time()):
next_timer_time += self._timer_interval_secs
self.run_loop()
self.stop_loop()
def start_loop(self):
"""Called when the thread starts."""
pass
def stop_loop(self):
"""Called when the thread stops."""
pass
def run_loop(self):
"""Called at 'timer_interval_secs' boundaries."""
if self._target:
self._target(*self._args, **self._kwargs)
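
# A minimal usage sketch (illustrative, not part of the public API defined
# above): a LooperThread runs `work` every 0.1s until one iteration asks the
# shared Coordinator to stop, and join() waits for the thread to finish.
def _example_coordinated_loop():
  counter = {'calls': 0}
  coord = Coordinator()

  def work():
    counter['calls'] += 1
    if counter['calls'] >= 5:
      # Any participant may request a stop; should_stop()/wait_for_stop()
      # will then report True to every thread sharing this coordinator.
      coord.request_stop()

  looper = LooperThread.loop(coord, timer_interval_secs=0.1, target=work)
  # Blocks until the looper observes the stop request, or raises RuntimeError
  # if it outlives the stop grace period (120 seconds by default).
  coord.join([looper])
  return counter['calls']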
|
|
# Copyright (c) 2014 Christian Schwede <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import testtools
import time
import types
from io import BytesIO
from six.moves import configparser
import swiftclient
class TestFunctional(testtools.TestCase):
def __init__(self, *args, **kwargs):
super(TestFunctional, self).__init__(*args, **kwargs)
self.skip_tests = False
self._get_config()
self.test_data = b'42' * 10
self.etag = '2704306ec982238d85d4b235c925d58e'
self.containername = "functional-tests-container-%s" % int(time.time())
self.containername_2 = self.containername + '_second'
self.containername_3 = self.containername + '_third'
self.objectname = "functional-tests-object-%s" % int(time.time())
self.objectname_2 = self.objectname + '_second'
def _get_config(self):
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
config = configparser.SafeConfigParser({'auth_version': '1'})
config.read(config_file)
if config.has_section('func_test'):
auth_host = config.get('func_test', 'auth_host')
auth_port = config.getint('func_test', 'auth_port')
auth_ssl = config.getboolean('func_test', 'auth_ssl')
auth_prefix = config.get('func_test', 'auth_prefix')
self.auth_version = config.get('func_test', 'auth_version')
self.account = config.get('func_test', 'account')
self.username = config.get('func_test', 'username')
self.password = config.get('func_test', 'password')
self.auth_url = ""
if auth_ssl:
self.auth_url += "https://"
else:
self.auth_url += "http://"
self.auth_url += "%s:%s%s" % (auth_host, auth_port, auth_prefix)
if self.auth_version == "1":
self.auth_url += 'v1.0'
self.account_username = "%s:%s" % (self.account, self.username)
else:
self.skip_tests = True
def setUp(self):
super(TestFunctional, self).setUp()
if self.skip_tests:
self.skipTest('SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG')
self.conn = swiftclient.Connection(
self.auth_url, self.account_username, self.password,
auth_version=self.auth_version)
self.conn.put_container(self.containername)
self.conn.put_container(self.containername_2)
self.conn.put_object(
self.containername, self.objectname, self.test_data)
self.conn.put_object(
self.containername, self.objectname_2, self.test_data)
def tearDown(self):
super(TestFunctional, self).tearDown()
for obj in [self.objectname, self.objectname_2]:
try:
self.conn.delete_object(self.containername, obj)
except swiftclient.ClientException:
pass
for container in [self.containername,
self.containername_2,
self.containername_3,
self.containername + '_segments']:
try:
self.conn.delete_container(container)
except swiftclient.ClientException:
pass
def _check_account_headers(self, headers):
self.assertTrue(headers.get('content-length'))
self.assertTrue(headers.get('x-account-object-count'))
self.assertTrue(headers.get('x-timestamp'))
self.assertTrue(headers.get('x-trans-id'))
self.assertTrue(headers.get('date'))
self.assertTrue(headers.get('x-account-bytes-used'))
self.assertTrue(headers.get('x-account-container-count'))
self.assertTrue(headers.get('content-type'))
self.assertTrue(headers.get('accept-ranges'))
def test_stat_account(self):
headers = self.conn.head_account()
self._check_account_headers(headers)
def test_list_account(self):
headers, containers = self.conn.get_account()
self._check_account_headers(headers)
self.assertTrue(len(containers))
test_container = [c
for c in containers
if c.get('name') == self.containername][0]
self.assertTrue(test_container.get('bytes') >= 0)
self.assertTrue(test_container.get('count') >= 0)
# Check if list limit is working
headers, containers = self.conn.get_account(limit=1)
self.assertEqual(1, len(containers))
# Check full listing
headers, containers = self.conn.get_account(limit=1, full_listing=True)
self.assertTrue(len(containers) >= 2) # there might be more containers
# Test marker
headers, containers = self.conn.get_account(marker=self.containername)
self.assertTrue(len(containers) >= 1)
self.assertEqual(self.containername_2, containers[0].get('name'))
def _check_container_headers(self, headers):
self.assertTrue(headers.get('content-length'))
self.assertTrue(headers.get('x-container-object-count'))
self.assertTrue(headers.get('x-timestamp'))
self.assertTrue(headers.get('x-trans-id'))
self.assertTrue(headers.get('date'))
self.assertTrue(headers.get('x-container-bytes-used'))
self.assertTrue(headers.get('x-container-object-count'))
self.assertTrue(headers.get('content-type'))
self.assertTrue(headers.get('accept-ranges'))
def test_stat_container(self):
headers = self.conn.head_container(self.containername)
self._check_container_headers(headers)
def test_list_container(self):
headers, objects = self.conn.get_container(self.containername)
self._check_container_headers(headers)
self.assertTrue(len(objects))
test_object = [o
for o in objects
if o.get('name') == self.objectname][0]
self.assertEqual(len(self.test_data), test_object.get('bytes'))
self.assertEqual(self.etag, test_object.get('hash'))
self.assertEqual('application/octet-stream',
test_object.get('content_type'))
# Check if list limit is working
headers, objects = self.conn.get_container(self.containername, limit=1)
self.assertEqual(1, len(objects))
# Check full listing
headers, objects = self.conn.get_container(
self.containername, limit=1, full_listing=True)
self.assertEqual(2, len(objects))
# Test marker
headers, objects = self.conn.get_container(
self.containername, marker=self.objectname)
self.assertEqual(1, len(objects))
self.assertEqual(self.objectname_2, objects[0].get('name'))
def test_create_container(self):
self.conn.put_container(self.containername_3)
self.assertTrue(self.conn.head_container(self.containername_3))
def test_delete(self):
self.conn.delete_object(self.containername, self.objectname)
self.conn.delete_object(self.containername, self.objectname_2)
self.conn.delete_container(self.containername)
# Container HEAD will raise an exception if container doesn't exist
# which is only possible if previous requests succeeded
self.assertRaises(
swiftclient.ClientException,
self.conn.head_container,
self.containername)
def test_upload_object(self):
# Object with content from string
self.conn.put_object(
self.containername, self.objectname, contents=self.test_data)
hdrs = self.conn.head_object(self.containername, self.objectname)
self.assertEqual(str(len(self.test_data)),
hdrs.get('content-length'))
self.assertEqual(self.etag, hdrs.get('etag'))
self.assertEqual('application/octet-stream',
hdrs.get('content-type'))
# Same but with content-length
self.conn.put_object(
self.containername, self.objectname,
contents=self.test_data, content_length=len(self.test_data))
hdrs = self.conn.head_object(self.containername, self.objectname)
self.assertEqual(str(len(self.test_data)),
hdrs.get('content-length'))
self.assertEqual(self.etag, hdrs.get('etag'))
self.assertEqual('application/octet-stream', hdrs.get('content-type'))
# Content from File-like object
fileobj = BytesIO(self.test_data)
self.conn.put_object(
self.containername, self.objectname, contents=fileobj)
hdrs = self.conn.head_object(self.containername, self.objectname)
self.assertEqual(str(len(self.test_data)),
hdrs.get('content-length'))
self.assertEqual(self.etag, hdrs.get('etag'))
self.assertEqual('application/octet-stream', hdrs.get('content-type'))
# Content from File-like object, but read in chunks
fileobj = BytesIO(self.test_data)
self.conn.put_object(
self.containername, self.objectname,
contents=fileobj, content_length=len(self.test_data),
chunk_size=10)
hdrs = self.conn.head_object(self.containername, self.objectname)
self.assertEqual(str(len(self.test_data)),
hdrs.get('content-length'))
self.assertEqual(self.etag, hdrs.get('etag'))
self.assertEqual('application/octet-stream', hdrs.get('content-type'))
# Wrong etag arg, should raise an exception
self.assertRaises(
swiftclient.ClientException,
self.conn.put_object,
self.containername, self.objectname,
contents=self.test_data, etag='invalid')
def test_download_object(self):
# Download whole object
hdrs, body = self.conn.get_object(self.containername, self.objectname)
self.assertEqual(self.test_data, body)
# Download in chunks, should return a generator
hdrs, body = self.conn.get_object(
self.containername, self.objectname,
resp_chunk_size=10)
self.assertTrue(isinstance(body, types.GeneratorType))
self.assertEqual(self.test_data, b''.join(body))
def test_post_account(self):
self.conn.post_account({'x-account-meta-data': 'Something'})
headers = self.conn.head_account()
self.assertEqual('Something', headers.get('x-account-meta-data'))
def test_post_container(self):
self.conn.post_container(
self.containername, {'x-container-meta-color': 'Something'})
headers = self.conn.head_container(self.containername)
self.assertEqual('Something', headers.get('x-container-meta-color'))
def test_post_object(self):
self.conn.post_object(self.containername,
self.objectname,
{'x-object-meta-color': 'Something'})
headers = self.conn.head_object(self.containername, self.objectname)
self.assertEqual('Something', headers.get('x-object-meta-color'))
def test_get_capabilities(self):
resp = self.conn.get_capabilities()
self.assertTrue(resp.get('swift'))
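
# A minimal standalone sketch (the auth URL and credentials are placeholders)
# of the same swiftclient calls the tests above exercise, outside the unittest
# harness: create a container, upload, download, verify, and clean up.
def example_roundtrip():
    conn = swiftclient.Connection(
        'http://127.0.0.1:8080/auth/v1.0', 'test:tester', 'testing',
        auth_version='1')
    conn.put_container('demo-container')
    conn.put_object('demo-container', 'demo-object', contents=b'42' * 10)
    headers, body = conn.get_object('demo-container', 'demo-object')
    assert body == b'42' * 10
    assert headers.get('etag') == '2704306ec982238d85d4b235c925d58e'
    conn.delete_object('demo-container', 'demo-object')
    conn.delete_container('demo-container')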
|
|
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.shortcuts import render
from django.template import loader, RequestContext
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
from django.contrib import messages
from django.forms.models import model_to_dict
import json
import datetime
from apps.dc_algorithm.models import Satellite, Area, Application
from apps.dc_algorithm.forms import DataSelectionForm
from .forms import AdditionalOptionsForm
from .tasks import run, get_acquisition_list
from collections import OrderedDict
from apps.dc_algorithm.views import (ToolView, SubmitNewRequest, GetTaskResult, SubmitNewSubsetRequest, CancelRequest,
UserHistory, ResultList, OutputList, RegionSelection, TaskDetails)
from apps.dc_algorithm.forms import MAX_NUM_YEARS
class RegionSelection(RegionSelection):
"""Creates the region selection page for the tool by extending the RegionSelection class
Extends the RegionSelection abstract class - tool_name is the only required parameter -
all other parameters are provided by the context processor.
See the dc_algorithm.views docstring for more information
"""
tool_name = 'slip'
class SlipTool(ToolView):
"""Creates the main view for the custom mosaic tool by extending the ToolView class
Extends the ToolView abstract class - required attributes are the tool_name and the
generate_form_dict function.
See the dc_algorithm.views docstring for more details.
"""
tool_name = 'slip'
task_model_name = 'SlipTask'
def generate_form_dict(self, satellites, area, user_id, user_history, task_model_class):
forms = {}
for satellite in satellites:
time_end = satellite.date_max
earliest_allowed_time = datetime.date(time_end.year - MAX_NUM_YEARS, time_end.month, time_end.day)
time_start = max(satellite.date_min, earliest_allowed_time)
forms[satellite.pk] = {
'Data Selection':
AdditionalOptionsForm(
datacube_platform=satellite.datacube_platform, auto_id="{}_%s".format(satellite.pk)),
'Geospatial Bounds':
DataSelectionForm(
user_id=user_id,
user_history=user_history,
task_model_class=task_model_class,
area=area,
time_start=time_start,
time_end=time_end,
auto_id="{}_%s".format(satellite.pk))
}
return forms
class SubmitNewRequest(SubmitNewRequest):
"""
Submit new request REST API Endpoint
Extends the SubmitNewRequest abstract class - required attributes are the tool_name,
task_model_name, form_list, and celery_task_func
Note:
celery_task_func should be callable with .delay() and take a single argument of a TaskModel pk.
See the dc_algorithm.views docstrings for more information.
"""
tool_name = 'slip'
task_model_name = 'SlipTask'
#celery_task_func = create_cloudfree_mosaic
celery_task_func = run
form_list = [DataSelectionForm, AdditionalOptionsForm]
class GetTaskResult(GetTaskResult):
"""
Get task result REST API endpoint
Extends the GetTaskResult abstract class, required attributes are the tool_name
and task_model_name
See the dc_algorithm.views docstrings for more information.
"""
tool_name = 'slip'
task_model_name = 'SlipTask'
class SubmitNewSubsetRequest(SubmitNewSubsetRequest):
"""
Submit new subset request REST API endpoint
Extends the SubmitNewSubsetRequest abstract class, required attributes are
the tool_name, task_model_name, celery_task_func, and task_model_update_func.
See the dc_algorithm.views docstrings for more information.
"""
tool_name = 'slip'
task_model_name = 'SlipTask'
celery_task_func = run
def task_model_update_func(self, task_model, **kwargs):
"""
        Basic function that updates a task model with kwargs. In this case only the date
needs to be changed, and results reset.
"""
date = kwargs.get('date')[0]
        date_datetime_format = datetime.datetime.strptime(date, '%m/%d/%Y') + datetime.timedelta(days=1)
acquisition_dates = get_acquisition_list(task_model, task_model.area_id, task_model.satellite,
date_datetime_format)
task_model.time_start = acquisition_dates[-1 * (task_model.baseline_length + 1)]
task_model.time_end = date_datetime_format
task_model.complete = False
task_model.scenes_processed = 0
task_model.total_scenes = 0
task_model.title = "Single acquisition for " + date
return task_model
class CancelRequest(CancelRequest):
"""
Cancel request REST API endpoint
Extends the CancelRequest abstract class, required attributes are the tool
name and task model name. This will not kill running queries, but will
disassociate it from the user's history.
"""
tool_name = 'slip'
task_model_name = 'SlipTask'
class UserHistory(UserHistory):
"""
Generate a template used to display the user's history
Extends the QueryHistory abstract class, required attributes are the tool
name and task model name. This will list all queries that are complete, have a
OK status, and are registered to the user.
"""
tool_name = 'slip'
task_model_name = 'SlipTask'
class ResultList(ResultList):
"""
Generate a template used to display any number of existing queries and metadatas
Extends the ResultList abstract class, required attributes are the tool
name and task model name. This will list all queries that are complete, have a
OK status, and are registered to the user.
"""
tool_name = 'slip'
task_model_name = 'SlipTask'
class OutputList(OutputList):
"""
Generate a template used to display any number of existing queries and metadatas
Extends the OutputList abstract class, required attributes are the tool
name and task model name. This will list all queries that are complete, have a
OK status, and are registered to the user.
"""
tool_name = 'slip'
task_model_name = 'SlipTask'
class TaskDetails(TaskDetails):
"""
Generate a template used to display the full task details for any
given task.
Extends the TaskDetails abstract class, required attributes are the tool
name and task model name.
"""
tool_name = 'slip'
task_model_name = 'SlipTask'
|
|
import datetime
from corehq.apps.accounting import utils, tasks
from corehq.apps.accounting.models import (
DomainUserHistory,
InvoiceCommunicationHistory,
CommunicationType,
DefaultProductPlan,
SoftwarePlanEdition,
SubscriptionType,
Subscription,
CustomerInvoiceCommunicationHistory,
CustomerInvoice,
)
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.accounting.tests import generator
from corehq.apps.accounting.utils.downgrade import (
DAYS_PAST_DUE_TO_TRIGGER_OVERDUE_NOTICE,
downgrade_eligible_domains,
DAYS_PAST_DUE_TO_TRIGGER_DOWNGRADE_WARNING,
DAYS_PAST_DUE_TO_TRIGGER_DOWNGRADE,
)
def _generate_invoice_and_subscription(days_ago, is_customer_billing_account=False):
"""
:param days_ago: The number of days ago an invoice should be due
:return: random domain, with invoices generated on the backend
"""
invoice_due_date = datetime.date.today() - datetime.timedelta(days=days_ago)
billing_contact = generator.create_arbitrary_web_user_name()
dimagi_user = generator.create_arbitrary_web_user_name(is_dimagi=True)
account = generator.billing_account(
dimagi_user,
billing_contact
)
account.is_customer_billing_account = is_customer_billing_account
account.save()
domain = generator.arbitrary_domain()
subscription_start_date = utils.months_from_date(invoice_due_date, -2)
subscription = generator.generate_domain_subscription(
account,
domain,
date_start=subscription_start_date,
date_end=None,
plan_version=DefaultProductPlan.get_default_plan_version(
SoftwarePlanEdition.ADVANCED
),
service_type=SubscriptionType.PRODUCT,
)
subscription.is_active = True
subscription.save()
invoice_date = utils.months_from_date(invoice_due_date, -1)
DomainUserHistory.objects.create(
domain=domain.name,
num_users=20,
record_date=invoice_date - datetime.timedelta(days=1)
)
tasks.generate_invoices_based_on_date(invoice_date)
# for testing purposes, force the latest invoice due_date to be
# the "invoice_due_date" specified above
if is_customer_billing_account:
latest_invoice = CustomerInvoice.objects.filter(
account=account,
).latest('date_created')
else:
latest_invoice = subscription.invoice_set.latest('date_created')
latest_invoice.date_due = invoice_due_date
latest_invoice.save()
return domain, latest_invoice
class TestDowngrades(BaseAccountingTest):
@classmethod
def setUpClass(cls):
super(TestDowngrades, cls).setUpClass()
generator.bootstrap_test_software_plan_versions()
generator.init_default_currency()
def setUp(self):
super(TestDowngrades, self).setUp()
self.domains = []
def tearDown(self):
for domain in self.domains:
for user in domain.all_users():
user.delete(domain.name, deleted_by=None)
domain.delete()
super(BaseAccountingTest, self).tearDown()
@classmethod
def tearDownClass(cls):
utils.clear_plan_version_cache()
super(TestDowngrades, cls).tearDownClass()
def _simulate_downgrade(self, days_overdue, is_customer_billing_account=False):
domain, latest_invoice = _generate_invoice_and_subscription(
days_overdue,
is_customer_billing_account=is_customer_billing_account
)
self.domains.append(domain)
downgrade_eligible_domains(only_downgrade_domain=domain.name)
return domain, latest_invoice
def test_no_notification(self):
domain, latest_invoice = self._simulate_downgrade(
DAYS_PAST_DUE_TO_TRIGGER_OVERDUE_NOTICE - 1
)
self.assertFalse(InvoiceCommunicationHistory.objects.filter(
invoice=latest_invoice,
).exists())
def test_overdue_notification(self):
domain, latest_invoice = self._simulate_downgrade(
DAYS_PAST_DUE_TO_TRIGGER_OVERDUE_NOTICE
)
# confirm communication was initiated
self.assertTrue(InvoiceCommunicationHistory.objects.filter(
invoice=latest_invoice,
communication_type=CommunicationType.OVERDUE_INVOICE,
).exists())
# try to trigger another communication (it should fail), and make sure
# only one communication was ever sent
downgrade_eligible_domains(only_downgrade_domain=domain.name)
        self.assertEqual(InvoiceCommunicationHistory.objects.filter(
            invoice=latest_invoice,
        ).count(), 1)
def test_belated_overdue_notification(self):
        # in case the downgrade process fails to run on the 30th day, make
        # sure the overdue notice still goes out properly on the 31st day.
domain, latest_invoice = self._simulate_downgrade(
DAYS_PAST_DUE_TO_TRIGGER_OVERDUE_NOTICE + 1
)
# confirm communication was initiated
self.assertTrue(InvoiceCommunicationHistory.objects.filter(
invoice=latest_invoice,
communication_type=CommunicationType.OVERDUE_INVOICE,
).exists())
def test_downgrade_warning(self):
domain, latest_invoice = self._simulate_downgrade(
DAYS_PAST_DUE_TO_TRIGGER_DOWNGRADE_WARNING
)
# confirm communication was initiated
self.assertTrue(InvoiceCommunicationHistory.objects.filter(
invoice=latest_invoice,
communication_type=CommunicationType.DOWNGRADE_WARNING,
).exists())
# make sure a downgrade warning isn't sent again
downgrade_eligible_domains(only_downgrade_domain=domain.name)
        self.assertEqual(InvoiceCommunicationHistory.objects.filter(
            invoice=latest_invoice,
            communication_type=CommunicationType.DOWNGRADE_WARNING,
        ).count(), 1)
def test_downgrade(self):
domain, latest_invoice = self._simulate_downgrade(
DAYS_PAST_DUE_TO_TRIGGER_DOWNGRADE
)
# confirm a downgrade wasn't actually initiated because a warning
# email has not been sent
subscription = Subscription.get_active_subscription_by_domain(domain)
self.assertNotEqual(subscription.plan_version.plan.edition, SoftwarePlanEdition.PAUSED)
# fake the warning to have been triggered a few days ago
warning_days_ago = DAYS_PAST_DUE_TO_TRIGGER_DOWNGRADE - DAYS_PAST_DUE_TO_TRIGGER_DOWNGRADE_WARNING
history = InvoiceCommunicationHistory.objects.filter(
invoice=latest_invoice
).latest('date_created')
history.date_created = datetime.date.today() - datetime.timedelta(days=warning_days_ago)
history.save()
# now trigger a successful downgrade
downgrade_eligible_domains(only_downgrade_domain=domain.name)
subscription = Subscription.get_active_subscription_by_domain(domain)
self.assertEqual(subscription.plan_version.plan.edition, SoftwarePlanEdition.PAUSED)
def test_overdue_customer_notification(self):
domain, latest_invoice = self._simulate_downgrade(
DAYS_PAST_DUE_TO_TRIGGER_OVERDUE_NOTICE,
is_customer_billing_account=True
)
# confirm communication was initiated
self.assertTrue(CustomerInvoiceCommunicationHistory.objects.filter(
invoice=latest_invoice,
communication_type=CommunicationType.OVERDUE_INVOICE,
).exists())
def test_overdue_customer_downgrade_warning(self):
domain, latest_invoice = self._simulate_downgrade(
DAYS_PAST_DUE_TO_TRIGGER_DOWNGRADE_WARNING,
is_customer_billing_account=True
)
# confirm communication was initiated
self.assertTrue(CustomerInvoiceCommunicationHistory.objects.filter(
invoice=latest_invoice,
communication_type=CommunicationType.DOWNGRADE_WARNING,
).exists())
|