# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord, frame_transform_graph
from astropy.wcs.utils import (celestial_frame_to_wcs, pixel_to_skycoord, proj_plane_pixel_scales,
skycoord_to_pixel, wcs_to_celestial_frame)
from ..utils import parse_input_data
__all__ = ['find_optimal_celestial_wcs']
def find_optimal_celestial_wcs(input_data, frame=None, auto_rotate=False,
projection='TAN', resolution=None,
reference=None):
"""
Given one or more images, return an optimal WCS projection object and
shape.
This currently only works with 2-d images with celestial WCS.
Parameters
----------
input_data : iterable
One or more input datasets to include in the calculation of the final
WCS. This should be an iterable containing one entry for each dataset,
where a single dataset is one of:
* The name of a FITS file
* An `~astropy.io.fits.HDUList` object
* An image HDU object such as a `~astropy.io.fits.PrimaryHDU`,
`~astropy.io.fits.ImageHDU`, or `~astropy.io.fits.CompImageHDU`
instance
* A tuple where the first element is a `~numpy.ndarray` and the
second element is either a `~astropy.wcs.WCS` or a
`~astropy.io.fits.Header` object
frame : str or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate system for the final image (defaults to the frame of
the first image specified)
auto_rotate : bool
Whether to rotate the header to minimize the final image area (if
`True`, requires shapely>=1.6 to be installed)
projection : str
Three-letter code for the WCS projection
resolution : `~astropy.units.Quantity`
The resolution of the final image. If not specified, this is the
smallest resolution of the input images.
reference : `~astropy.coordinates.SkyCoord`
The reference coordinate for the final header. If not specified, this
is determined automatically from the input images.
Returns
-------
wcs : :class:`~astropy.wcs.WCS`
The optimal WCS determined from the input images.
shape : tuple
The optimal shape required to cover all the output.
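    Examples
    --------
    A minimal usage sketch (assumes ``hdu1`` and ``hdu2`` are image HDUs with
    celestial WCS; adapt the inputs and resolution to your data)::

        >>> from astropy import units as u  # doctest: +SKIP
        >>> wcs_out, shape_out = find_optimal_celestial_wcs(
        ...     [hdu1, hdu2], resolution=0.5 * u.arcsec)  # doctest: +SKIP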
"""
# TODO: support higher-dimensional datasets in future
# TODO: take into account NaN values when determining the extent of the
# final WCS
if isinstance(frame, str):
frame = frame_transform_graph.lookup_name(frame)()
input_data = [parse_input_data(data) for data in input_data]
# We start off by looping over images, checking that they are indeed
# celestial images, and building up a list of all corners and all reference
# coordinates in celestial (ICRS) coordinates.
corners = []
references = []
resolutions = []
for array, wcs in input_data:
if array.ndim != 2:
raise ValueError("Input data is not 2-dimensional")
if wcs.naxis != 2:
raise ValueError("Input WCS is not 2-dimensional")
if not wcs.has_celestial:
raise TypeError("WCS does not have celestial components")
# Determine frame if it wasn't specified
if frame is None:
frame = wcs_to_celestial_frame(wcs)
# Find pixel coordinates of corners. In future if we are worried about
# significant distortions of the edges in the reprojection process we
# could simply add arbitrary numbers of midpoints to this list.
ny, nx = array.shape
xc = np.array([-0.5, nx - 0.5, nx - 0.5, -0.5])
yc = np.array([-0.5, -0.5, ny - 0.5, ny - 0.5])
# We have to do .frame here to make sure that we get an ICRS object
# without any 'hidden' attributes, otherwise the stacking below won't
# work. TODO: check if we need to enable distortions here.
corners.append(pixel_to_skycoord(xc, yc, wcs, origin=0).icrs.frame)
# We now figure out the reference coordinate for the image in ICRS. The
# easiest way to do this is actually to use pixel_to_skycoord with the
# reference position in pixel coordinates. We have to set origin=1
# because crpix values are 1-based.
xp, yp = wcs.wcs.crpix
references.append(pixel_to_skycoord(xp, yp, wcs, origin=1).icrs.frame)
# Find the pixel scale at the reference position - we take the minimum
# since we are going to set up a header with 'square' pixels with the
# smallest resolution specified.
scales = proj_plane_pixel_scales(wcs)
resolutions.append(np.min(np.abs(scales)))
# We now stack the coordinates - however the ICRS class can't do this
# so we have to use the high-level SkyCoord class.
corners = SkyCoord(corners)
references = SkyCoord(references)
# If no reference coordinate has been passed in for the final header, we
# determine the reference coordinate as the mean of all the reference
# positions. This choice is as good as any and if the user really cares,
# they can set it manually.
if reference is None:
reference = SkyCoord(references.data.mean(), frame=references.frame)
# In any case, we need to convert the reference coordinate (either
# specified or automatically determined) to the requested final frame.
reference = reference.transform_to(frame)
# Determine resolution if not specified
if resolution is None:
resolution = np.min(resolutions) * u.deg
# Determine the resolution in degrees
cdelt = resolution.to(u.deg).value
# Construct WCS object centered on position
wcs_final = celestial_frame_to_wcs(frame, projection=projection)
rep = reference.represent_as('unitspherical')
wcs_final.wcs.crval = rep.lon.degree, rep.lat.degree
wcs_final.wcs.cdelt = -cdelt, cdelt
# For now, set crpix to (1, 1) and we'll then figure out where all the
# images fall in this projection, then we'll adjust crpix.
wcs_final.wcs.crpix = (1, 1)
# Find pixel coordinates of all corners in the final WCS projection. We use
# origin=1 since we are trying to determine crpix values.
xp, yp = skycoord_to_pixel(corners, wcs_final, origin=1)
if auto_rotate:
# Use shapely to represent the points and find the minimum rotated
# rectangle
from shapely.geometry import MultiPoint
mp = MultiPoint(list(zip(xp, yp)))
# The following returns a list of rectangle vertices - in fact there
# are 5 coordinates because shapely represents it as a closed polygon
# with the same first/last vertex.
xr, yr = mp.minimum_rotated_rectangle.exterior.coords.xy
xr, yr = xr[:4], yr[:4]
# The order of the vertices is not guaranteed to be constant so we
# take the vertices with the two smallest y values (which, for a
# rectangle, guarantees that the vertices are neighboring)
order = np.argsort(yr)
x1, y1, x2, y2 = xr[order[0]], yr[order[0]], xr[order[1]], yr[order[1]]
# Determine angle between two of the vertices. It doesn't matter which
# ones they are, we just want to know how far from being straight the
# rectangle is.
angle = np.arctan2(y2 - y1, x2 - x1)
# Determine the smallest angle that would cause the rectangle to be
# lined up with the axes.
angle = angle % (np.pi / 2)
if angle > np.pi / 4:
angle -= np.pi / 2
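        # For example, an angle of 80 degrees is unchanged by the modulo but is
        # then reduced to -10 degrees, i.e. the smallest rotation that lines the
        # rectangle up with the pixel axes.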
# Set rotation matrix (use PC instead of CROTA2 since PC is the
# recommended approach)
pc = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
wcs_final.wcs.pc = pc
# Recompute pixel coordinates (more accurate than simply rotating xp, yp)
xp, yp = skycoord_to_pixel(corners, wcs_final, origin=1)
# Find the full range of values
xmin = xp.min()
xmax = xp.max()
ymin = yp.min()
ymax = yp.max()
# Update crpix so that the lower range falls on the bottom and left. We add
# 0.5 because in the final image the bottom left corner should be at (0.5,
# 0.5) not (1, 1).
wcs_final.wcs.crpix = (1 - xmin) + 0.5, (1 - ymin) + 0.5
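    # For example, if xmin = -3.2 then crpix1 becomes 1 - (-3.2) + 0.5 = 4.7,
    # which shifts that corner from x = -3.2 to x = -3.2 + (4.7 - 1) = 0.5.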
# Return the final image shape too
naxis1 = int(round(xmax - xmin))
naxis2 = int(round(ymax - ymin))
return wcs_final, (naxis2, naxis1)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tests for Google Cloud Life Sciences Hook
"""
import unittest
from unittest import mock
from unittest.mock import PropertyMock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.life_sciences import LifeSciencesHook
from tests.providers.google.cloud.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST,
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
TEST_OPERATION = {
"name": 'operation-name',
"metadata": {"@type": 'anytype'},
"done": True,
"response": "response",
}
TEST_WAITING_OPERATION = {"done": False, "response": "response"}
TEST_DONE_OPERATION = {"done": True, "response": "response"}
TEST_ERROR_OPERATION = {"done": True, "response": "response", "error": "error"}
TEST_PROJECT_ID = "life-science-project-id"
TEST_LOCATION = 'test-location'
class TestLifeSciencesHookWithPassedProjectId(unittest.TestCase):
def setUp(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = LifeSciencesHook(gcp_conn_id="test")
def test_location_path(self):
path = 'projects/life-science-project-id/locations/test-location'
path2 = self.hook._location_path(project_id=TEST_PROJECT_ID, location=TEST_LOCATION)
assert path == path2
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.build")
def test_life_science_client_creation(self, mock_build, mock_authorize):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
'lifesciences', 'v2beta', http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
assert self.hook._conn == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook.get_conn")
def test_run_pipeline_immediately_complete(self, get_conn_mock, mock_project_id):
service_mock = get_conn_mock.return_value
# fmt: off
service_mock.projects.return_value \
.locations.return_value \
.pipelines.return_value \
.run.return_value \
.execute.return_value = TEST_OPERATION
service_mock.projects.return_value \
.locations.return_value \
.operations.return_value \
.get.return_value \
.execute.return_value = TEST_DONE_OPERATION
result = self.hook.run_pipeline(body={}, # pylint: disable=no-value-for-parameter
location=TEST_LOCATION)
parent = self.hook. \
_location_path(location=TEST_LOCATION) # pylint: disable=no-value-for-parameter
service_mock.projects.return_value.locations.return_value \
.pipelines.return_value.run \
.assert_called_once_with(body={},
parent=parent)
# fmt: on
assert result == TEST_OPERATION
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.time.sleep")
def test_waiting_operation(self, _, get_conn_mock, mock_project_id):
service_mock = get_conn_mock.return_value
# fmt: off
service_mock.projects.return_value \
.locations.return_value \
.pipelines.return_value \
.run.return_value \
.execute.return_value = TEST_OPERATION
execute_mock = mock.Mock(
**{"side_effect": [TEST_WAITING_OPERATION, TEST_DONE_OPERATION]}
)
service_mock.projects.return_value \
.locations.return_value \
.operations.return_value \
.get.return_value \
.execute = execute_mock
# fmt: on
result = self.hook.run_pipeline(body={}, location=TEST_LOCATION, project_id=TEST_PROJECT_ID)
assert result == TEST_OPERATION
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.time.sleep")
def test_error_operation(self, _, get_conn_mock, mock_project_id):
service_mock = get_conn_mock.return_value
# fmt: off
service_mock.projects.return_value \
.locations.return_value \
.pipelines.return_value \
.run.return_value \
.execute.return_value = TEST_OPERATION
execute_mock = mock.Mock(**{"side_effect": [TEST_WAITING_OPERATION, TEST_ERROR_OPERATION]})
service_mock.projects.return_value \
.locations.return_value \
.operations.return_value \
.get.return_value \
.execute = execute_mock
# fmt: on
with pytest.raises(AirflowException, match="error"):
self.hook.run_pipeline(body={}, location=TEST_LOCATION, project_id=TEST_PROJECT_ID)
class TestLifeSciencesHookWithDefaultProjectIdFromConnection(unittest.TestCase):
def setUp(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = LifeSciencesHook(gcp_conn_id="test")
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.build")
def test_life_science_client_creation(self, mock_build, mock_authorize):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
'lifesciences', 'v2beta', http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
assert self.hook._conn == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook.get_conn")
def test_run_pipeline_immediately_complete(self, get_conn_mock, mock_project_id):
service_mock = get_conn_mock.return_value
# fmt: off
service_mock.projects.return_value \
.locations.return_value \
.pipelines.return_value \
.run.return_value \
.execute.return_value = TEST_OPERATION
service_mock.projects.return_value \
.locations.return_value \
.operations.return_value \
.get.return_value \
.execute.return_value = TEST_DONE_OPERATION
result = self.hook.run_pipeline(body={}, location=TEST_LOCATION, project_id=TEST_PROJECT_ID)
parent = self.hook._location_path(project_id=TEST_PROJECT_ID, location=TEST_LOCATION)
service_mock.projects.return_value.locations.return_value \
.pipelines.return_value.run \
.assert_called_once_with(body={},
parent=parent)
# fmt: on
assert result == TEST_OPERATION
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.time.sleep")
def test_waiting_operation(self, _, get_conn_mock, mock_project_id):
service_mock = get_conn_mock.return_value
# fmt: off
service_mock.projects.return_value \
.locations.return_value \
.pipelines.return_value \
.run.return_value \
.execute.return_value = TEST_OPERATION
execute_mock = mock.Mock(**{"side_effect": [TEST_WAITING_OPERATION, TEST_DONE_OPERATION]})
service_mock.projects.return_value \
.locations.return_value \
.operations.return_value \
.get.return_value \
.execute = execute_mock
# fmt: on
# pylint: disable=no-value-for-parameter
result = self.hook.run_pipeline(body={}, location=TEST_LOCATION)
assert result == TEST_OPERATION
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.time.sleep")
def test_error_operation(self, _, get_conn_mock, mock_project_id):
service_mock = get_conn_mock.return_value
# fmt: off
service_mock.projects.return_value \
.locations.return_value \
.pipelines.return_value \
.run.return_value \
.execute.return_value = TEST_OPERATION
execute_mock = mock.Mock(**{"side_effect": [TEST_WAITING_OPERATION, TEST_ERROR_OPERATION]})
service_mock.projects.return_value \
.locations.return_value \
.operations.return_value \
.get.return_value \
.execute = execute_mock
# fmt: on
with pytest.raises(AirflowException, match="error"):
self.hook.run_pipeline(body={}, location=TEST_LOCATION) # pylint: disable=no-value-for-parameter
class TestLifeSciencesHookWithoutProjectId(unittest.TestCase):
def setUp(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = LifeSciencesHook(gcp_conn_id="test")
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.build")
def test_life_science_client_creation(self, mock_build, mock_authorize):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
'lifesciences', 'v2beta', http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
assert self.hook._conn == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.life_sciences.LifeSciencesHook.get_conn")
def test_run_pipeline(self, get_conn_mock, mock_project_id): # pylint: disable=unused-argument
with pytest.raises(AirflowException) as ctx:
self.hook.run_pipeline(body={}, location=TEST_LOCATION) # pylint: disable=no-value-for-parameter
assert (
"The project id must be passed either as keyword project_id parameter or as project_id extra in "
"Google Cloud connection definition. Both are not set!" == str(ctx.value)
)
################################################################################
# Copyright 2016-2020 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
import os
import sys
import argparse
import re
from distutils.util import strtobool
import pandas as pd
from ExtractSizes import *
from TuningConfiguration import *
headers = ""
def MatchLine(headerPattern, linePattern, line):
global headers
if not headers:
matched = headerPattern.match(line)
if matched:
headers = line
return matched
else:
matched = linePattern.match(line)
return matched
def ResultsFilesList(inputPath, resultsName):
    resultsFilePattern = re.compile(resultsName + r"\.[0-9]*")
resultsFiles = [f for f in os.listdir(inputPath)]
filteredFiles = [f for f in resultsFiles if resultsFilePattern.match(f)]
return filteredFiles
def ParseResults(inputPath, outputPath, resultsName):
global headers
headers = ""
filteredFiles = ResultsFilesList(inputPath, resultsName)
headerPattern = re.compile("transA,transB")
linePattern = re.compile(r"(N|T),(N|T).*")
outfilename = resultsName + ".csv"
outputFilePath = os.path.join(outputPath, outfilename)
outfile = open(outputFilePath,'w')
for fl in filteredFiles:
flPath = os.path.join(inputPath,fl)
filteredLines = [ line for line in open(flPath) if MatchLine(headerPattern, linePattern, line)]
outfile.writelines(filteredLines)
outfile.flush()
outfile.close()
def getMultiplier(xdl):
if xdl:
return 2
return 1
def getCuCount(gpu):
    # Compute-unit counts for known GPUs; unrecognized names fall back to 64.
    gpuMap = {'vega10':64, 'mi25':64, 'vega20':64, 'v340l':56,'mi50':60,'arcturus':120,'mi60':64}
    return gpuMap.get(gpu, 64)
def fillCallCounts(problemMapper, callCounts, callCount, callCountStrided, isOne):
for i in problemMapper:
for klist in i:
midList = list()
for key in klist:
if key == "transposeA" or key == "transposeB" or key == "f" or key == "i":
if klist[key] == 10 and isOne:
klist[key] = 1
midList.append(klist[key])
if len(midList) == 4:
callCounts.append(midList)
for line in callCounts:
if line[0] == "gemm" or line[0] == "gemm_ex":
callCount.append(line[3])
elif "gemm_strided_batched" in line[0]:
callCountStrided.append(line[3])
def chooseCallCount(resultsName, callCount, callCountStrided):
if "strided" in resultsName:
return callCountStrided
return callCount
def ProcessResults(outputPath, resultsName, freqM, sz, call_count, gpu = 'vega20', xdl = False):
global headers
resultsFilename = resultsName + ".csv"
resultsFilePath = os.path.join(outputPath, resultsFilename)
    data = pd.read_csv(resultsFilePath)
multiplier = getMultiplier(xdl)
cus = getCuCount(gpu)
headerValues = headers.strip().split(",")
headerLength = len(headerValues)
key = headerValues[0:headerLength-2]
key.append('us')
performanceField = "rocblas-Gflops"
timingField = "us"
df = data.groupby(key,sort=False)
results = df[performanceField].mean().to_frame()
timingResults = df[timingField].mean().to_frame()
freq=freqM
factor=sz * 64 * multiplier * cus
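    # 'eff' below compares the measured rocblas-Gflops against an assumed
    # theoretical peak of roughly factor * freq / 1e3 GFLOP/s, i.e.
    # data_size * 64 FLOPs per CU per cycle (doubled when MFMA is enabled)
    # times the CU count and the clock in MHz; this reading is inferred from
    # the formula rather than documented anywhere.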
results['eff'] = 100*1e3*results['rocblas-Gflops'] / (factor * freq)
results['us_w'] = timingResults['us']*call_count
aggregateFileName = resultsName + "-aggregated.csv"
aggregateFilePath = os.path.join(outputPath, aggregateFileName)
results.to_csv(aggregateFilePath, header=True)
resultsBad = results[results['eff'] < 70]
badResultsFileName = resultsName + "-bad.csv"
badResultsFilePath = os.path.join(outputPath, badResultsFileName)
resultsBad.sort_values(by='us_w',ascending=False).to_csv(badResultsFilePath, header=True)
large1 = data
large1['N'] = pd.to_numeric(large1['N'])
large1['M'] = pd.to_numeric(large1['M'])
large2 = large1[large1['N']>1000]
large = large2[large2['M']>1000]
largeAgg = large.groupby(key)
largeResults = largeAgg[performanceField].mean().to_frame()
largeResultsTime = largeAgg[timingField].mean().to_frame()
largeResults['eff'] = 100*1e3*largeResults['rocblas-Gflops'] / (factor * freq)
largeResults['us_w'] = largeResultsTime['us']
resultsFileName = resultsName + "-large.csv"
resultsFilePath = os.path.join(outputPath, resultsFileName)
largeResults.sort_values(by='us_w',ascending=False).to_csv(resultsFilePath, header=True)
resultsBad = largeResults[largeResults['eff'] < 70]
badResultsFileName = resultsName + "-bad-large.csv"
badResultsFilePath = os.path.join(outputPath, badResultsFileName)
resultsBad.sort_values(by='eff',ascending=True).to_csv(badResultsFilePath, header=True)
def RunMain():
userArgs = sys.argv[1:]
def strbool(arg):
return bool(strtobool(arg))
argParser = argparse.ArgumentParser()
argParser.add_argument("input_path", help="path where the results are located")
argParser.add_argument("output_path", help="path where the processed files are to go")
argParser.add_argument("frequency", help="frequecy in megahertz used in testing", type=int,default=1301)
argParser.add_argument("data_size", help="data size",type=int,default=2)
argParser.add_argument("input_file_name", help="configuration file path")
argParser.add_argument("gpu", help="which gpu was used", type=str,default="vega20")
argParser.add_argument("mfma", help="were mfma instructions enabled", type=strbool,default=False)
argParser.add_argument("is_count_1", help="were mfma instructions enabled", type=strbool,default=False) # duplicated parameter?
args = argParser.parse_args(userArgs)
inputPath = args.input_path
outputPath = args.output_path
freqM = args.frequency
sz = args.data_size
inputFileName = args.input_file_name
cu = args.gpu
xdl = args.mfma
isOne = args.is_count_1
problemMapper = list(ProcessFile(inputFileName).values())
callCounts = list(list())
callCount = list()
callCountStrided = list()
fillCallCounts(problemMapper, callCounts, callCount, callCountStrided, isOne)
resultsFiles = [f for f in os.listdir(inputPath) if (os.path.isfile(os.path.join(inputPath, f)))]
resultsNameSet = set()
for resultsFile in resultsFiles:
resultsName, _ = os.path.splitext(resultsFile)
resultsNameSet.add(resultsName)
resultsNames = list(resultsNameSet)
for resultsName in resultsNames:
ParseResults(inputPath, outputPath, resultsName)
callCountChoice = chooseCallCount(resultsName, callCount, callCountStrided)
ProcessResults(outputPath, resultsName, freqM, sz, callCountChoice, cu, xdl)
if __name__ == "__main__":
RunMain()
import json
import time
from uuid import uuid4
import mock
import pytest
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APITestCase
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.device.models import SyncQueue
from kolibri.core.public.api import HANDSHAKING_TIME
from kolibri.core.public.api import MAX_CONCURRENT_SYNCS
from kolibri.core.public.api import position_in_queue
from kolibri.core.public.constants.user_sync_statuses import QUEUED
from kolibri.core.public.constants.user_sync_statuses import SYNC
from kolibri.core.public.utils import begin_request_soud_sync
from kolibri.core.public.utils import request_soud_sync
class SyncQueueTestBase(TestCase):
def setUp(self):
self.facility = Facility.objects.create(name="Test")
def test_create_queue_element(self):
previous_time = time.time()
element, _ = SyncQueue.objects.get_or_create(
user=FacilityUser.objects.create(username="test", facility=self.facility)
)
assert element.keep_alive == 5.0
current_time = time.time()
assert (
current_time >= element.updated
        )  # '>=' rather than '>' because this can run within the same clock tick
assert previous_time < element.updated
def test_queue_cleaning(self):
for i in range(3):
SyncQueue.objects.create(
user=FacilityUser.objects.create(
username="test{}".format(i), facility=self.facility
)
)
for i in range(3, 5):
item = SyncQueue.objects.create(
user=FacilityUser.objects.create(
username="test{}".format(i), facility=self.facility
)
)
item.updated = item.updated - 200
item.save()
assert SyncQueue.objects.count() == 5
SyncQueue.clean_stale() # default expiry time = 180 seconds
assert SyncQueue.objects.count() == 3
class SyncQueueViewSetAPITestCase(APITestCase):
def setUp(self):
self.default_facility = Facility.objects.create(name="Test")
Facility.objects.create(name="Test2")
self.test_user = FacilityUser.objects.create(
username="test", facility=self.default_facility
)
def test_list(self):
response = self.client.get(
reverse("kolibri:core:syncqueue-list"), format="json"
)
assert len(response.data) == Facility.objects.count()
assert response.status_code == status.HTTP_200_OK
def test_list_queue_length(self):
queue_length = 3
for i in range(queue_length):
SyncQueue.objects.create(
user=FacilityUser.objects.create(
username="test{}".format(i), facility=self.default_facility
)
)
response = self.client.get(
reverse("kolibri:core:syncqueue-list"), format="json"
)
assert response.data[self.default_facility.id] == queue_length
@mock.patch(
"kolibri.core.public.api.get_device_setting",
return_value=True,
)
def test_soud(self, mock_device_setting):
response = self.client.post(
reverse("kolibri:core:syncqueue-list"), {"user": uuid4()}, format="json"
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "I'm a Subset of users device" in response.data
def test_user_needed(self):
response = self.client.post(reverse("kolibri:core:syncqueue-list"))
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
def test_existing_user(self):
response = self.client.post(
reverse("kolibri:core:syncqueue-list"),
{"user": uuid4()},
format="json",
)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_allow_sync(self):
response = self.client.post(
reverse("kolibri:core:syncqueue-list"),
{
"user": self.test_user.id,
},
format="json",
)
assert response.status_code == status.HTTP_200_OK
assert response.data["action"] == SYNC
@mock.patch("kolibri.core.public.api.TransferSession.objects.filter")
def test_enqueued(self, _filter):
_filter().count.return_value = MAX_CONCURRENT_SYNCS + 1
response = self.client.post(
reverse("kolibri:core:syncqueue-list"),
{"user": self.test_user.id},
format="json",
)
assert response.status_code == status.HTTP_200_OK
assert response.data["action"] == QUEUED
assert "id" in response.data
assert response.data["keep_alive"] == MAX_CONCURRENT_SYNCS * HANDSHAKING_TIME
def test_update(self):
response = self.client.put(
reverse("kolibri:core:syncqueue-detail", kwargs={"pk": uuid4()})
)
assert response.status_code == status.HTTP_200_OK
assert response.data["action"] == SYNC
@mock.patch("kolibri.core.public.api.TransferSession.objects.filter")
def test_not_in_queue(self, _filter):
_filter().count.return_value = MAX_CONCURRENT_SYNCS + 1
response = self.client.put(
reverse("kolibri:core:syncqueue-detail", kwargs={"pk": uuid4()})
)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert "Missing element" in response.data
@mock.patch("kolibri.core.public.api.TransferSession.objects.filter")
def test_updated_enqueued(self, _filter):
_filter().count.return_value = MAX_CONCURRENT_SYNCS + 1
element = SyncQueue.objects.create(user=self.test_user)
previous_time = element.updated
response = self.client.put(
reverse("kolibri:core:syncqueue-detail", kwargs={"pk": element.id})
)
element = SyncQueue.objects.get(id=element.id)
assert element.updated >= previous_time
assert response.status_code == status.HTTP_200_OK
assert response.data["action"] == QUEUED
assert response.data["id"] == element.id
assert (
response.data["keep_alive"] == MAX_CONCURRENT_SYNCS * HANDSHAKING_TIME
) # first in queue, position does not change
@mock.patch("kolibri.core.public.api.TransferSession.objects.filter")
def test_position_in_queue(self, _filter):
_filter().count.return_value = MAX_CONCURRENT_SYNCS + 1
for n in range(10):
user = FacilityUser.objects.create(
username="test{}".format(n), facility=self.default_facility
)
element = SyncQueue.objects.create(user=user)
if n == 5:
pk = element.id
response = self.client.put(
reverse("kolibri:core:syncqueue-detail", kwargs={"pk": pk})
)
assert position_in_queue(pk) == 5
assert response.data["keep_alive"] == HANDSHAKING_TIME * 6
SyncQueue.objects.all().order_by("datetime").first().delete()
SyncQueue.objects.all().order_by("datetime").first().delete()
assert position_in_queue(pk) == 3
response = self.client.put(
reverse("kolibri:core:syncqueue-detail", kwargs={"pk": pk})
)
assert response.data["keep_alive"] == HANDSHAKING_TIME * 4
@pytest.mark.django_db
class TestRequestSoUDSync(object):
@pytest.fixture()
def setUp(self):
self.facility = Facility.objects.create(name="Test")
self.test_user = FacilityUser.objects.create(
username="test", facility=self.facility
)
@mock.patch("kolibri.core.public.utils.queue")
@mock.patch(
"kolibri.core.public.utils.get_device_setting",
return_value=True,
)
def test_begin_request_soud_sync(self, mock_device_info, queue, setUp):
begin_request_soud_sync("whatever_server", self.test_user.id)
queue.enqueue.assert_called_with(
request_soud_sync, "whatever_server", self.test_user.id
)
@mock.patch("kolibri.core.public.utils.scheduler")
@mock.patch("kolibri.core.public.utils.requests")
@mock.patch("kolibri.core.tasks.api.MorangoProfileController")
@mock.patch("kolibri.core.tasks.api.get_client_and_server_certs")
@mock.patch("kolibri.core.tasks.api.get_dataset_id")
def test_request_soud_sync(
self,
get_dataset_id,
get_client_and_server_certs,
MorangoProfileController,
requests_mock,
scheduler,
setUp,
):
get_client_and_server_certs.return_value = None
get_dataset_id.return_value = self.facility.dataset_id
requests_mock.post.return_value.status_code = 200
requests_mock.post.return_value.content = json.dumps({"action": SYNC})
network_connection = mock.Mock()
controller = MorangoProfileController.return_value
controller.create_network_connection.return_value = network_connection
request_soud_sync("http://whatever:8000", self.test_user.id)
        assert scheduler.enqueue_in.call_count == 0
requests_mock.post.return_value.status_code = 200
requests_mock.post.return_value.content = json.dumps(
{"action": QUEUED, "keep_alive": "5", "id": str(uuid4())}
)
request_soud_sync("whatever_server", self.test_user.id)
        assert scheduler.enqueue_in.call_count == 1
import json
import base64
import uuid
import urllib
import hashlib
from datetime import datetime, timedelta
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils.timezone import utc
from django.conf import settings
from ..models import Statement, Agent, Verb, Activity, SubStatement
from ..views import register, statements
from ..util import retrieve_statement
class AuthTests(TestCase):
# Want to test no auth, so have to disable both auths
@classmethod
def setUpClass(cls):
print "\n%s" % __name__
def setUp(self):
if not settings.ALLOW_EMPTY_HTTP_AUTH:
settings.ALLOW_EMPTY_HTTP_AUTH = True
if settings.OAUTH_ENABLED:
settings.OAUTH_ENABLED = False
self.auth = "Basic %s" % base64.b64encode("%s:%s" % ('',''))
self.guid1 = str(uuid.uuid1())
self.guid2 = str(uuid.uuid1())
self.guid3 = str(uuid.uuid1())
self.guid4 = str(uuid.uuid1())
self.guid5 = str(uuid.uuid1())
self.guid6 = str(uuid.uuid1())
self.guid7 = str(uuid.uuid1())
self.guid8 = str(uuid.uuid1())
self.guid9 = str(uuid.uuid1())
self.guid10 = str(uuid.uuid1())
self.cguid1 = str(uuid.uuid1())
self.cguid2 = str(uuid.uuid1())
self.cguid3 = str(uuid.uuid1())
self.cguid4 = str(uuid.uuid1())
self.cguid5 = str(uuid.uuid1())
self.cguid6 = str(uuid.uuid1())
self.cguid7 = str(uuid.uuid1())
self.cguid8 = str(uuid.uuid1())
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}}, "object": {"id":"act:activity"},
"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"}})
exist_stmt_response = self.client.post(reverse(statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(exist_stmt_response.status_code, 200)
self.exist_stmt_id = json.loads(exist_stmt_response.content)[0]
self.firstTime = str(datetime.utcnow().replace(tzinfo=utc).isoformat())
self.existStmt1 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"object": {"objectType": "Activity", "id":"act:foogie",
"definition": {"name": {"en-US":"testname2", "en-GB": "altname"},
"description": {"en-US":"testdesc2", "en-GB": "altdesc"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2","ext:key3": "value3"}}},
"result": {"score":{"scaled":.85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:key1": "value1", "ext:key2":"value2"}},
"context":{"registration": self.cguid1, "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform":"bard","language": "en-US", "extensions":{"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}})
self.existStmt2 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"object": {"objectType": "Activity", "id":"act:foogie",
"definition": {"name": {"en-US":"testname3", "en-GB": "altname"},
"description": {"en-US":"testdesc3","en-GB":"altdesc"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answers"],
"extensions": {"ext:key11": "value11", "ext:key22": "value22","ext:key33": "value33"}}},
"result": {"score":{"scaled":.75}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:dkey1": "dvalue1", "ext:dkey2":"dvalue2"}},
"context":{"registration": self.cguid2, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "platform":"bard","language": "en-US", "extensions":{"ext:ckey11": "cval11",
"ext:ckey22": "cval22"}}})
self.existStmt3 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"object": {"objectType": "Activity", "id":"act:act:foogals",
"definition": {"name": {"en-US":"testname3"},"description": {"en-US":"testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answers"],
"extensions": {"ext:key111": "value111", "ext:key222": "value222","ext:key333": "value333"}}},
"result": {"score":{"scaled":.79}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:dkey1": "dvalue1", "ext:dkey2":"dvalue2"}},
"context":{"registration": self.cguid3, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "platform":"bard","language": "en-US",
"instructor":{"objectType": "Agent", "name":"bob", "mbox":"mailto:[email protected]"},
"extensions":{"ext:ckey111": "cval111","ext:ckey222": "cval222"}}})
self.existStmt4 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"object": {"objectType": "Activity", "id":"act:foogal",
"definition": {"name": {"en-US":"testname3"},"description": {"en-US":"testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answers"],
"extensions": {"ext:key111": "value111", "ext:key222": "value222","ext:key333": "value333"}}},
"result": {"score":{"scaled":.79}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:dkey1": "dvalue1", "ext:dkey2":"dvalue2"}},
"context":{"registration": self.cguid4, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "platform":"bard","language": "en-US","instructor":{"name":"bill", "mbox":"mailto:[email protected]"},
"extensions":{"ext:ckey111": "cval111","ext:ckey222": "cval222"}}})
self.existStmt5 = json.dumps({"object":{"objectType":"Agent","name":"jon","mbox":"mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created","display": {"en-US":"created"}},
"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"}})
self.existStmt6 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:[email protected]"},
"object":{"id": "act:test_activity"},"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
"display": {"en-US":"created"}}})
self.existStmt7 = json.dumps({"object": {"objectType":"Agent","name":"max","mbox":"mailto:[email protected]"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/created","display": {"en-US":"created"}},
"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"}})
self.existStmt8 = json.dumps({"object": {"objectType":"Agent","name":"john","mbox":"mailto:[email protected]"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/missed","display": {"en-US":"missed"}},
"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"}})
self.existStmt9 = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/missed"},"object":{"objectType":"SubStatement",
"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},"verb": {"id":"nested:verb/url/nested"},
"object": {"objectType":"Activity", "id":"act:testex.com"}, "result":{"completion": True, "success": True,
"response": "kicked"}, "context":{"registration": self.cguid6,
"contextActivities": {"other": {"id": "act:NewActivityID"}},"revision": "foo", "platform":"bar",
"language": "en-US", "extensions":{"ext:k1": "v1", "ext:k2": "v2"}}}})
self.existStmt10 = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/missed"},"object":{"objectType":"StatementRef",
"id":str(self.exist_stmt_id)}})
# Put statements
param = {"statementId":self.guid1}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt1
self.putresponse1 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse1.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=2)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid1).update(stored=time)
param = {"statementId":self.guid3}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt3
self.putresponse3 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse3.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=3)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid3).update(stored=time)
param = {"statementId":self.guid4}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt4
self.putresponse4 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse4.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=4)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid4).update(stored=time)
self.secondTime = str((datetime.utcnow()+timedelta(seconds=4)).replace(tzinfo=utc).isoformat())
param = {"statementId":self.guid2}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt2
self.putresponse2 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse2.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=6)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid2).update(stored=time)
param = {"statementId":self.guid5}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt5
self.putresponse5 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse5.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=7)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid5).update(stored=time)
param = {"statementId":self.guid6}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt6
self.putresponse6 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse6.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=8)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid6).update(stored=time)
param = {"statementId":self.guid7}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt7
self.putresponse7 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse7.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=9)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid7).update(stored=time)
param = {"statementId":self.guid8}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt8
self.putresponse8 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse8.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=10)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid8).update(stored=time)
param = {"statementId": self.guid9}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt9
self.putresponse9 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse9.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=11)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid9).update(stored=time)
param = {"statementId": self.guid10}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt_payload = self.existStmt10
self.putresponse10 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse10.status_code, 204)
time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=11)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(statement_id=self.guid10).update(stored=time)
def tearDown(self):
if settings.ALLOW_EMPTY_HTTP_AUTH:
settings.ALLOW_EMPTY_HTTP_AUTH = False
if not settings.OAUTH_ENABLED:
settings.OAUTH_ENABLED = True
def test_post_with_no_valid_params(self):
# Error will be thrown in statements class
resp = self.client.post(reverse(statements), {"feet":"yes","hands": {"id":"http://example.com/test_post"}},
Authorization=self.auth, content_type="application/json", X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 400)
def test_post(self):
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_post"}})
response = self.client.post(reverse(statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
act = Activity.objects.get(activity_id="act:test_post")
self.assertEqual(act.activity_id, "act:test_post")
agent = Agent.objects.get(mbox="mailto:[email protected]")
self.assertEqual(agent.name, "bob")
def test_post_stmt_ref_no_existing_stmt(self):
stmt = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/missed"},"object":{"objectType":"StatementRef",
"id":"12345678-1234-5678-1234-567812345678"}})
response = self.client.post(reverse(statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 404)
def test_post_with_actor(self):
stmt = json.dumps({"actor":{"mbox":"mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:i.pity.the.fool"}})
response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
Agent.objects.get(mbox="mailto:[email protected]")
def test_list_post(self):
stmts = json.dumps([{"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_list_post"}, "actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/failed","display": {"en-GB":"failed"}},
"object": {"id":"act:test_list_post1"}, "actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}}])
response = self.client.post(reverse(statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
activity1 = Activity.objects.get(activity_id="act:test_list_post")
activity2 = Activity.objects.get(activity_id="act:test_list_post1")
stmt1 = Statement.objects.get(object_activity=activity1)
stmt2 = Statement.objects.get(object_activity=activity2)
verb1 = Verb.objects.get(id=stmt1.verb.id)
verb2 = Verb.objects.get(id=stmt2.verb.id)
lang_map1 = verb1.display
lang_map2 = verb2.display
self.assertEqual(response.status_code, 200)
self.assertEqual(stmt1.verb.verb_id, "http://adlnet.gov/expapi/verbs/passed")
self.assertEqual(stmt2.verb.verb_id, "http://adlnet.gov/expapi/verbs/failed")
self.assertEqual(lang_map1.keys()[0], "en-US")
self.assertEqual(lang_map1.values()[0], "passed")
self.assertEqual(lang_map2.keys()[0], "en-GB")
self.assertEqual(lang_map2.values()[0], "failed")
def test_put(self):
guid = str(uuid.uuid1())
param = {"statementId":guid}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_put"},"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 204)
stmt = Statement.objects.get(statement_id=guid)
act = Activity.objects.get(activity_id="act:test_put")
self.assertEqual(act.activity_id, "act:test_put")
self.assertEqual(stmt.actor.mbox, "mailto:[email protected]")
self.assertEqual(stmt.verb.verb_id, "http://adlnet.gov/expapi/verbs/passed")
def test_put_with_substatement(self):
con_guid = str(uuid.uuid1())
st_guid = str(uuid.uuid1())
param = {"statementId": st_guid}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"verb": {"id":"verb:verb/url/tested"}, "object":{"objectType":"SubStatement",
"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},"verb": {"id":"verb:verb/url/nested"},
"object": {"objectType":"Activity", "id":"act:testex.com"}, "result":{"completion": True, "success": True,
"response": "kicked"}, "context":{"registration": con_guid,
"contextActivities": {"other": {"id": "act:NewActivityID"}},"revision": "foo", "platform":"bar",
"language": "en-US", "extensions":{"ext:k1": "v1", "ext:k2": "v2"}}}})
response = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
rsp = get_response.content
self.assertIn("objectType",rsp)
self.assertIn("SubStatement", rsp)
self.assertIn("actor",rsp)
self.assertIn("mailto:[email protected]",rsp)
self.assertIn("verb",rsp)
self.assertIn("verb:verb/url/nested", rsp)
self.assertIn("Activity", rsp)
self.assertIn("act:testex.com", rsp)
self.assertIn("result", rsp)
self.assertIn("completion",rsp)
self.assertIn("success", rsp)
self.assertIn("response", rsp)
self.assertIn("kicked", rsp)
self.assertIn("context", rsp)
self.assertIn(con_guid, rsp)
self.assertIn("contextActivities", rsp)
self.assertIn("other", rsp)
self.assertIn("revision", rsp)
self.assertIn("foo", rsp)
self.assertIn("platform", rsp)
self.assertIn("bar", rsp)
self.assertIn("language", rsp)
self.assertIn("en-US", rsp)
self.assertIn("extensions", rsp)
self.assertIn("ext:k1", rsp)
self.assertIn("v1", rsp)
self.assertIn("ext:k2", rsp)
self.assertIn("v2", rsp)
def test_no_content_put(self):
guid = str(uuid.uuid1())
param = {"statementId":guid}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt = json.dumps({})
putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 400)
def test_existing_stmtID_put_put(self):
guid = str(uuid.uuid1())
param = {"statementId":guid}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
exist_stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:activity"},"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
first_put = self.client.put(path, exist_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(first_put.status_code, 204)
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object":{"id":"act:test_existing_put"}, "actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 409)
def test_existing_stmtID_put_post(self):
guid = str(uuid.uuid1())
exist_stmt = json.dumps({"id": guid, "verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:activity"},"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
post = self.client.post(reverse(statements), exist_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 200)
param = {"statementId":guid}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object":{"id":"act:test_existing_put"}, "actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 409)
def test_missing_stmtID_put(self):
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:act:test_put"},"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
response = self.client.put(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
        self.assertIn("Error -- statements - method = PUT, but no statementId parameter or ID given in statement", response.content)
def test_get(self):
param = {"statementId":self.guid1}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
getResponse = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
rsp = getResponse.content
self.assertIn(self.guid1, rsp)
def test_get_no_existing_ID(self):
param = {"statementId":"aaaaaa"}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
getResponse = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(getResponse.status_code, 404)
def test_get_no_statementid(self):
getResponse = self.client.get(reverse(statements), X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
jsn = json.loads(getResponse.content)
self.assertEqual(len(jsn["statements"]), 11)
# Activities PUT during setup, plus the 3 created from contextActivities, should total 9
def test_number_of_activities(self):
acts = len(Activity.objects.all())
self.assertEqual(9, acts)
def test_update_activity_correct_auth(self):
stmt = json.dumps({"verb": {"id":"verb:verb/url/changed-act"},"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"},
"object": {"objectType": "Activity", "id":"act:foogie",
"definition": {"name": {"en-US":"testname3"},"description": {"en-US":"testdesc3"},
"type": "http://adlnet.gov/expapi/activities/cmi.interaction","interactionType": "fill-in","correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2","ext:key3": "value3"}}},
"result": {"score":{"scaled":.85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:key1": "value1", "ext:key2":"value2"}},
"context":{"registration": self.cguid8, "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform":"bard","language": "en-US", "extensions":{"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}})
post_response = self.client.post(reverse(statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post_response.status_code, 200)
act = Activity.objects.get(activity_id="act:foogie")
name_set = act.activity_definition_name
desc_set = act.activity_definition_description
self.assertEqual(name_set.keys()[1], "en-US")
self.assertEqual(name_set.values()[1], "testname3")
self.assertEqual(name_set.keys()[0], "en-GB")
self.assertEqual(name_set.values()[0], "altname")
self.assertEqual(desc_set.keys()[1], "en-US")
self.assertEqual(desc_set.values()[1], "testdesc3")
self.assertEqual(desc_set.keys()[0], "en-GB")
self.assertEqual(desc_set.values()[0], "altdesc")
def test_cors_post_put(self):
st_id = str(uuid.uuid1())
content = {"verb":{"id":"verb:verb/url"}, "actor":{"objectType":"Agent", "mbox": "mailto:[email protected]"},
"object": {"id":"act:test_cors_post_put"}}
bdy = "statementId=%s&content=%s&Content-Type=application/json&X-Experience-API-Version=1.0.0" % (st_id, content)
path = "%s?%s" % (reverse(statements), urllib.urlencode({"method":"PUT"}))
response = self.client.post(path, bdy, content_type="application/x-www-form-urlencoded", Authorization=self.auth)
self.assertEqual(response.status_code, 204)
act = Activity.objects.get(activity_id="act:test_cors_post_put")
self.assertEqual(act.activity_id, "act:test_cors_post_put")
def test_issue_put(self):
stmt_id = "33f60b35-e1b2-4ddc-9c6f-7b3f65244430"
stmt = json.dumps({"verb":{"id":"verb:verb/uri"},"object":{"id":"act:scorm.com/JsTetris_TCAPI","definition":{"type":"type:media",
"name":{"en-US":"Js Tetris - Tin Can Prototype"},"description":{"en-US":"A game of tetris."}}},
"context":{"contextActivities":{"grouping":{"id":"act:scorm.com/JsTetris_TCAPI"}},
"registration":"6b1091be-2833-4886-b4a6-59e5e0b3c3f4"},
"actor":{"mbox":"mailto:[email protected]","name":"Tom Creighton"}})
path = "%s?%s" % (reverse(statements), urllib.urlencode({"statementId":stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json", X_Experience_API_Version=settings.XAPI_VERSION,Authorization=self.auth)
self.assertEqual(put_stmt.status_code, 204)
def test_post_with_group(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:[email protected]"
stmt = json.dumps({"actor":{"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:[email protected]"},
{"name":"agentB","mbox":"mailto:[email protected]"}]},"verb":{"id": "http://verb/uri/created", "display":{"en-US":"created"}},
"object": {"id":"act:i.pity.the.fool"}})
response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
g = Agent.objects.get(mbox="mailto:[email protected]")
self.assertEquals(g.name, name)
self.assertEquals(g.mbox, mbox)
mems = g.member.values_list("name", flat=True)
self.assertEquals(len(mems), 2)
self.assertIn("agentA", mems)
self.assertIn("agentB", mems)
def test_issue_put_no_version_header(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244431'
stmt = json.dumps({"verb":"verb:completed","object":{"id":"act:scorm.com/JsTetris_TCAPI/level2",
"definition":{"type":"media","name":{"en-US":"Js Tetris Level2"},
"description":{"en-US":"Starting at 1, the higher the level, the harder the game."}}},
"result":{"extensions":{"ext:time":104,"ext:apm":229,"ext:lines":5},"score":{"raw":9911,"min":0}},
"context":{"contextActivities":{"grouping":{"id":"act:scorm.com/JsTetris_TCAPI"}},
"registration":"b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor":{"name":"tom creighton","mbox":"mailto:[email protected]"}})
path = '%s?%s' % (reverse(statements), urllib.urlencode({"statementId":stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth)
self.assertEqual(put_stmt.status_code, 400)
def test_issue_put_wrong_version_header(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244432'
stmt = json.dumps({"verb":"verb:completed","object":{"id":"act:scorm.com/JsTetris_TCAPI/level2",
"definition":{"type":"media","name":{"en-US":"Js Tetris Level2"},
"description":{"en-US":"Starting at 1, the higher the level, the harder the game."}}},
"result":{"extensions":{"ext:time":104,"ext:apm":229,"ext:lines":5},"score":{"raw":9911,"min":0}},
"context":{"contextActivities":{"grouping":{"id":"act:scorm.com/JsTetris_TCAPI"}},
"registration":"b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor":{"name":"tom creighton","mbox":"mailto:[email protected]"}})
path = '%s?%s' % (reverse(statements), urllib.urlencode({"statementId":stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="0.90")
self.assertEqual(put_stmt.status_code, 400)
# Use this test to make sure stmts are being returned correctly with all data - doesn't check timestamp and stored fields
def test_all_fields_activity_as_object(self):
nested_st_id = str(uuid.uuid1())
nest_param = {"statementId":nested_st_id}
nest_path = "%s?%s" % (reverse(statements), urllib.urlencode(nest_param))
nested_stmt = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid1())
context_id= str(uuid.uuid1())
param = {"statementId":stmt_id}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt = json.dumps({"actor":{"objectType":"Agent","name": "Lou Wolford","account":{"homePage":"http://example.com", "name":"uniqueName"}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created","display": {"en-US":"created", "en-GB":"made"}},
"object": {"objectType": "Activity", "id":"http:adlnet.gov/my/Activity/URL",
"definition": {"name": {"en-US":"actName", "en-GB": "anotherActName"},
"description": {"en-US":"This is my activity description.", "en-GB": "This is another activity description."},
"type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "choice",
"correctResponsesPattern": ["golf", "tetris"],
"choices":[{"id": "golf", "description": {"en-US":"Golf Example", "en-GB": "GOLF"}},
{"id": "tetris","description":{"en-US": "Tetris Example", "en-GB": "TETRIS"}},
{"id":"facebook", "description":{"en-US":"Facebook App", "en-GB": "FACEBOOK"}},
{"id":"scrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
"extensions": {"ext:key1": "value1", "ext:key2": "value2","ext:key3": "value3"}}},
"result": {"score":{"scaled":.85, "raw": 85, "min":0, "max":100}, "completion": True, "success": True, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:resultKey1": "resultValue1", "ext:resultKey2":"resultValue2"}},
"context":{"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"},
"grouping":{"id":"http://groupingID"} },
"revision": "Spelling error in choices.", "platform":"Platform is web browser.","language": "en-US",
"statement":{"objectType":"StatementRef", "id":str(nested_st_id)},
"extensions":{"ext:contextKey1": "contextVal1","ext:contextKey2": "contextVal2"}},
"timestamp":self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)
get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account']['name'], 'uniqueName')
self.assertEqual(the_returned['actor']['account']['homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'], 'http://adlnet.gov/expapi/verbs/created')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'made')
self.assertEqual(the_returned['verb']['display']['en-US'], 'created')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result']['duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0]['id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['platform'], 'Platform is web browser.')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['revision'], 'Spelling error in choices.')
self.assertEqual(the_returned['context']['statement']['id'], str(nested_st_id))
self.assertEqual(the_returned['context']['statement']['objectType'], 'StatementRef')
# Use this test to make sure stmts are being returned correctly with all data - doesn't check timestamp, stored fields
def test_all_fields_agent_as_object(self):
nested_st_id = str(uuid.uuid1())
nest_param = {"statementId":nested_st_id}
nest_path = "%s?%s" % (reverse(statements), urllib.urlencode(nest_param))
nested_stmt = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid1())
context_id= str(uuid.uuid1())
param = {"statementId":stmt_id}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
msha = hashlib.sha1("[email protected]").hexdigest()
stmt = json.dumps({"actor":{"objectType":"Agent","name": "Lou Wolford","account":{"homePage":"http://example.com", "name":"louUniqueName"}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/helped","display": {"en-US":"helped", "en-GB":"assisted"}},
"object": {"objectType":"Agent","name": "Tom Creighton","mbox_sha1sum":msha},
"result": {"score":{"scaled":.85, "raw": 85, "min":0, "max":100}, "completion": True, "success": True, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:resultKey1": "resultValue1", "ext:resultKey2":"resultValue2"}},
"context":{"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
"language": "en-US",
"statement":{"objectType":"StatementRef", "id":str(nested_st_id)},
"extensions":{"ext:contextKey1": "contextVal1","ext:contextKey2": "contextVal2"}},
"timestamp":self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)
get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account']['name'], 'louUniqueName')
self.assertEqual(the_returned['actor']['account']['homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'], 'http://adlnet.gov/expapi/verbs/helped')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'assisted')
self.assertEqual(the_returned['verb']['display']['en-US'], 'helped')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result']['duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0]['id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['statement']['id'], str(nested_st_id))
self.assertEqual(the_returned['context']['statement']['objectType'], 'StatementRef')
self.assertEqual(the_returned['object']['objectType'], 'Agent')
self.assertEqual(the_returned['object']['name'], 'Tom Creighton')
self.assertEqual(the_returned['object']['mbox_sha1sum'], 'edb97c2848fc47bdd2091028de8a3b1b24933752')
# Use this test to make sure stmts are being returned correctly with all data - doesn't check timestamps or stored fields
def test_all_fields_substatement_as_object(self):
nested_st_id = str(uuid.uuid1())
nest_param = {"statementId":nested_st_id}
nest_path = "%s?%s" % (reverse(statements), urllib.urlencode(nest_param))
nested_stmt = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed", "en-GB":"graded"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_sub_stmt.status_code, 204)
nested_sub_st_id = str(uuid.uuid1())
nest_sub_param = {"statementId":nested_sub_st_id}
nest_sub_path = "%s?%s" % (reverse(statements), urllib.urlencode(nest_sub_param))
nested_sub_stmt = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/verb","display": {"en-US":"verb", "en-GB":"altVerb"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplenestedsubstatement"}})
put_nest_sub_stmt = self.client.put(nest_sub_path, nested_sub_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_nest_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid1())
context_id= str(uuid.uuid1())
sub_context_id= str(uuid.uuid1())
param = {"statementId":stmt_id}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt = json.dumps({"actor":{"objectType":"Agent","name": "Lou Wolford","account":{"homePage":"http://example.com", "name":"louUniqueName"}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/said","display": {"en-US":"said", "en-GB":"talked"}},
"object": {"objectType": "SubStatement", "actor":{"objectType":"Agent","name":"Tom Creighton","mbox": "mailto:[email protected]"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed", "en-GB": "Graded"}},
"object":{"id":"http://example.adlnet.gov/tincan/example/simplestatement",
'definition': {'name': {'en-US':'SubStatement name'},
'description': {'en-US':'SubStatement description'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'matching',
'correctResponsesPattern': ['lou.3,tom.2,andy.1'],'source':[{'id': 'lou',
'description': {'en-US':'Lou', 'it': 'Luigi'}},{'id': 'tom','description':{'en-US': 'Tom', 'it':'Tim'}},
{'id':'andy', 'description':{'en-US':'Andy'}}],'target':[{'id':'1',
'description':{'en-US': 'ADL LRS'}},{'id':'2','description':{'en-US': 'lrs'}},
{'id':'3', 'description':{'en-US': 'the adl lrs', 'en-CH': 'the lrs'}}]}},
"result": {"score":{"scaled":.50, "raw": 50, "min":1, "max":51}, "completion": True,
"success": True, "response": "Poorly done",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:resultKey11": "resultValue11", "ext:resultKey22":"resultValue22"}},
"context":{"registration": sub_context_id,
"contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test/nest"}},
"revision": "Spelling error in target.", "platform":"Ipad.","language": "en-US",
"statement":{"objectType":"StatementRef", "id":str(nested_sub_st_id)},
"extensions":{"ext:contextKey11": "contextVal11","ext:contextKey22": "contextVal22"}}},
"result": {"score":{"scaled":.85, "raw": 85, "min":0, "max":100}, "completion": True, "success": True, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:resultKey1": "resultValue1", "ext:resultKey2":"resultValue2"}},
"context":{"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
"revision": "Spelling error in choices.", "platform":"Platform is web browser.","language": "en-US",
"statement":{"objectType":"StatementRef", "id":str(nested_st_id)},
"extensions":{"ext:contextKey1": "contextVal1","ext:contextKey2": "contextVal2"}},
"timestamp":self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)
get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account']['name'], 'louUniqueName')
self.assertEqual(the_returned['actor']['account']['homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'], 'http://adlnet.gov/expapi/verbs/said')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'talked')
self.assertEqual(the_returned['verb']['display']['en-US'], 'said')
self.assertEqual(the_returned['object']['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['object']['actor']['name'], 'Tom Creighton')
self.assertEqual(the_returned['object']['actor']['mbox'], 'mailto:[email protected]')
self.assertEqual(the_returned['object']['context']['registration'], sub_context_id)
self.assertEqual(the_returned['object']['context']['language'], 'en-US')
self.assertEqual(the_returned['object']['context']['platform'], 'Ipad.')
self.assertEqual(the_returned['object']['context']['revision'], 'Spelling error in target.')
self.assertEqual(the_returned['object']['context']['statement']['id'], str(nested_sub_st_id))
self.assertEqual(the_returned['object']['context']['statement']['objectType'], 'StatementRef')
self.assertEqual(the_returned['object']['context']['contextActivities']['other'][0]['id'], 'http://example.adlnet.gov/tincan/example/test/nest')
self.assertEqual(the_returned['object']['context']['extensions']['ext:contextKey11'], 'contextVal11')
self.assertEqual(the_returned['object']['context']['extensions']['ext:contextKey22'], 'contextVal22')
self.assertEqual(the_returned['object']['object']['id'], 'http://example.adlnet.gov/tincan/example/simplestatement')
self.assertEqual(the_returned['object']['object']['definition']['type'], 'http://adlnet.gov/expapi/activities/cmi.interaction')
self.assertEqual(the_returned['object']['object']['definition']['description']['en-US'], 'SubStatement description')
self.assertEqual(the_returned['object']['object']['definition']['interactionType'], 'matching')
self.assertEqual(the_returned['object']['object']['definition']['name']['en-US'], 'SubStatement name')
# Arrays are checked slightly differently: serialize them and assert the expected members are present
source_str = json.dumps(the_returned['object']['object']['definition']['source'])
self.assertIn('description', source_str)
self.assertIn('id', source_str)
self.assertIn('Lou', source_str)
self.assertIn('Luigi', source_str)
self.assertIn('lou', source_str)
self.assertIn('Tom', source_str)
self.assertIn('Tim', source_str)
self.assertIn('tom', source_str)
self.assertIn('Andy', source_str)
self.assertIn('andy', source_str)
target_str = json.dumps(the_returned['object']['object']['definition']['target'])
self.assertIn('description', target_str)
self.assertIn('id', target_str)
self.assertIn('ADL LRS', target_str)
self.assertIn('1', target_str)
self.assertIn('lrs', target_str)
self.assertIn('2', target_str)
self.assertIn('the lrs', target_str)
self.assertIn('the adl lrs', target_str)
self.assertIn('3', target_str)
self.assertEqual(the_returned['object']['objectType'], 'SubStatement')
self.assertEqual(the_returned['object']['result']['completion'], True)
self.assertEqual(the_returned['object']['result']['duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['object']['result']['extensions']['ext:resultKey11'], 'resultValue11')
self.assertEqual(the_returned['object']['result']['extensions']['ext:resultKey22'], 'resultValue22')
self.assertEqual(the_returned['object']['result']['response'], 'Poorly done')
self.assertEqual(the_returned['object']['result']['score']['max'], 51)
self.assertEqual(the_returned['object']['result']['score']['min'], 1)
self.assertEqual(the_returned['object']['result']['score']['raw'], 50)
self.assertEqual(the_returned['object']['result']['score']['scaled'], 0.5)
self.assertEqual(the_returned['object']['result']['success'], True)
self.assertEqual(the_returned['object']['verb']['id'], 'http://adlnet.gov/expapi/verbs/assess')
self.assertEqual(the_returned['object']['verb']['display']['en-GB'], 'Graded')
self.assertEqual(the_returned['object']['verb']['display']['en-US'], 'assessed')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result']['duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0]['id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['platform'], 'Platform is web browser.')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['revision'], 'Spelling error in choices.')
self.assertEqual(the_returned['context']['statement']['id'], nested_st_id)
self.assertEqual(the_returned['context']['statement']['objectType'], 'StatementRef')
# The fourth stmt in the list is missing an actor - should raise an error and roll back the statements already saved from this list
def test_post_list_rollback(self):
cguid1 = str(uuid.uuid1())
stmts = json.dumps([{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-failed","display": {"en-US":"wrong-failed"}},"object": {"id":"act:test_wrong_list_post2"},
"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"},"result": {"score":{"scaled":.99}, "completion": True, "success": True, "response": "wrong",
"extensions":{"ext:resultwrongkey1": "value1", "ext:resultwrongkey2":"value2"}}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked","display": {"en-US":"wrong-kicked"}},
"object": {"objectType": "Activity", "id":"act:test_wrong_list_post",
"definition": {"name": {"en-US":"wrongactName", "en-GB": "anotherActName"},
"description": {"en-US":"This is my activity description.", "en-GB": "This is another activity description."},
"type": "http://adlnet.gov/expapi/activities/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "choice",
"correctResponsesPattern": ["wronggolf", "wrongtetris"],
"choices":[{"id": "wronggolf", "description": {"en-US":"Golf Example", "en-GB": "GOLF"}},
{"id": "wrongtetris","description":{"en-US": "Tetris Example", "en-GB": "TETRIS"}},
{"id":"wrongfacebook", "description":{"en-US":"Facebook App", "en-GB": "FACEBOOK"}},
{"id":"wrongscrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
"extensions": {"ext:wrongkey1": "wrongvalue1", "ext:wrongkey2": "wrongvalue2","ext:wrongkey3": "wrongvalue3"}}},
"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-passed","display": {"en-US":"wrong-passed"}},"object": {"id":"act:test_wrong_list_post1"},
"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"},"context":{"registration": cguid1, "contextActivities": {"other": {"id": "act:wrongActivityID2"}},
"revision": "wrong", "platform":"wrong","language": "en-US", "extensions":{"ext:wrongkey1": "wrongval1",
"ext:wrongkey2": "wrongval2"}}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked","display": {"en-US":"wrong-kicked"}},"object": {"id":"act:test_wrong_list_post2"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked","display": {"en-US":"wrong-kicked"}},"object": {"id":"act:test_wrong_list_post4"}, "actor":{"objectType":"Agent", "mbox":"[email protected]"}}])
response = self.client.post(reverse(statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
verbs = Verb.objects.filter(verb_id__contains='wrong')
activities = Activity.objects.filter(activity_id__contains='test_wrong_list_post')
stmts = Statement.objects.all()
# 11 statements from setup
self.assertEqual(len(stmts), 11)
self.assertEqual(len(verbs), 0)
self.assertEqual(len(activities), 0)
def test_post_list_rollback_part_2(self):
stmts = json.dumps([{"object": {"objectType":"Agent","name":"john","mbox":"mailto:[email protected]"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong","display": {"wrong-en-US":"wrong"}},
"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/created"},
"object": {"objectType": "Activity", "id":"act:foogie",
"definition": {"name": {"en-US":"testname2", "en-GB": "altname"},
"description": {"en-US":"testdesc2", "en-GB": "altdesc"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in","correctResponsesPattern": ["answer"]}},
"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked"},"object": {"id":"act:test_wrong_list_post2"}}])
response = self.client.post(reverse(statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
created_verbs = Verb.objects.filter(verb_id__contains='http://adlnet.gov/expapi/verbs/created')
wrong_verbs = Verb.objects.filter(verb_id__contains='http://adlnet.gov/expapi/verbs/wrong')
activities = Activity.objects.filter(activity_id='act:foogie')
stmts = Statement.objects.all()
wrong_agent = Agent.objects.filter(mbox='mailto:[email protected]')
john_agent = Agent.objects.filter(mbox='mailto:[email protected]')
s_agent = Agent.objects.filter(mbox='mailto:[email protected]')
auth_agent = Agent.objects.filter(mbox='mailto:[email protected]')
self.assertEqual(len(created_verbs), 1)
# The verbs from the first and last stmts in the list would still be there if the rollback had not removed them
self.assertEqual(len(wrong_verbs), 0)
self.assertEqual(len(activities), 1)
self.assertEqual(len(stmts), 11)
self.assertEqual(len(wrong_agent), 0)
self.assertEqual(len(john_agent), 1)
self.assertEqual(len(s_agent), 1)
self.assertEqual(len(auth_agent), 0)
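# The batch below voids an existing statement and then fails on a later stmt
# missing an actor; the rollback must leave the original statement unvoided.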
def test_post_list_rollback_with_void(self):
stmts = json.dumps([{"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"object": {"objectType":"StatementRef","id":str(self.exist_stmt_id)},
"verb": {"id": "http://adlnet.gov/expapi/verbs/voided","display": {"en-US":"voided"}}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked"},"object": {"id":"act:test_wrong_list_post2"}}])
response = self.client.post(reverse(statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
voided_st = Statement.objects.get(statement_id=str(self.exist_stmt_id))
voided_verb = Verb.objects.filter(verb_id__contains='voided')
only_actor = Agent.objects.filter(mbox="mailto:[email protected]")
stmts = Statement.objects.all()
self.assertEqual(len(stmts), 11)
self.assertEqual(voided_st.voided, False)
self.assertEqual(len(voided_verb), 0)
self.assertEqual(len(only_actor), 0)
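# A failure later in the batch must also roll back the SubStatement and every
# agent, verb and activity it introduced.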
def test_post_list_rollback_with_subs(self):
sub_context_id = str(uuid.uuid1())
stmts = json.dumps([{"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong","display": {"wrong-en-US":"wrong"}},
"object": {"objectType":"Agent","name":"john","mbox":"mailto:[email protected]"}},
{"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-next","display": {"wrong-en-US":"wrong-next"}},
"object":{"objectType":"SubStatement",
"actor":{"objectType":"Agent","mbox":"mailto:[email protected]"},"verb": {"id":"http://adlnet.gov/expapi/verbs/wrong-sub"},
"object": {"objectType":"Activity", "id":"act:wrong-testex.com"}, "result":{"completion": True, "success": True,
"response": "sub-wrong-kicked"}, "context":{"registration": sub_context_id,
"contextActivities": {"other": {"id": "act:sub-wrong-ActivityID"}},"revision": "foo", "platform":"bar",
"language": "en-US", "extensions":{"ext:wrong-k1": "v1", "ext:wrong-k2": "v2"}}}},
{"verb":{"id": "http://adlnet.gov/expapi/verbs/wrong-kicked"},"object": {"id":"act:test_wrong_list_post2"}}])
response = self.client.post(reverse(statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
s_agent = Agent.objects.filter(mbox="mailto:[email protected]")
ss_agent = Agent.objects.filter(mbox="mailto:[email protected]")
john_agent = Agent.objects.filter(mbox="mailto:[email protected]")
subs = SubStatement.objects.all()
wrong_verb = Verb.objects.filter(verb_id__contains="wrong")
activities = Activity.objects.filter(activity_id__contains="wrong")
stmts = Statement.objects.all()
self.assertEqual(len(stmts), 11)
self.assertEqual(len(s_agent), 0)
self.assertEqual(len(ss_agent), 0)
self.assertEqual(len(john_agent), 1)
# Only 1 sub from setup
self.assertEqual(len(subs), 1)
self.assertEqual(len(wrong_verb), 0)
self.assertEqual(len(activities), 0)
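# Activity definitions are tracked per authority: the owning user's statements
# update the canonical definition, while other users get and update their own
# local copy of the activity.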
def test_activity_definition_change(self):
username_1 = "tester1"
email_1 = "[email protected]"
password_1 = "test"
auth_1 = "Basic %s" % base64.b64encode("%s:%s" % (username_1, password_1))
form_1 = {"username":username_1, "email":email_1,"password":password_1,"password2":password_1}
response_1 = self.client.post(reverse(register),form_1, X_Experience_API_Version=settings.XAPI_VERSION)
username_2 = "tester2"
email_2 = "[email protected]"
password_2 = "test2"
auth_2 = "Basic %s" % base64.b64encode("%s:%s" % (username_2, password_2))
form_2 = {"username":username_2, "email":email_2,"password":password_2,"password2":password_2}
response_2 = self.client.post(reverse(register),form_2, X_Experience_API_Version=settings.XAPI_VERSION)
# Should have no definition
stmt_1 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:[email protected]"},
"object":{"id": "act:test_activity_change"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_1 = self.client.post(reverse(statements), stmt_1, content_type="application/json",
Authorization=auth_1, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response_1.status_code, 200)
user1_agent = Agent.objects.get(mbox="mailto:[email protected]")
act = Activity.objects.get(activity_id="act:test_activity_change").to_dict()
self.assertEqual(act["id"], "act:test_activity_change")
with self.assertRaises(KeyError):
act["definition"]
acts = Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 1)
# Creates local act for other user
stmt_2 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:[email protected]"},
"object":{"id": "act:test_activity_change", "definition":{"name":{"en-US": "fail_test"}}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_2 = self.client.post(reverse(statements), stmt_2, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION)
user2_agent = Agent.objects.get(mbox="mailto:[email protected]")
self.assertEqual(response_2.status_code, 200)
act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertIn('definition', act)
acts = Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Should update local version of activity with definition for that user
response_3 = self.client.post(reverse(statements), stmt_1, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response_3.status_code, 200)
act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertIn('definition', act)
acts = Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Should have new definition for canonical since user is owner
stmt_3 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:[email protected]"},
"object":{"id": "act:test_activity_change", "definition":{"name":{"en-US": "foo"}}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_4 = self.client.post(reverse(statements), stmt_3, content_type="application/json",
Authorization=auth_1, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response_4.status_code, 200)
act = Activity.objects.get(activity_id="act:test_activity_change", authority=user1_agent).to_dict()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertEqual(act["definition"], {"name":{"en-US": "foo"}})
# Should have updated local activity for that user with new definition
response_5 = self.client.post(reverse(statements), stmt_3, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response_5.status_code, 200)
act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertEqual(act["definition"], {"name":{"en-US": "foo"}})
acts = Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Should update local version of that activity for that user
stmt_4 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:[email protected]"},
"object":{"id": "act:test_activity_change", "definition":{"name":{"en-US": "bar"}}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_6 = self.client.post(reverse(statements), stmt_4, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response_6.status_code, 200)
act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertEqual(act["definition"], {"name":{"en-US": "bar"}})
acts = Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Should have replaced name in def for local act of that user
stmt_5 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:[email protected]"},
"object":{"id": "act:test_activity_change", "definition":{"name":{"fr": "bar"}}},
"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}})
response_7 = self.client.post(reverse(statements), stmt_5, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response_7.status_code, 200)
act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertIn("fr", act['definition']['name'])
acts = Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Can't remove definition if it already exists - should still be there
response_8 = self.client.post(reverse(statements), stmt_1, content_type="application/json",
Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response_8.status_code, 200)
act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict()
self.assertEqual(act["id"], "act:test_activity_change")
self.assertIn("definition", act.keys())
acts = Activity.objects.filter(activity_id="act:test_activity_change").count()
self.assertEqual(acts, 2)
# Check canonical of last stmt returned from query to make sure it contains the definition
param = {"agent":{"mbox":"mailto:[email protected]"}, "format":"canonical", "activity":"act:test_activity_change"}
path = "%s?%s" % (reverse(statements),urllib.urlencode(param))
r = self.client.get(path, X_Experience_API_Version="1.0", Authorization=auth_1)
self.assertEqual(r.status_code, 200)
first_stmt = json.loads(r.content)["statements"][0]
self.assertEqual(first_stmt["object"]["definition"], {"name":{"en-US": "foo"}})
def test_post_with_non_oauth_not_existing_group(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:[email protected]"
stmt = json.dumps({"actor":{"name":"agentA","mbox":"mailto:[email protected]"},"verb":{"id": "http://verb/uri/joined", "display":{"en-US":"joined"}},
"object": {"id":"act:i.pity.the.fool"}, "authority": {"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:[email protected]"},{"name":"agentB","mbox":"mailto:[email protected]"}]}})
response = self.client.post(reverse(statements), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn("Statements cannot have a non-Oauth group as the authority", response.content)
def test_post_with_non_oauth_existing_group(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:[email protected]"
group = {"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:[email protected]"},{"name":"agentB","mbox":"mailto:[email protected]"}]}
Agent.objects.retrieve_or_create(**group)
stmt = json.dumps({"actor":{"name":"agentA","mbox":"mailto:[email protected]"},"verb":{"id": "http://verb/uri/joined", "display":{"en-US":"joined"}},
"object": {"id":"act:i.pity.the.fool"}, "authority": {"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:[email protected]"},{"name":"agentB","mbox":"mailto:[email protected]"}]}})
response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, "Statements cannot have a non-Oauth group as the authority")
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.exp_fetchers."""
import copy
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_jobs_one_off
from core.domain import exp_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
class ExplorationRetrievalTests(test_utils.GenericTestBase):
"""Test the exploration retrieval methods."""
EXP_ID = 'An_exploration_id'
def setUp(self):
super(ExplorationRetrievalTests, self).setUp()
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(self.owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
def test_retrieval_of_explorations(self):
"""Test the get_exploration_by_id() method."""
with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
exp_fetchers.get_exploration_by_id('fake_eid')
exploration = self.save_new_default_exploration(
self.EXP_ID, self.owner_id)
retrieved_exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.id, retrieved_exploration.id)
self.assertEqual(exploration.title, retrieved_exploration.title)
with self.assertRaises(Exception):
exp_fetchers.get_exploration_by_id('fake_exploration')
def test_retrieval_of_multiple_exploration_versions_for_fake_exp_id(self):
with self.assertRaisesRegexp(
ValueError, 'The given entity_id fake_exp_id is invalid'):
exp_fetchers.get_multiple_explorations_by_version(
'fake_exp_id', [1, 2, 3])
def test_retrieval_of_multiple_exploration_versions(self):
self.save_new_default_exploration(self.EXP_ID, self.owner_id)
# Update exploration to version 2.
change_list = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'New state',
})]
exp_services.update_exploration(
feconf.SYSTEM_COMMITTER_ID, self.EXP_ID, change_list, '')
# Update exploration to version 3.
change_list = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'New state 2',
})]
exp_services.update_exploration(
feconf.SYSTEM_COMMITTER_ID, self.EXP_ID, change_list, '')
exploration_latest = exp_fetchers.get_exploration_by_id(self.EXP_ID)
latest_version = exploration_latest.version
explorations = exp_fetchers.get_multiple_explorations_by_version(
self.EXP_ID, range(1, latest_version + 1))
self.assertEqual(len(explorations), 3)
self.assertEqual(explorations[0].version, 1)
self.assertEqual(explorations[1].version, 2)
self.assertEqual(explorations[2].version, 3)
def test_version_number_errors_for_get_multiple_exploration_versions(self):
self.save_new_default_exploration(self.EXP_ID, self.owner_id)
# Update exploration to version 2.
change_list = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'New state',
})]
exp_services.update_exploration(
feconf.SYSTEM_COMMITTER_ID, self.EXP_ID, change_list, '')
# Update exploration to version 3.
change_list = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'New state 2',
})]
exp_services.update_exploration(
feconf.SYSTEM_COMMITTER_ID, self.EXP_ID, change_list, '')
with self.assertRaisesRegexp(
ValueError,
'Requested version number 4 cannot be higher than the current '
'version number 3.'):
exp_fetchers.get_multiple_explorations_by_version(
self.EXP_ID, [1, 2, 3, 4])
with self.assertRaisesRegexp(
ValueError,
'At least one version number is invalid'):
exp_fetchers.get_multiple_explorations_by_version(
self.EXP_ID, [1, 2, 2.5, 3])
def test_retrieval_of_multiple_explorations(self):
exps = {}
chars = 'abcde'
exp_ids = ['%s%s' % (self.EXP_ID, c) for c in chars]
for _id in exp_ids:
exp = self.save_new_valid_exploration(_id, self.owner_id)
exps[_id] = exp
result = exp_fetchers.get_multiple_explorations_by_id(
exp_ids)
for _id in exp_ids:
self.assertEqual(result.get(_id).title, exps.get(_id).title)
# Test retrieval of non-existent ids.
result = exp_fetchers.get_multiple_explorations_by_id(
exp_ids + ['doesnt_exist'], strict=False
)
for _id in exp_ids:
self.assertEqual(result.get(_id).title, exps.get(_id).title)
self.assertNotIn('doesnt_exist', result)
with self.assertRaises(Exception):
exp_fetchers.get_multiple_explorations_by_id(
exp_ids + ['doesnt_exist'])
class ExplorationConversionPipelineTests(test_utils.GenericTestBase):
"""Tests the exploration model -> exploration conversion pipeline."""
OLD_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
UPGRADED_EXP_YAML = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: Old objective
param_changes: []
param_specs: {}
schema_version: %d
states:
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
%s:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
buttonText:
value: Continue
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: Continue
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: %d
tags: []
title: Old Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
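# The %s/%d placeholders above are filled with the default initial state name
# and the current exploration/states schema versions, so this expected YAML
# tracks schema bumps automatically.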
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
def setUp(self):
super(ExplorationConversionPipelineTests, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
# Create exploration that uses a states schema version of 0 and ensure
# it is properly converted.
self.save_new_exp_with_states_schema_v0(
self.OLD_EXP_ID, self.albert_id, 'Old Title')
# Create standard exploration that should not be converted.
new_exp = self.save_new_valid_exploration(
self.NEW_EXP_ID, self.albert_id)
self._up_to_date_yaml = new_exp.to_yaml()
def test_converts_exp_model_with_default_states_schema_version(self):
exploration = exp_fetchers.get_exploration_by_id(self.OLD_EXP_ID)
self.assertEqual(
exploration.states_schema_version,
feconf.CURRENT_STATE_SCHEMA_VERSION)
self.assertEqual(exploration.to_yaml(), self.UPGRADED_EXP_YAML)
def test_does_not_convert_up_to_date_exploration(self):
exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)
self.assertEqual(
exploration.states_schema_version,
feconf.CURRENT_STATE_SCHEMA_VERSION)
self.assertEqual(exploration.to_yaml(), self._up_to_date_yaml)
def test_migration_then_reversion_maintains_valid_exploration(self):
"""This integration test simulates the behavior of the domain layer
prior to the introduction of a states schema. In particular, it deals
with an exploration that was created before any states schema
migrations occur. The exploration is constructed using multiple change
lists, then a migration job is run. The test thereafter tests if
reverting to a version prior to the migration still maintains a valid
exploration. It tests both the exploration domain object and the
exploration model stored in the datastore for validity.
Note: It is important to distinguish between when the test is testing
the exploration domain versus its model. It is operating at the domain
layer when using exp_fetchers.get_exploration_by_id. Otherwise, it
loads the model explicitly using exp_models.ExplorationModel.get and
then converts it to an exploration domain object for validation using
exp_fetchers.get_exploration_from_model. This is NOT the same process
as exp_fetchers.get_exploration_by_id as it skips many steps which
include the conversion pipeline (which is crucial to this test).
"""
exp_id = 'exp_id2'
# Create an exploration with states schema version 0.
self.save_new_exp_with_states_schema_v0(
exp_id, self.albert_id, 'Old Title')
# Load the exploration without using the conversion pipeline. All of
# these changes are to happen on an exploration with states schema
# version 0.
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=True, version=None)
# In version 1, the title was 'Old Title'.
# In version 2, the title becomes 'New title'.
exploration_model.title = 'New title'
exploration_model.commit(
self.albert_id, 'Changed title.', [])
# Version 2 of exploration.
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=True, version=None)
# Convert the model into an exploration domain object.
exploration = exp_fetchers.get_exploration_from_model(exploration_model)
# In version 3, a new state is added.
new_state = copy.deepcopy(
self.VERSION_0_STATES_DICT[feconf.DEFAULT_INIT_STATE_NAME])
new_state['interaction']['id'] = 'TextInput'
exploration_model.states['New state'] = new_state
# Properly link in the new state to avoid an invalid exploration.
init_state = exploration_model.states[feconf.DEFAULT_INIT_STATE_NAME]
init_handler = init_state['interaction']['handlers'][0]
init_handler['rule_specs'][0]['dest'] = 'New state'
exploration_model.commit(
'committer_id_v3', 'Added new state', [])
# Version 3 of exploration.
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=True, version=None)
# Convert the model into an exploration domain object.
exploration = exp_fetchers.get_exploration_from_model(exploration_model)
# Version 4 is an upgrade based on the migration job.
# Start migration job on sample exploration.
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Verify the latest version of the exploration has the most up-to-date
# states schema version.
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=True, version=None)
exploration = exp_fetchers.get_exploration_from_model(
exploration_model, run_conversion=False)
self.assertEqual(
exploration.states_schema_version,
feconf.CURRENT_STATE_SCHEMA_VERSION)
# The exploration should be valid after conversion.
exploration.validate(strict=True)
# Version 5 is a reversion to version 1.
exp_services.revert_exploration('committer_id_v4', exp_id, 4, 1)
# The exploration model itself should now be the old version
# (pre-migration).
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=True, version=None)
self.assertEqual(exploration_model.states_schema_version, 0)
# The exploration domain object should be updated since it ran through
# the conversion pipeline.
exploration = exp_fetchers.get_exploration_by_id(exp_id)
# The reversion after migration should still be an up-to-date
# exploration. exp_fetchers.get_exploration_by_id will automatically
# keep it up-to-date.
self.assertEqual(exploration.to_yaml(), self.UPGRADED_EXP_YAML)
# The exploration should be valid after reversion.
exploration.validate(strict=True)
snapshots_metadata = exp_services.get_exploration_snapshots_metadata(
exp_id)
# These are used to verify the correct history has been recorded after
# both migration and reversion.
commit_dict_5 = {
'committer_id': 'committer_id_v4',
'commit_message': 'Reverted exploration to version 1',
'version_number': 5,
}
commit_dict_4 = {
'committer_id': feconf.MIGRATION_BOT_USERNAME,
'commit_message':
'Update exploration states from schema version 0 to %d.' %
feconf.CURRENT_STATE_SCHEMA_VERSION,
'commit_cmds': [{
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': '0',
'to_version': str(
feconf.CURRENT_STATE_SCHEMA_VERSION)
}],
'version_number': 4,
}
# Ensure there have been 5 commits.
self.assertEqual(len(snapshots_metadata), 5)
# Ensure the correct commit logs were entered during both migration and
# reversion. Also, ensure the correct commit command was written during
# migration.
self.assertDictContainsSubset(commit_dict_4, snapshots_metadata[3])
self.assertDictContainsSubset(commit_dict_5, snapshots_metadata[4])
self.assertLess(
snapshots_metadata[3]['created_on_ms'],
snapshots_metadata[4]['created_on_ms'])
# Ensure that if a converted, then reverted, then converted exploration
# is saved, it will be the up-to-date version within the datastore.
exp_services.update_exploration(
self.albert_id, exp_id, [], 'Resave after reversion')
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=True, version=None)
exploration = exp_fetchers.get_exploration_from_model(
exploration_model,
run_conversion=False)
# This exploration should be both up-to-date and valid.
self.assertEqual(exploration.to_yaml(), self.UPGRADED_EXP_YAML)
exploration.validate(strict=True)
def test_loading_old_exploration_does_not_break_domain_object_ctor(self):
"""This test attempts to load an exploration that is stored in the data
store as pre-states schema version 0. The
exp_fetchers.get_exploration_by_id function should properly load and
convert the exploration without any issues. Structural changes to the
states schema will not break the exploration domain class constructor.
"""
exp_id = 'exp_id3'
        # Create an exploration with states schema version 0 and an old states
# blob.
self.save_new_exp_with_states_schema_v0(
exp_id, self.albert_id, 'Old Title')
# Ensure the exploration was converted.
exploration = exp_fetchers.get_exploration_by_id(exp_id)
self.assertEqual(
exploration.states_schema_version,
feconf.CURRENT_STATE_SCHEMA_VERSION)
# The converted exploration should be up-to-date and properly
# converted.
self.assertEqual(exploration.to_yaml(), self.UPGRADED_EXP_YAML)
|
|
from sympy.core.basic import S, C, Basic, sympify
from sympy.simplify import simplify, trigsimp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.geometry.exceptions import GeometryError
from entity import GeometryEntity
from point import Point
from line import LinearEntity, Line
class Ellipse(GeometryEntity):
"""
An ellipse in space. Constructed from a center and two radii, the
first being the horizontal radius (along the x-axis) and the second
being the vertical radius (along the y-axis).
Notes:
======
    - Rotation is currently not supported since an ellipse is defined
      by its horizontal/vertical radii
Example:
========
>>> e = Ellipse(Point(0, 0), 5, 1)
>>> e.hradius, e.vradius
(5, 1)
Plotting example
----------------
In [1]: c1 = Circle(Point(0,0), 1)
In [2]: Plot(c1)
Out[2]: [0]: cos(t), sin(t), 'mode=parametric'
In [3]: p = Plot()
In [4]: p[0] = c1
In [5]: radius = Segment(c1.center, c1.random_point())
In [6]: p[1] = radius
In [7]: p
Out[7]:
[0]: cos(t), sin(t), 'mode=parametric'
[1]: t*cos(1.546086215036205357975518382),
t*sin(1.546086215036205357975518382), 'mode=parametric'
"""
def __new__(cls, center, hradius, vradius, **kwargs):
hradius = sympify(hradius)
vradius = sympify(vradius)
if not isinstance(center, Point):
raise TypeError("center must be be a Point")
if hradius == vradius:
return Circle(center, hradius, **kwargs)
return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs)
@property
def center(self):
"""The center of the ellipse."""
return self.__getitem__(0)
@property
def hradius(self):
"""The horizontal radius of the ellipse."""
return self.__getitem__(1)
@property
def vradius(self):
"""The vertical radius of the ellipse."""
return self.__getitem__(2)
@property
def area(self):
"""The area of the ellipse."""
return simplify(S.Pi * self.hradius * self.vradius)
@property
def circumference(self):
"""The circumference of the ellipse."""
# TODO It's fairly complicated, but we could use Ramanujan's
# approximation.
raise NotImplementedError
@property
def foci(self):
"""The foci of the ellipse, if the radii are numerical."""
c = self.center
if self.hradius == self.vradius:
return c
hr, vr = self.hradius, self.vradius
if hr.atoms(C.Symbol) or vr.atoms(C.Symbol):
raise ValueError("foci can only be determined on non-symbolic radii")
v = sqrt(abs(vr**2 - hr**2))
if hr < vr:
return (c+Point(0, -v), c+Point(0, v))
else:
return (c+Point(-v, 0), c+Point(v, 0))
def tangent_line(self, p):
"""
        If p is on the ellipse, returns the tangent line through point p.
        Finding the tangent line(s) from a point that is not on the ellipse
        is not implemented yet and raises NotImplementedError.
Example:
In [1]: e = Ellipse(Point(0,0), 3, 2)
In [2]: t = e.tangent_line(e.random_point())
In [3]: p = Plot()
In [4]: p[0] = e
In [5]: p[1] = t
The above will plot an ellipse together with a tangent line.
"""
if p in self:
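            # Implicitly differentiating (x-cx)**2/a**2 + (y-cy)**2/b**2 = 1
            # gives the tangent direction at p as (a**2*(py-cy), b**2*(cx-px)),
            # which is exactly the (run, rise) step taken below.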
rise = (self.vradius ** 2)*(self.center[0] - p[0])
run = (self.hradius ** 2)*(p[1] - self.center[1])
p2 = Point(simplify(p[0] + run),
simplify(p[1] + rise))
return Line(p, p2)
else:
# TODO If p is not on the ellipse, attempt to create the
            # tangent(s) from point p to the ellipse.
raise NotImplementedError("Cannot find tangent lines when p is not on the ellipse")
def is_tangent(self, o):
"""Returns True if o is tangent to the ellipse, False otherwise."""
inter = None
if isinstance(o, Ellipse):
inter = self.intersection(o)
return (inter is not None and isinstance(inter[0], Point) and len(inter) == 1)
elif isinstance(o, LinearEntity):
inter = self._do_line_intersection(o)
if (inter is not None) and len(inter) == 1:
return (inter[0] in o)
else:
return False
elif isinstance(o, Polygon):
c = 0
for seg in o.sides:
inter = self._do_line_intersection(seg)
c += len([True for point in inter if point in seg])
return (c == 1)
else:
raise NotImplementedError("Unknown argument type")
def arbitrary_point(self, parameter_name='t'):
"""Returns a symbolic point that is on the ellipse."""
t = C.Symbol(parameter_name, real=True)
return Point(
self.center[0] + self.hradius*C.cos(t),
self.center[1] + self.vradius*C.sin(t))
def plot_interval(self, parameter_name='t'):
"""Returns a typical plot interval used by the plotting module."""
t = C.Symbol(parameter_name, real=True)
return [t, -S.Pi, S.Pi]
def random_point(self):
"""Returns a random point on the ellipse."""
        from random import random
t = C.Symbol('t', real=True)
p = self.arbitrary_point('t')
# get a random value in [-pi, pi)
subs_val = float(S.Pi)*(2*random() - 1)
return Point(p[0].subs(t, subs_val), p[1].subs(t, subs_val))
def equation(self, x='x', y='y'):
"""
Returns the equation of the ellipse.
Optional parameters x and y can be used to specify symbols, or the
names of the symbols used in the equation.
"""
if isinstance(x, basestring): x = C.Symbol(x, real=True)
if isinstance(y, basestring): y = C.Symbol(y, real=True)
t1 = ((x - self.center[0]) / self.hradius)**2
t2 = ((y - self.center[1]) / self.vradius)**2
return t1 + t2 - 1
def _do_line_intersection(self, o):
"""
        Find the intersections of a LinearEntity and the ellipse. The
        entity is always treated as an infinite Line, regardless of whether
        it is actually a Ray or a Segment, so intersection() must filter
        out any returned points that do not lie on the original entity.
"""
def dot(p1, p2):
sum = 0
for ind in xrange(0, len(p1)):
sum += p1[ind] * p2[ind]
return simplify(sum)
hr_sq = self.hradius ** 2
vr_sq = self.vradius ** 2
lp = o.points
ldir = lp[1] - lp[0]
diff = lp[0] - self.center
mdir = (ldir[0] / hr_sq, ldir[1] / vr_sq)
mdiff = (diff[0] / hr_sq, diff[1] / vr_sq)
a = dot(ldir, mdir)
b = dot(ldir, mdiff)
c = dot(diff, mdiff) - 1
        det = simplify(b*b - a*c)
result = []
if det == 0:
t = -b / a
result.append( lp[0] + (lp[1] - lp[0]) * t )
else:
is_good = True
try:
is_good = (det > 0)
except NotImplementedError: #symbolic, allow
is_good = True
if is_good:
root = sqrt(det)
t_a = (-b - root) / a
t_b = (-b + root) / a
result.append( lp[0] + (lp[1] - lp[0]) * t_a )
result.append( lp[0] + (lp[1] - lp[0]) * t_b )
return result
def intersection(self, o):
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
elif isinstance(o, LinearEntity):
# LinearEntity may be a ray/segment, so check the points
# of intersection for coincidence first
result = self._do_line_intersection(o)
if result is not None:
for ind in xrange(len(result)-1, -1, -1):
if result[ind] not in o:
del result[ind]
return result
elif isinstance(o, Ellipse):
if o == self:
return self
else:
# TODO This is a bit more complicated
pass
raise NotImplementedError()
def __eq__(self, o):
return ((self.center == o.center) \
and (self.hradius == o.hradius) \
and (self.vradius == o.vradius))
def __contains__(self, o):
if isinstance(o, Point):
x = C.Symbol('x', real=True, dummy=True)
y = C.Symbol('y', real=True, dummy=True)
res = self.equation(x, y).subs({x: o[0], y: o[1]})
res = trigsimp(simplify(res))
return res == 0
elif isinstance(o, Ellipse):
return (self == o)
return False
class Circle(Ellipse):
"""
    A circle in space. Constructed simply from a center and a radius, or
from three non-collinear points.
Example:
========
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.hradius, c1.vradius, c1.radius
(5, 5, 5)
>>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0))
>>> c2.hradius, c2.vradius, c2.radius, c2.center
(2**(1/2)/2, 2**(1/2)/2, 2**(1/2)/2, Point(1/2, 1/2))
"""
def __new__(cls, *args, **kwargs):
c, r = None, None
if len(args) == 3 and isinstance(args[0], Point):
from polygon import Triangle
t = Triangle(args[0], args[1], args[2])
if t.area == 0:
raise GeometryError("Cannot construct a circle from three collinear points")
c = t.circumcenter
r = t.circumradius
elif len(args) == 2:
# Assume (center, radius) pair
c = args[0]
r = sympify(args[1])
if not (c is None or r is None):
return GeometryEntity.__new__(cls, c, r, **kwargs)
raise GeometryError("Circle.__new__ received unknown arguments")
@property
def hradius(self):
"""The horizontal radius of the ellipse."""
return self.__getitem__(1)
@property
def vradius(self):
"""The vertical radius of the ellipse."""
return self.__getitem__(1)
@property
def radius(self):
"""The radius of the circle."""
return self.__getitem__(1)
@property
def circumference(self):
"""The circumference of the circle."""
return 2 * S.Pi * self.radius
def equation(self, x='x', y='y'):
"""
Returns the equation of the circle.
Optional parameters x and y can be used to specify symbols, or the
names of the symbols used in the equation.
"""
if isinstance(x, basestring): x = C.Symbol(x, real=True)
if isinstance(y, basestring): y = C.Symbol(y, real=True)
t1 = (x - self.center[0])**2
t2 = (y - self.center[1])**2
return t1 + t2 - self.hradius**2
def intersection(self, o):
if isinstance(o, Circle):
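            # Standard two-circle construction: d is the distance between the
            # centers, a is the distance from self.center to the chord through
            # the intersection points (measured along the center-to-center
            # axis), and h is half the chord length; the intersections are the
            # foot of that chord offset by +/- h perpendicular to the axis.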
dx,dy = o.center - self.center
d = sqrt( simplify(dy**2 + dx**2) )
a = simplify((self.radius**2 - o.radius**2 + d**2) / (2*d))
x2 = self.center[0] + (dx * a/d)
y2 = self.center[1] + (dy * a/d)
h = sqrt( simplify(self.radius**2 - a**2) )
rx = -dy * (h/d)
ry = dx * (h/d)
xi_1 = simplify(x2 + rx)
xi_2 = simplify(x2 - rx)
yi_1 = simplify(y2 + ry)
yi_2 = simplify(y2 - ry)
ret = [Point(xi_1, yi_1)]
if xi_1 != xi_2 or yi_1 != yi_2:
ret.append(Point(xi_2, yi_2))
return ret
elif isinstance(o, Ellipse):
a, b, r = o.hradius, o.vradius, self.radius
x = a*sqrt(simplify((r**2 - b**2)/(a**2 - b**2)))
y = b*sqrt(simplify((a**2 - r**2)/(a**2 - b**2)))
return list(set([Point(x,y), Point(x,-y), Point(-x,y), Point(-x,-y)]))
return Ellipse.intersection(self, o)
|
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype)
from pandas import (Index, Series, isnull, date_range,
NaT, period_range, MultiIndex, IntervalIndex)
from pandas.core.indexes.datetimes import Timestamp, DatetimeIndex
from pandas._libs import lib
from pandas._libs.tslib import iNaT
from pandas.compat import lrange, range, zip, OrderedDict, long
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesConstructors(TestData):
def test_invalid_dtype(self):
# GH15520
msg = 'not understood'
invalid_list = [pd.Timestamp, 'pd.Timestamp', list]
for dtype in invalid_list:
with tm.assert_raises_regex(TypeError, msg):
Series([], name='time', dtype=dtype)
def test_scalar_conversion(self):
        # Passing in a scalar yields a Series, not a scalar
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.])) == 1.0
assert int(Series([1.])) == 1
assert long(Series([1.])) == 1
def test_constructor(self):
assert self.ts.index.is_all_dates
# Pass in Series
derived = Series(self.ts)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, self.ts.index)
# Ensure new index is not created
assert id(self.ts.index) == id(derived.index)
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not self.empty.index.is_all_dates
assert not Series({}).index.is_all_dates
pytest.raises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
mixed.name = 'Series'
rs = Series(mixed).name
xp = 'Series'
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
pytest.raises(NotImplementedError, Series, m)
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
        # these are Index() and RangeIndex(), which don't compare type-equal
        # but do satisfy .equals
assert_series_equal(empty, empty2, check_index_type=False)
empty = Series(index=lrange(10))
empty2 = Series(np.nan, index=lrange(10))
assert_series_equal(empty, empty2)
def test_constructor_series(self):
index1 = ['d', 'b', 'a', 'c']
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
assert_series_equal(s2, s1.sort_index())
def test_constructor_iterator(self):
expected = Series(list(range(10)), dtype='int64')
result = Series(range(10), dtype='int64')
assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype='int64')
for obj in [[1, 2, 3], (1, 2, 3),
np.array([1, 2, 3], dtype='int64')]:
result = Series(obj, index=[0, 1, 2])
assert_series_equal(result, expected)
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(lrange(10))
assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(lrange(10))
assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# GH12574
pytest.raises(
ValueError, lambda: Series(pd.Categorical([1, 2, 3]),
dtype='int64'))
cat = Series(pd.Categorical([1, 2, 3]), dtype='category')
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype='category')
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_maskedarray(self):
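        # Masked entries become missing values, which drives the dtype
        # upcasts asserted below: float stays float, int is upcast to float,
        # bool falls back to object, and datetime64 keeps its dtype with NaT
        # in the masked slots.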
data = ma.masked_all((3, ), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype=int)
result = Series(data)
expected = Series([nan, nan, nan], dtype=float)
assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0, nan, 2], index=index, dtype=float)
assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype=bool)
result = Series(data)
expected = Series([nan, nan, nan], dtype=object)
assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([True, nan, False], index=index, dtype=object)
assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype='M8[ns]')
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype='M8[ns]')
assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), iNaT,
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1., 1., 8.]), dtype='i8')
assert s.dtype == np.dtype('i8')
s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')
assert s.dtype == np.dtype('f8')
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.], np.array([1.])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
            # changes to the original do not affect the copy
x[0] = 2.
assert not x.equals(y)
assert x[0] == 2.
assert y[0] == 1.
def test_constructor_pass_none(self):
s = Series(None, index=lrange(5))
assert s.dtype == np.float64
s = Series(None, index=lrange(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == 'datetime64[ns]'
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
pytest.raises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dtype_nocast(self):
# 1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
        # incorrectly inferring datetimelike-looking values when object
        # dtype is specified
s = Series([Timestamp('20130101'), 'NOV'], dtype=object)
assert s.iloc[0] == Timestamp('20130101')
assert s.iloc[1] == 'NOV'
assert s.dtype == object
        # the dtype was being reset on the slicing and re-inferred to datetime
        # even though the blocks are mixed
belly = '216 3T19'.split()
wing1 = '2T15 4H19'.split()
wing2 = '416 4T20'.split()
mat = pd.to_datetime('2016-01-22 2019-09-07'.split())
df = pd.DataFrame(
{'wing1': wing1,
'wing2': wing2,
'mat': mat}, index=belly)
result = df.loc['3T19']
assert result.dtype == object
result = df.loc['216']
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = Series(arr)
assert result.dtype == 'M8[ns]'
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype='M8[ns]', index=lrange(5))
assert isnull(s).all()
        # in theory this should be all nulls, but since
        # we are not specifying a dtype it is ambiguous
s = Series(iNaT, index=lrange(5))
assert not isnull(s).all()
s = Series(nan, dtype='M8[ns]', index=lrange(5))
assert isnull(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype='M8[ns]')
assert isnull(s[1])
assert s.dtype == 'M8[ns]'
s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
assert isnull(s[1])
assert s.dtype == 'M8[ns]'
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == 'M8[ns]'
s.iloc[0] = np.nan
assert s.dtype == 'M8[ns]'
# invalid astypes
for t in ['s', 'D', 'us', 'ms']:
pytest.raises(TypeError, s.astype, 'M8[%s]' % t)
# GH3414 related
pytest.raises(TypeError, lambda x: Series(
Series(dates).astype('int') / 1000000, dtype='M8[ms]'))
pytest.raises(TypeError,
lambda x: Series(dates, dtype='datetime64'))
        # invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp('20130101'), 1], index=['a', 'b'])
assert result['a'] == Timestamp('20130101')
assert result['b'] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')
values2 = dates.view(np.ndarray).astype('datetime64[ns]')
expected = Series(values2, index=dates)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, dates)
assert_series_equal(result, expected)
# GH 13876
        # coerce non-ns datetime64 to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, index=dates, dtype=object)
assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()],
dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
assert str(Series(dr).iloc[0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
assert str(Series(dr).iloc[0].tz) == 'US/Eastern'
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == 'object'
assert s[2] is pd.NaT
assert 'NaT' in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == 'object'
assert s[2] is pd.NaT
assert 'NaT' in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == 'object'
assert s[2] is np.nan
assert 'NaN' in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr)
assert s.dtype.name == 'datetime64[ns, US/Eastern]'
assert s.dtype == 'datetime64[ns, US/Eastern]'
assert is_datetime64tz_dtype(s.dtype)
assert 'datetime64[ns, US/Eastern]' in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == 'datetime64[ns]'
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize('UTC').tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern', freq='D')
result = s[0]
assert result == Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern', freq='D')
result = s[Series([True, True, False], index=s.index)]
assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
assert_series_equal(result, s)
# astype
result = s.astype(object)
expected = Series(DatetimeIndex(s._values).asobject)
assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz)
assert_series_equal(result, s)
# astype - datetime64[ns, tz]
result = Series(s.values).astype('datetime64[ns, US/Eastern]')
assert_series_equal(result, s)
result = Series(s.values).astype(s.dtype)
assert_series_equal(result, s)
result = s.astype('datetime64[ns, CET]')
expected = Series(date_range('20130101 06:00:00', periods=3, tz='CET'))
assert_series_equal(result, expected)
# short str
assert 'datetime64[ns, US/Eastern]' in str(s)
# formatting with NaT
result = s.shift()
assert 'datetime64[ns, US/Eastern]' in str(result)
assert 'NaT' in str(result)
# long str
t = Series(date_range('20130101', periods=1000, tz='US/Eastern'))
assert 'datetime64[ns, US/Eastern]' in str(t)
result = pd.DatetimeIndex(s, freq='infer')
tm.assert_index_equal(result, dr)
# inference
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])
assert s.dtype == 'datetime64[ns, US/Pacific]'
assert lib.infer_dtype(s) == 'datetime64'
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])
assert s.dtype == 'object'
assert lib.infer_dtype(s) == 'datetime'
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]')
expected = Series(pd.DatetimeIndex(['NaT', 'NaT'], tz='US/Eastern'))
assert_series_equal(s, expected)
def test_construction_interval(self):
# construction from interval & array of intervals
index = IntervalIndex.from_breaks(np.arange(3), closed='right')
result = Series(index)
repr(result)
str(result)
tm.assert_index_equal(Index(result.values), index)
result = Series(index.values)
tm.assert_index_equal(Index(result.values), index)
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert('UTC'), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range('20130101', periods=5, freq='D')
s = Series(pi)
expected = Series(pi.asobject)
assert_series_equal(s, expected)
assert s.dtype == 'object'
def test_constructor_dict(self):
d = {'a': 0., 'b': 1., 'c': 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx)
expected.iloc[0] = 0
expected.iloc[1] = 1
assert_series_equal(result, expected)
def test_constructor_dict_multiindex(self):
check = lambda result, expected: tm.assert_series_equal(
result, expected, check_dtype=True, check_series_type=True)
d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}
_d = sorted(d.items())
ser = Series(d)
expected = Series([x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d]))
check(ser, expected)
d['z'] = 111.
_d.insert(0, ('z', d['z']))
ser = Series(d)
expected = Series([x[1] for x in _d], index=Index(
[x[0] for x in _d], tupleize_cols=False))
ser = ser.reindex(index=expected.index)
check(ser, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
        # constructing a Series from a dict as data and a TimedeltaIndex as
        # index used to produce NaN in the resulting Series data
expected = Series(
data=['A', 'B', 'C'],
index=pd.to_timedelta([0, 10, 20], unit='s')
)
result = Series(
data={pd.to_timedelta(0, unit='s'): 'A',
pd.to_timedelta(10, unit='s'): 'B',
pd.to_timedelta(20, unit='s'): 'C'},
index=pd.to_timedelta([0, 10, 20], unit='s')
)
# this should work
assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = Series(data)
refseries = Series(dict(compat.iteritems(data)))
assert_series_equal(refseries, series)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
assert_series_equal(result_datetime64, expected)
assert_series_equal(result_datetime, expected)
assert_series_equal(result_Timestamp, expected)
def test_orderedDict_ctor(self):
# GH3283
import pandas
import random
data = OrderedDict([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
assert all(s.values == list(data.values()))
def test_orderedDict_subclass_ctor(self):
# GH3283
import pandas
import random
class A(OrderedDict):
pass
data = A([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
assert all(s.values == list(data.values()))
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_set(self):
values = set([1, 2, 3, 4, 5])
pytest.raises(TypeError, Series, values)
values = frozenset(values)
pytest.raises(TypeError, Series, values)
def test_fromDict(self):
data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
series = Series(data)
assert tm.is_sorted(series.index)
data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}
series = Series(data)
assert series.dtype == np.object_
data = {'a': '0', 'b': '1'}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
assert nans.dtype == np.float_
assert len(nans) == len(self.ts)
strings = Series('foo', index=self.ts.index)
assert strings.dtype == np.object_
assert len(strings) == len(self.ts)
d = datetime.now()
dates = Series(d, index=self.ts.index)
assert dates.dtype == 'M8[ns]'
assert len(dates) == len(self.ts)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=self.ts.index, dtype="category")
expected = Series(0, index=self.ts.index).astype("category")
assert categorical.dtype == 'category'
assert len(categorical) == len(self.ts)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
td = Series([timedelta(days=1)])
assert td.dtype == 'timedelta64[ns]'
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(
1, 's')])
assert td.dtype == 'timedelta64[ns]'
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype='m8[ns]')
assert td.dtype == 'timedelta64[ns]'
td = Series([timedelta(days=1), np.nan], dtype='m8[ns]')
assert td.dtype == 'timedelta64[ns]'
td = Series([np.timedelta64(300000000), pd.NaT], dtype='m8[ns]')
assert td.dtype == 'timedelta64[ns]'
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == 'timedelta64[ns]'
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == 'object'
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == 'timedelta64[ns]'
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == 'timedelta64[ns]'
td = Series([np.timedelta64(1, 's')])
assert td.dtype == 'timedelta64[ns]'
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# pytest.raises(TypeError, td.astype, 'm8[%s]' % t)
# valid astype
td.astype('int64')
# invalid casting
pytest.raises(TypeError, td.astype, 'int32')
# this is an invalid casting
def f():
Series([timedelta(days=1), 'foo'], dtype='m8[ns]')
pytest.raises(Exception, f)
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
assert td.dtype == 'object'
# these will correctly infer a timedelta
s = Series([None, pd.NaT, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
s = Series([np.nan, pd.NaT, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
s = Series([pd.NaT, None, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
s = Series([pd.NaT, np.nan, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
assert isnull(val)
series[2] = val
assert isnull(series[2])
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype('M8[ns]')
expected = Series([NaT])
assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
for n in [777, 777., 'name', datetime(2001, 11, 11), (1, ), u"\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {'a': 0, 'b': 1}]:
s = Series(data, name=n)
assert s.name == n
def test_constructor_name_unhashable(self):
for n in [['name_list'], np.ones(2), {1: 2}]:
for data in [['name_list'], np.ones(2), {1: 2}]:
pytest.raises(TypeError, Series, data, name=n)
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
assert series.dtype == 'M8[ns]'
def test_constructor_cant_cast_datetime64(self):
msg = "Cannot cast datetime64 to "
with tm.assert_raises_regex(TypeError, msg):
Series(date_range('1/1/2000', periods=10), dtype=float)
with tm.assert_raises_regex(TypeError, msg):
Series(date_range('1/1/2000', periods=10), dtype=int)
def test_constructor_cast_object(self):
s = Series(date_range('1/1/2000', periods=10), dtype=object)
exp = Series(date_range('1/1/2000', periods=10))
tm.assert_series_equal(s, exp)
def test_constructor_generic_timestamp_deprecated(self):
# see gh-15524
with tm.assert_produces_warning(FutureWarning):
dtype = np.timedelta64
s = Series([], dtype=dtype)
assert s.empty
assert s.dtype == 'm8[ns]'
with tm.assert_produces_warning(FutureWarning):
dtype = np.datetime64
s = Series([], dtype=dtype)
assert s.empty
assert s.dtype == 'M8[ns]'
# These timestamps have the wrong frequencies,
# so an Exception should be raised now.
msg = "cannot convert timedeltalike"
with tm.assert_raises_regex(TypeError, msg):
Series([], dtype='m8[ps]')
msg = "cannot convert datetimelike"
with tm.assert_raises_regex(TypeError, msg):
Series([], dtype='M8[ps]')
|
|
from TProtocol import *
from struct import pack, unpack
__all__ = ['TCompactProtocol', 'TCompactProtocolFactory']
CLEAR = 0
FIELD_WRITE = 1
VALUE_WRITE = 2
CONTAINER_WRITE = 3
BOOL_WRITE = 4
FIELD_READ = 5
CONTAINER_READ = 6
VALUE_READ = 7
BOOL_READ = 8
def make_helper(v_from, container):
def helper(func):
def nested(self, *args, **kwargs):
assert self.state in (v_from, container), (self.state, v_from, container)
return func(self, *args, **kwargs)
return nested
return helper
writer = make_helper(VALUE_WRITE, CONTAINER_WRITE)
reader = make_helper(VALUE_READ, CONTAINER_READ)
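# ZigZag encoding maps signed integers to unsigned ones so that values of
# small magnitude stay small as varints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3,
# 2 -> 4, and so on. makeZigZag and fromZigZag implement the two directions.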
def makeZigZag(n, bits):
return (n << 1) ^ (n >> (bits - 1))
def fromZigZag(n):
return (n >> 1) ^ -(n & 1)
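# Varints carry 7 payload bits per byte, least-significant group first, with
# the high bit set on every byte except the last. For example, 300
# (0b100101100) is written as the two bytes 0xAC 0x02.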
def writeVarint(trans, n):
out = []
while True:
if n & ~0x7f == 0:
out.append(n)
break
else:
out.append((n & 0xff) | 0x80)
n = n >> 7
trans.write(''.join(map(chr, out)))
def readVarint(trans):
result = 0
shift = 0
while True:
x = trans.readAll(1)
byte = ord(x)
result |= (byte & 0x7f) << shift
if byte >> 7 == 0:
return result
shift += 7
class CompactType:
STOP = 0x00
TRUE = 0x01
FALSE = 0x02
BYTE = 0x03
I16 = 0x04
I32 = 0x05
I64 = 0x06
DOUBLE = 0x07
BINARY = 0x08
LIST = 0x09
SET = 0x0A
MAP = 0x0B
STRUCT = 0x0C
CTYPES = {TType.STOP: CompactType.STOP,
TType.BOOL: CompactType.TRUE, # used for collection
TType.BYTE: CompactType.BYTE,
TType.I16: CompactType.I16,
TType.I32: CompactType.I32,
TType.I64: CompactType.I64,
TType.DOUBLE: CompactType.DOUBLE,
TType.STRING: CompactType.BINARY,
TType.STRUCT: CompactType.STRUCT,
TType.LIST: CompactType.LIST,
TType.SET: CompactType.SET,
TType.MAP: CompactType.MAP
}
TTYPES = {}
for k, v in CTYPES.items():
TTYPES[v] = k
TTYPES[CompactType.FALSE] = TType.BOOL
del k
del v
class TCompactProtocol(TProtocolBase):
"Compact implementation of the Thrift protocol driver."
PROTOCOL_ID = 0x82
VERSION = 1
VERSION_MASK = 0x1f
TYPE_MASK = 0xe0
TYPE_SHIFT_AMOUNT = 5
def __init__(self, trans):
TProtocolBase.__init__(self, trans)
self.state = CLEAR
self.__last_fid = 0
self.__bool_fid = None
self.__bool_value = None
self.__structs = []
self.__containers = []
def __writeVarint(self, n):
writeVarint(self.trans, n)
def writeMessageBegin(self, name, type, seqid):
assert self.state == CLEAR
self.__writeUByte(self.PROTOCOL_ID)
self.__writeUByte(self.VERSION | (type << self.TYPE_SHIFT_AMOUNT))
self.__writeVarint(seqid)
self.__writeString(name)
self.state = VALUE_WRITE
def writeMessageEnd(self):
assert self.state == VALUE_WRITE
self.state = CLEAR
def writeStructBegin(self, name):
assert self.state in (CLEAR, CONTAINER_WRITE, VALUE_WRITE), self.state
self.__structs.append((self.state, self.__last_fid))
self.state = FIELD_WRITE
self.__last_fid = 0
def writeStructEnd(self):
assert self.state == FIELD_WRITE
self.state, self.__last_fid = self.__structs.pop()
def writeFieldStop(self):
self.__writeByte(0)
def __writeFieldHeader(self, type, fid):
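        # Field ids are delta-encoded: a delta of 1..15 is packed into the
        # high nibble of the type byte; otherwise the type byte is written
        # alone and the full field id follows as a zigzag varint.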
delta = fid - self.__last_fid
if 0 < delta <= 15:
self.__writeUByte(delta << 4 | type)
else:
self.__writeByte(type)
self.__writeI16(fid)
self.__last_fid = fid
def writeFieldBegin(self, name, type, fid):
assert self.state == FIELD_WRITE, self.state
if type == TType.BOOL:
self.state = BOOL_WRITE
self.__bool_fid = fid
else:
self.state = VALUE_WRITE
self.__writeFieldHeader(CTYPES[type], fid)
def writeFieldEnd(self):
assert self.state in (VALUE_WRITE, BOOL_WRITE), self.state
self.state = FIELD_WRITE
def __writeUByte(self, byte):
self.trans.write(pack('!B', byte))
def __writeByte(self, byte):
self.trans.write(pack('!b', byte))
def __writeI16(self, i16):
self.__writeVarint(makeZigZag(i16, 16))
def __writeSize(self, i32):
self.__writeVarint(i32)
def writeCollectionBegin(self, etype, size):
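        # List/set headers pack sizes up to 14 into the high nibble of the
        # element-type byte; a nibble of 0xF signals that a varint size
        # follows.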
assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
if size <= 14:
self.__writeUByte(size << 4 | CTYPES[etype])
else:
self.__writeUByte(0xf0 | CTYPES[etype])
self.__writeSize(size)
self.__containers.append(self.state)
self.state = CONTAINER_WRITE
writeSetBegin = writeCollectionBegin
writeListBegin = writeCollectionBegin
def writeMapBegin(self, ktype, vtype, size):
assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
if size == 0:
self.__writeByte(0)
else:
self.__writeSize(size)
self.__writeUByte(CTYPES[ktype] << 4 | CTYPES[vtype])
self.__containers.append(self.state)
self.state = CONTAINER_WRITE
def writeCollectionEnd(self):
assert self.state == CONTAINER_WRITE, self.state
self.state = self.__containers.pop()
writeMapEnd = writeCollectionEnd
writeSetEnd = writeCollectionEnd
writeListEnd = writeCollectionEnd
def writeBool(self, bool):
if self.state == BOOL_WRITE:
if bool:
ctype = CompactType.TRUE
else:
ctype = CompactType.FALSE
self.__writeFieldHeader(ctype, self.__bool_fid)
elif self.state == CONTAINER_WRITE:
if bool:
self.__writeByte(CompactType.TRUE)
else:
self.__writeByte(CompactType.FALSE)
else:
raise AssertionError, "Invalid state in compact protocol"
writeByte = writer(__writeByte)
writeI16 = writer(__writeI16)
@writer
def writeI32(self, i32):
self.__writeVarint(makeZigZag(i32, 32))
@writer
def writeI64(self, i64):
self.__writeVarint(makeZigZag(i64, 64))
@writer
def writeDouble(self, dub):
self.trans.write(pack('!d', dub))
def __writeString(self, s):
self.__writeSize(len(s))
self.trans.write(s)
writeString = writer(__writeString)
def readFieldBegin(self):
assert self.state == FIELD_READ, self.state
type = self.__readUByte()
if type & 0x0f == TType.STOP:
return (None, 0, 0)
delta = type >> 4
if delta == 0:
fid = self.__readI16()
else:
fid = self.__last_fid + delta
self.__last_fid = fid
type = type & 0x0f
if type == CompactType.TRUE:
self.state = BOOL_READ
self.__bool_value = True
elif type == CompactType.FALSE:
self.state = BOOL_READ
self.__bool_value = False
else:
self.state = VALUE_READ
return (None, self.__getTType(type), fid)
def readFieldEnd(self):
assert self.state in (VALUE_READ, BOOL_READ), self.state
self.state = FIELD_READ
def __readUByte(self):
result, = unpack('!B', self.trans.readAll(1))
return result
def __readByte(self):
result, = unpack('!b', self.trans.readAll(1))
return result
def __readVarint(self):
return readVarint(self.trans)
def __readZigZag(self):
return fromZigZag(self.__readVarint())
def __readSize(self):
result = self.__readVarint()
if result < 0:
raise TException("Length < 0")
return result
def readMessageBegin(self):
assert self.state == CLEAR
proto_id = self.__readUByte()
if proto_id != self.PROTOCOL_ID:
raise TProtocolException(TProtocolException.BAD_VERSION,
'Bad protocol id in the message: %d' % proto_id)
ver_type = self.__readUByte()
type = (ver_type & self.TYPE_MASK) >> self.TYPE_SHIFT_AMOUNT
version = ver_type & self.VERSION_MASK
if version != self.VERSION:
raise TProtocolException(TProtocolException.BAD_VERSION,
'Bad version: %d (expect %d)' % (version, self.VERSION))
seqid = self.__readVarint()
name = self.__readString()
return (name, type, seqid)
def readMessageEnd(self):
assert self.state == CLEAR
assert len(self.__structs) == 0
def readStructBegin(self):
assert self.state in (CLEAR, CONTAINER_READ, VALUE_READ), self.state
self.__structs.append((self.state, self.__last_fid))
self.state = FIELD_READ
self.__last_fid = 0
def readStructEnd(self):
assert self.state == FIELD_READ
self.state, self.__last_fid = self.__structs.pop()
def readCollectionBegin(self):
assert self.state in (VALUE_READ, CONTAINER_READ), self.state
size_type = self.__readUByte()
size = size_type >> 4
type = self.__getTType(size_type)
if size == 15:
size = self.__readSize()
self.__containers.append(self.state)
self.state = CONTAINER_READ
return type, size
readSetBegin = readCollectionBegin
readListBegin = readCollectionBegin
def readMapBegin(self):
assert self.state in (VALUE_READ, CONTAINER_READ), self.state
size = self.__readSize()
types = 0
if size > 0:
types = self.__readUByte()
vtype = self.__getTType(types)
ktype = self.__getTType(types >> 4)
self.__containers.append(self.state)
self.state = CONTAINER_READ
return (ktype, vtype, size)
def readCollectionEnd(self):
assert self.state == CONTAINER_READ, self.state
self.state = self.__containers.pop()
readSetEnd = readCollectionEnd
readListEnd = readCollectionEnd
readMapEnd = readCollectionEnd
def readBool(self):
if self.state == BOOL_READ:
return self.__bool_value == CompactType.TRUE
elif self.state == CONTAINER_READ:
return self.__readByte() == CompactType.TRUE
else:
raise AssertionError, "Invalid state in compact protocol: %d" % self.state
readByte = reader(__readByte)
__readI16 = __readZigZag
readI16 = reader(__readZigZag)
readI32 = reader(__readZigZag)
readI64 = reader(__readZigZag)
@reader
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack('!d', buff)
return val
def __readString(self):
len = self.__readSize()
return self.trans.readAll(len)
readString = reader(__readString)
def __getTType(self, byte):
return TTYPES[byte & 0x0f]
class TCompactProtocolFactory:
def __init__(self):
pass
def getProtocol(self, trans):
return TCompactProtocol(trans)
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import os
import unittest
from iptest import IronPythonTestCase, run_test, skipUnlessIronPython
hitCount = 0
class ListTest(IronPythonTestCase):
def test_extend_self(self):
l=['a','b','c']
l.extend(l)
self.assertTrue(l==['a','b','c','a','b','c'])
def test_append_self(self):
"""verify repr and print have the same result for a recursive list"""
a = list('abc')
a.append(a)
self.assertEqual(str(a), "['a', 'b', 'c', [...]]")
## file
fn = os.path.join(self.temporary_dir, "testfile.txt")
fo = open(fn, "wb")
a = list('abc')
a.append(a)
print >>fo, a,
fo.close()
fo = open(fn, "rb")
self.assertTrue(fo.read() == repr(a))
fo.close()
@skipUnlessIronPython()
def test_cli_enumerator(self):
import clr
x = [1,2,3]
y = []
xenum = iter(x)
while xenum.MoveNext():
y.append(xenum.Current)
self.assertEqual(x, y)
@skipUnlessIronPython()
def test_generic_list(self):
"""https://github.com/IronLanguages/ironpython2/issues/109"""
from System.Collections.Generic import List
lst = List[str]()
lst.Add('Hello')
lst.Add('World')
vals = []
for v in lst[1:]:
vals.append(v)
self.assertEqual(vals, ['World'])
def test_assign_to_empty(self):
# should all succeed
y = []
[] = y
[], t = y, 0
[[[]]] = [[y]]
del y
def test_unpack(self):
listOfSize2 = [1, 2]
# Disallow unequal unpacking assignment
def f1(): [a, b, c] = listOfSize2
def f2(): del a
def f3(): [a] = listOfSize2
self.assertRaises(ValueError, f1)
self.assertRaises(NameError, f2)
self.assertRaises(ValueError, f3)
self.assertRaises(NameError, f2)
[a, [b, c]] = [listOfSize2, listOfSize2]
self.assertEqual(a, listOfSize2)
self.assertEqual(b, 1)
self.assertEqual(c, 2)
del a, b, c
[[a, b], c] = (listOfSize2, listOfSize2)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
self.assertEqual(c, listOfSize2)
del a, b, c
def test_sort(self):
"""named params passed to sort"""
LExpected = ['A', 'b', 'c', 'D']
L = ['D', 'c', 'b', 'A']
L.sort(key=lambda x: x.lower())
self.assertTrue(L == LExpected)
l = [1, 2, 3]
l2 = l[:]
l.sort(lambda x, y: x > y)
self.assertEqual(l, l2)
l.sort(lambda x, y: x > y)
self.assertEqual(l, l2)
def test_list_in_list(self):
aList = [['a']]
anItem = ['a']
self.assertEqual( aList.index(anItem), 0 )
self.assertTrue(anItem in aList)
def test_pop(self):
x = [1,2,3,4,5,6,7,8,9,0]
self.assertTrue(x.pop() == 0)
self.assertTrue(x.pop(3) == 4)
self.assertTrue(x.pop(-5) == 5)
self.assertTrue(x.pop(0) == 1)
self.assertTrue(x.pop() == 9)
self.assertTrue(x.pop(2) == 6)
self.assertTrue(x.pop(3) == 8)
self.assertTrue(x.pop(-1) == 7)
self.assertTrue(x.pop(-2) == 2)
self.assertTrue(x.pop() == 3)
def test_add_mul(self):
x = [1,2,3]
x += [4,5,6]
self.assertTrue(x == [1,2,3,4,5,6])
x = [1,2,3]
self.assertEqual(x * 2, [1,2,3,1,2,3])
self.assertEqual(2 * x, [1,2,3,1,2,3])
class mylong(long): pass
self.assertEqual([1, 2] * mylong(2), [1, 2, 1, 2])
self.assertEqual([3, 4].__mul__(mylong(2)), [3, 4, 3, 4])
self.assertEqual([5, 6].__rmul__(mylong(2)), [5, 6, 5, 6])
self.assertEqual(mylong(2) * [7,8] , [7, 8, 7, 8])
self.assertRaises(TypeError, lambda: [1,2] * [3,4])
self.assertRaises(OverflowError, lambda: [1,2] * mylong(203958720984752098475023957209))
def test_reverse(self):
x = ["begin",1,2,3,4,5,6,7,8,9,0,"end"]
del x[6:]
x.reverse()
self.assertTrue(x == [5, 4, 3, 2, 1, "begin"])
x = list("iron python")
x.reverse()
self.assertTrue(x == ['n','o','h','t','y','p',' ','n','o','r','i'])
# should return listreverseenumerator, not reversed
self.assertTrue(type(reversed([2,3,4])) != reversed)
def test_equal(self):
self.assertEqual([2,3] == '', False)
self.assertEqual(list.__eq__([], None), NotImplemented)
class MyEquality(object):
def __eq__(self, other):
return 'abc'
class MyOldEquality(object):
def __eq__(self, other):
return 'def'
self.assertEqual([] == MyEquality(), 'abc')
self.assertEqual([] == MyOldEquality(), 'def')
self.assertEqual([2,3] == (2,3), False)
class MyIterable(object):
def __iter__(self): return MyIterable()
def __next__(self):
yield 'a'
yield 'b'
self.assertEqual(['a', 'b'] == MyIterable(), False)
def test_self_init(self):
a = [1, 2, 3]
list.__init__(a, a)
self.assertEqual(a, [])
def test_index_removed(self):
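        # list.index must cope with __eq__ implementations that mutate the
        # list while the search is running; hitCount records how many
        # comparisons were actually made.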
global hitCount
class clears(object):
def __eq__(self, other):
global hitCount
hitCount = hitCount + 1
del a[:]
return False
class appends(object):
def __eq__(self, other):
global hitCount
hitCount = hitCount + 1
a.append(self)
return False
a = [clears(), clears(),clears(),clears(),clears()]
hitCount = 0
self.assertRaises(ValueError, a.index, 23)
self.assertEqual(hitCount, 1) # should stop after the first equality check
a = [appends(), appends(), appends()]
hitCount = 0
self.assertRaises(ValueError, a.index, 2)
self.assertEqual(hitCount, 3) # should have only checked existing items
@skipUnlessIronPython()
def test_pass_pythonlist_to_clr(self):
##
## test passing pythonlist to clr where IList or ArrayList is requested
## also borrow this place to test passing python dict to clr where
## IDictionary or Hashtable is requested
##
def contains_all_1s(x):
            '''check that the return value is 11111 or similar'''
if type(x) == tuple:
x = x[0]
s = str(x)
self.assertEqual(s.count("1"), len(s))
def do_something(thetype, pl, cl, check_func):
pt = thetype(pl)
pt.AddRemove()
ct = thetype(cl)
ct.AddRemove()
check_func()
x = pt.Inspect()
y = ct.Inspect()
contains_all_1s(x)
contains_all_1s(y)
self.assertEqual(x, y)
self.assertEqual(pt.Loop(), ct.Loop())
check_func()
self.load_iron_python_test()
import System
import IronPythonTest
# test ListWrapperForIList
pl = range(40)
cl = System.Collections.Generic.List[int]()
for x in pl: cl.Add(x)
def check_content():
for x, y in zip(cl, pl): self.assertEqual(x, y)
do_something(IronPythonTest.UsePythonListAsList, pl, cl, check_content)
# test DictWrapperForIDict
pl = {"redmond" : 10, "seattle" : 20}
cl = System.Collections.Generic.Dictionary[str, int]()
for x, y in pl.iteritems(): cl.Add(x, y)
pll = list(pl.iteritems())
cll = list(cl)
pll.sort(lambda x, y: cmp(x[0], y[0]))
cll.sort(lambda x, y: cmp(x.Key, y.Key))
def check_content():
for x, y in zip(cll, pll):
self.assertEqual(x.Key, y[0])
self.assertEqual(x.Value, y[1])
do_something(IronPythonTest.UsePythonDictAsDictionary, pl, cl, check_content)
def test_inplace_addition(self):
x = [2,3,4]
x += x
self.assertEqual(x, [2,3,4,2,3,4])
test_cases = [ ([], [], []),
([1], [], [1]),
([], [1], [1]),
([1], [1], [1, 1]),
([1], [2], [1, 2]),
([2], [1], [2, 1]),
([1, 2], [], [1, 2]),
([], [1, 2], [1, 2]),
([1, 2], [3], [1, 2, 3]),
([3], [1, 2], [3, 1, 2]),
([1, 2], [3, 4], [1, 2, 3, 4]),
([3, 4], [1, 2], [3, 4, 1, 2]),
([None], [], [None]),
([None], [2], [None, 2]),
([""], [], [""]),
]
for left_operand, right_operand, result in test_cases:
#(No access to copy.deepcopy in IP)
# Create new list to verify no side effects to the RHS list
orig_right = [x for x in right_operand]
left_operand += right_operand
self.assertEqual(left_operand, result)
            # Side effects: the RHS list must not have been modified
self.assertEqual(orig_right, right_operand)
#interesting cases
x = [None]
x += xrange(3)
self.assertEqual(x, [None, 0, 1, 2])
x = [None]
x += (0, 1, 2)
self.assertEqual(x, [None, 0, 1, 2])
x = [None]
x += "012"
self.assertEqual(x, [None, "0", "1", "2"])
x = [None]
x += Exception()
self.assertEqual(x, [None])
#negative cases
neg_cases = [ ([], None),
([], 1),
([], long(1)),
([], 3.14),
([], object),
([], object()),
]
for left_operand, right_operand in neg_cases:
try:
left_operand += right_operand
self.assertUnreachable()
except TypeError:
pass
def test_indexing(self):
l = [2,3,4]
def set(x, i, v): x[i] = v
self.assertRaises(TypeError, lambda : l[2.0])
self.assertRaises(TypeError, lambda : set(l, 2.0, 1))
class mylist(list):
def __getitem__(self, index):
return list.__getitem__(self, int(index))
def __setitem__(self, index, value):
return list.__setitem__(self, int(index), value)
l = mylist(l)
self.assertEqual(l[2.0], 4)
l[2.0] = 1
self.assertEqual(l[2], 1)
def test_getslice(self):
"""overriding __len__ doesn't get called when doing __getslice__"""
class l(list):
def __len__(self):
raise Exception()
x = l()
self.assertEqual(x.__getslice__(-1, -200), [])
class mylist(list):
def __getslice__(self, i, j):
return i, j
class mylong(long): pass
class myint(int): pass
# all indexes to __getslice__ should be ints
for listType in list, mylist:
for input in [0, 1, False, True, myint(0), myint(1), mylong(0), mylong(1), -1, myint(-1), mylong(-1)]:
for x in listType(range(5))[input:input]:
self.assertEqual(type(x), int)
def test_repr(self):
class mylist(list):
def __repr__(self): return 'abc'
self.assertEqual(repr(mylist()), 'abc')
def test_index_multiply(self):
for data in ([1,2], (1,2), 'ab'):
class M:
def __rmul__(self, other):
return 1
class Index(object):
def __index__(self): return 2
class OldIndex:
def __index__(self): return 2
self.assertEqual(data * M(), 1)
self.assertRaises(TypeError, lambda : data.__mul__(M()))
self.assertEqual(data * Index(), data * 2)
self.assertEqual(data * OldIndex(), data * 2)
self.assertEqual(data.__mul__(Index()), data * 2)
self.assertEqual(data.__mul__(OldIndex()), data * 2)
self.assertRaisesMessage(TypeError, "'NoneType' object cannot be interpreted as an index", lambda : data.__mul__(None))
self.assertRaises(TypeError, lambda : data * None)
self.assertRaises(TypeError, lambda : None * data)
def test_sequence_assign(self):
tokens = [(chr(ord('a') + val), val) for val in range(0,10)]
(first,pos),tokens = tokens[0], tokens[1:]
self.assertEqual(first, 'a')
self.assertEqual(pos, 0)
self.assertEqual(tokens, [('b', 1), ('c', 2), ('d', 3), ('e', 4), ('f', 5), ('g', 6), ('h', 7), ('i', 8), ('j', 9)])
def test_inheritance(self):
listIter = type(iter([2,3,4]))
reverseListIter = type(reversed([2,3,4]))
for base in (listIter, reverseListIter):
def subclass():
class x(base): pass
self.assertRaises(TypeError, subclass)
def test_backwards_slicing_no_step(self):
class mylist(object):
def __getitem__(self, index):
return 'stuff'[index]
a = list('stuff')
for val in (a, 'stuff', tuple('stuff'), mylist()):
a[1:0] = val
self.assertEqual(a, list("stuff"[:1] + "stuff" + "stuff"[1:]))
a = list('stuff')
for val in (a, 'stuff', tuple('stuff'), mylist()):
a[1:0:1] = a
self.assertEqual(a, list("stuff"[:1] + "stuff" + "stuff"[1:]))
a = list('stuff')
def test_cp20125(self):
class Temp(list):
def __init__(self, value):
self.value = value
def __mul__(self, other):
return self.value * other
t1 = Temp(3.0)
self.assertEqual(t1 * 3.0, 9.0)
run_test(__name__)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: tutorial_zonal_statistics.py
# Purpose:
#
# Author: Maik Heistermann, Kai Muehlbauer
#
# Created: 26.08.2015
# Copyright: (c) heistermann, muehlbauer 2015
# Licence: The MIT License
# ------------------------------------------------------------------------------
import os
from osgeo import osr
import wradlib
import pylab as plt
import numpy as np
from matplotlib.collections import PatchCollection
from matplotlib.colors import from_levels_and_colors
import matplotlib.patches as patches
import datetime as dt
def testplot(cats, catsavg, xy, data,
levels=[0, 1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50, 100], title=""):
"""Quick test plot layout for this example file
"""
colors = plt.cm.spectral(np.linspace(0, 1, len(levels)))
mycmap, mynorm = from_levels_and_colors(levels, colors, extend="max")
radolevels = [0, 1, 2, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50, 100]
radocolors = plt.cm.spectral(np.linspace(0, 1, len(radolevels)))
radocmap, radonorm = from_levels_and_colors(radolevels, radocolors, extend="max")
fig = plt.figure(figsize=(14, 8))
# Average rainfall sum
ax = fig.add_subplot(121, aspect="equal")
coll = PatchCollection(cats, array=catsavg, cmap=mycmap, norm=mynorm,
edgecolors='white', lw=0.5)
ax.add_collection(coll)
ax.autoscale()
plt.colorbar(coll, ax=ax, shrink=0.5)
plt.xlabel("GK2 Easting")
plt.ylabel("GK2 Northing")
plt.title(title)
plt.draw()
# Original radar data
ax1 = fig.add_subplot(122, aspect="equal")
pm = plt.pcolormesh(xy[:, :, 0], xy[:, :, 1], np.ma.masked_invalid(data),
cmap=radocmap, norm=radonorm)
coll = PatchCollection(cats, facecolor='None', edgecolor='white', lw=0.5)
ax1.add_collection(coll)
cb = plt.colorbar(pm, ax=ax1, shrink=0.5)
cb.set_label("(mm/h)")
plt.xlabel("GK2 Easting")
plt.ylabel("GK2 Northing")
plt.title("Original radar rain sums")
plt.draw()
plt.tight_layout()
def ex_tutorial_zonal_statistics():
# Get RADOLAN grid coordinates
grid_xy_radolan = wradlib.georef.get_radolan_grid(900, 900)
x_radolan = grid_xy_radolan[:, :, 0]
y_radolan = grid_xy_radolan[:, :, 1]
# create radolan projection osr object
proj_stereo = wradlib.georef.create_osr("dwd-radolan")
# create Gauss Krueger zone 2 projection osr object
proj_gk = osr.SpatialReference()
proj_gk.ImportFromEPSG(31466)
# transform radolan polar stereographic projection to GK2
xy = wradlib.georef.reproject(grid_xy_radolan,
projection_source=proj_stereo,
projection_target=proj_gk)
# Open shapefile (already in GK2)
shpfile = "../../../examples/data/agger/agger_merge.shp"
dataset, inLayer = wradlib.io.open_shape(shpfile)
cats, keys = wradlib.georef.get_shape_coordinates(inLayer)
# Read and prepare the actual data (RADOLAN)
f = "../../../examples/data/radolan/raa01-sf_10000-1406100050-dwd---bin.gz"
data, attrs = wradlib.io.read_RADOLAN_composite(f, missing=np.nan)
sec = attrs['secondary']
data.flat[sec] = np.nan
    # Reduce grid size using a bounding box (to enhance performance)
bbox = inLayer.GetExtent()
buffer = 5000.
bbox = dict(left=bbox[0]-buffer, right=bbox[1]+buffer, bottom=bbox[2]-buffer, top=bbox[3]+buffer)
mask, shape = wradlib.zonalstats.mask_from_bbox(xy[..., 0], xy[..., 1], bbox)
xy_ = np.vstack((xy[..., 0][mask].ravel(),xy[..., 1][mask].ravel())).T
data_ = data[mask]
###########################################################################
# Approach #1: Assign grid points to each polygon and compute the average.
#
# - Uses matplotlib.path.Path
# - Each point is weighted equally (assumption: polygon >> grid cell)
# - this is quick, but theoretically dirty
###########################################################################
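    # (In effect, the per-catchment average computed below is an unweighted mean:
    #  avg_j = (1/N_j) * sum_i(x_i) over the N_j grid points that fall inside,
    #  or within the buffer of, target polygon j.)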
t1 = dt.datetime.now()
try:
# Create instance of type GridPointsToPoly from zonal data file
obj1 = wradlib.zonalstats.GridPointsToPoly('test_zonal_points_cart')
    except Exception:
# Create instance of type ZonalDataPoint from source grid and catchment array
zd = wradlib.zonalstats.ZonalDataPoint(xy_, cats, srs=proj_gk, buf=500.)
# dump to file
zd.dump_vector('test_zonal_points_cart')
# Create instance of type GridPointsToPoly from zonal data object
obj1 = wradlib.zonalstats.GridPointsToPoly(zd)
isecs1 = obj1.zdata.isecs
t2 = dt.datetime.now()
# Compute stats for target polygons
avg1 = obj1.mean(data_.ravel())
var1 = obj1.var(data_.ravel())
t3 = dt.datetime.now()
print("Approach #1 (create object) takes: %f seconds" % (t2 - t1).total_seconds())
print("Approach #1 (compute average) takes: %f seconds" % (t3 - t2).total_seconds())
# Just a test for plotting results with zero buffer
zd2 = wradlib.zonalstats.ZonalDataPoint(xy_, cats, buf=0)
# Create instance of type GridPointsToPoly from zonal data object
obj2 = wradlib.zonalstats.GridPointsToPoly(zd2)
isecs2 = obj2.zdata.isecs
# Illustrate results for an example catchment i
i = 6 # try e.g. 48, 100
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
# Target polygon patches
trg_patches = [patches.Polygon(item, True) for item in obj1.zdata.trg.data]
trg_patch = [trg_patches[i]]
p = PatchCollection(trg_patch, facecolor="None", edgecolor="black", linewidth=2)
ax.add_collection(p)
# pips
sources = obj1.zdata.src.data
plt.scatter(sources[:, 0], sources[:, 1], s=200, c="grey", edgecolor="None", label="all points")
plt.scatter(isecs2[i][:, 0], isecs2[i][:, 1], s=200, c="green", edgecolor="None", label="buffer=0 m")
plt.scatter(isecs1[i][:, 0], isecs1[i][:, 1], s=50, c="red", edgecolor="None", label="buffer=500 m")
bbox = wradlib.zonalstats.get_bbox(cats[i][:, 0], cats[i][:, 1])
plt.xlim(bbox["left"]-2000, bbox["right"]+2000)
plt.ylim(bbox["bottom"]-2000, bbox["top"]+2000)
plt.legend()
plt.title("Catchment #%d: Points considered for stats" % i)
# Plot average rainfall and original data
testplot(trg_patches, avg1, xy, data, title="Catchment rainfall mean (GridPointsToPoly)")
testplot(trg_patches, var1, xy, data, levels = np.arange(0,np.max(var1),1.), title="Catchment rainfall variance (GridPointsToPoly)")
###########################################################################
# Approach #2: Compute weighted mean based on fraction of source polygons in target polygons
#
# - This is more accurate (no assumptions), but probably slower...
###########################################################################
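    # (Here the mean is area-weighted: avg_j = sum_i(A_ij * x_i) / sum_i(A_ij),
    #  where A_ij is the intersection area between source grid cell i and target
    #  polygon j, so the "polygon >> grid cell" assumption is not needed.)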
t1 = dt.datetime.now()
# Create vertices for each grid cell (MUST BE DONE IN NATIVE RADOLAN COORDINATES)
grdverts = wradlib.zonalstats.grid_centers_to_vertices(x_radolan[mask],
y_radolan[mask], 1., 1.)
# And reproject to Cartesian reference system (here: GK2)
grdverts = wradlib.georef.reproject(grdverts,
projection_source=proj_stereo,
projection_target=proj_gk)
try:
# Create instance of type GridCellsToPoly from zonal data file
obj3 = wradlib.zonalstats.GridCellsToPoly('test_zonal_poly_cart')
except Exception as e:
print(e)
# Create instance of type ZonalDataPoly from source grid and
# catchment array
zd = wradlib.zonalstats.ZonalDataPoly(grdverts, cats, srs=proj_gk)
# dump to file
zd.dump_vector('test_zonal_poly_cart')
# Create instance of type GridPointsToPoly from zonal data object
obj3 = wradlib.zonalstats.GridCellsToPoly(zd)
t2 = dt.datetime.now()
# Compute stats for target polygons
avg3 = obj3.mean(data_.ravel())
var3 = obj3.var(data_.ravel())
t3 = dt.datetime.now()
print("Approach #2 (create object) takes: %f seconds" % (t2 - t1).total_seconds())
print("Approach #2 (compute average) takes: %f seconds" % (t3 - t2).total_seconds())
# Target polygon patches
trg_patches = [patches.Polygon(item, True) for item in obj3.zdata.trg.data]
# Plot average rainfall and original data
testplot(trg_patches, avg3, xy, data,
title="Catchment rainfall mean (GridCellsToPoly)")
testplot(trg_patches, var3, xy, data, levels=np.arange(0, np.max(var3), 1.),
title="Catchment rainfall variance (GridCellsToPoly)")
# Illustrate results for an example catchment i
i = 6 # try any index between 0 and 13
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
# Grid cell patches
src_index = obj3.zdata.get_source_index(i)
grd_patches = [patches.Polygon(item)
for item in obj3.zdata.src.get_data_by_idx(src_index)]
p = PatchCollection(grd_patches, facecolor="None", edgecolor="black")
ax.add_collection(p)
# Target polygon patches
trg_patch = [trg_patches[i]]
p = PatchCollection(trg_patch, facecolor="None", edgecolor="red", linewidth=2)
ax.add_collection(p)
# View the actual intersections
isecs = obj3.zdata.get_isec(i)
isec_patches = wradlib.zonalstats.numpy_to_pathpatch(isecs)
colors = 100*np.linspace(0, 1., len(isec_patches))
p = PatchCollection(isec_patches, cmap=plt.cm.jet, alpha=0.5)
p.set_array(np.array(colors))
ax.add_collection(p)
bbox = wradlib.zonalstats.get_bbox(cats[i][:, 0], cats[i][:, 1])
plt.xlim(bbox["left"]-2000, bbox["right"]+2000)
plt.ylim(bbox["bottom"]-2000, bbox["top"]+2000)
plt.draw()
# Compare estimates
maxlim = np.max(np.concatenate((avg1, avg3)))
fig = plt.figure(figsize=(14, 8))
ax = fig.add_subplot(111, aspect="equal")
plt.scatter(avg1, avg3, edgecolor="None", alpha=0.5)
plt.xlabel("Average of points in or close to polygon (mm)")
plt.ylabel("Area-weighted average (mm)")
plt.xlim(0, maxlim)
plt.ylim(0, maxlim)
plt.plot([-1, maxlim+1], [-1, maxlim+1], color="black")
plt.show()
# =======================================================
if __name__ == '__main__':
ex_tutorial_zonal_statistics()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from collections import namedtuple
import mxnet as mx
import mxnet.ndarray as nd
from mxnet.base import MXNetError
from mxnet import gluon
from mxnet.gluon.data.vision import transforms
from mxnet import image
from mxnet.test_utils import *
from common import assertRaises, setup_module, with_seed, teardown
import numpy as np
@with_seed()
def test_to_tensor():
# 3D Input
data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
assert_almost_equal(out_nd.asnumpy(), np.transpose(
data_in.astype(dtype=np.float32) / 255.0, (2, 0, 1)))
# 4D Input
data_in = np.random.uniform(0, 255, (5, 300, 300, 3)).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
assert_almost_equal(out_nd.asnumpy(), np.transpose(
data_in.astype(dtype=np.float32) / 255.0, (0, 3, 1, 2)))
# Invalid Input
invalid_data_in = nd.random.uniform(0, 255, (5, 5, 300, 300, 3)).astype(dtype=np.uint8)
transformer = transforms.ToTensor()
assertRaises(MXNetError, transformer, invalid_data_in)
# Bounds (0->0, 255->1)
data_in = np.zeros((10, 20, 3)).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
assert same(out_nd.asnumpy(), np.transpose(np.zeros(data_in.shape, dtype=np.float32), (2, 0, 1)))
data_in = np.full((10, 20, 3), 255).astype(dtype=np.uint8)
out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
assert same(out_nd.asnumpy(), np.transpose(np.ones(data_in.shape, dtype=np.float32), (2, 0, 1)))
@with_seed()
def test_normalize():
# 3D Input
data_in_3d = nd.random.uniform(0, 1, (3, 300, 300))
out_nd_3d = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))(data_in_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
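    # (Note: for these channel-first arrays, arr[:][:][c] is equivalent to arr[c],
    # i.e. the c-th channel plane, because each [:] just returns the whole array.)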
assert_almost_equal(data_expected_3d, out_nd_3d.asnumpy())
# 4D Input
data_in_4d = nd.random.uniform(0, 1, (2, 3, 300, 300))
out_nd_4d = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))(data_in_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
assert_almost_equal(data_expected_4d, out_nd_4d.asnumpy())
    # Invalid Input - Neither 3D nor 4D input
invalid_data_in = nd.random.uniform(0, 1, (5, 5, 3, 300, 300))
normalize_transformer = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))
assertRaises(MXNetError, normalize_transformer, invalid_data_in)
    # Invalid Input - Channel neither 1 nor 3
invalid_data_in = nd.random.uniform(0, 1, (5, 4, 300, 300))
normalize_transformer = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))
assertRaises(MXNetError, normalize_transformer, invalid_data_in)
@with_seed()
def test_resize():
def _test_resize_with_diff_type(dtype):
# test normal case
data_in = nd.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
out_nd = transforms.Resize(200)(data_in)
data_expected = mx.image.imresize(data_in, 200, 200, 1)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test 4D input
data_bath_in = nd.random.uniform(0, 255, (3, 300, 200, 3)).astype(dtype)
out_batch_nd = transforms.Resize(200)(data_bath_in)
for i in range(len(out_batch_nd)):
assert_almost_equal(mx.image.imresize(data_bath_in[i], 200, 200, 1).asnumpy(),
out_batch_nd[i].asnumpy())
# test interp = 2
out_nd = transforms.Resize(200, interpolation=2)(data_in)
data_expected = mx.image.imresize(data_in, 200, 200, 2)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
    # test height not equal to width
out_nd = transforms.Resize((200, 100))(data_in)
data_expected = mx.image.imresize(data_in, 200, 100, 1)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test keep_ratio
out_nd = transforms.Resize(150, keep_ratio=True)(data_in)
data_expected = mx.image.imresize(data_in, 150, 225, 1)
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test size below zero
invalid_transform = transforms.Resize(-150, keep_ratio=True)
assertRaises(MXNetError, invalid_transform, data_in)
    # test size tuple with more than 2 elements
invalid_transform = transforms.Resize((100, 100, 100), keep_ratio=True)
assertRaises(MXNetError, invalid_transform, data_in)
for dtype in ['uint8', 'float32', 'float64']:
_test_resize_with_diff_type(dtype)
@with_seed()
def test_crop_resize():
def _test_crop_resize_with_diff_type(dtype):
# test normal case
data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
out_nd = transforms.CropResize(0, 0, 3, 2)(data_in)
out_np = out_nd.asnumpy()
assert(out_np.sum() == 180)
assert((out_np[0:2,1,1].flatten() == [4, 16]).all())
# test 4D input
data_bath_in = nd.arange(180).reshape((2, 6, 5, 3)).astype(dtype)
out_batch_nd = transforms.CropResize(1, 2, 3, 4)(data_bath_in)
out_batch_np = out_batch_nd.asnumpy()
assert(out_batch_np.sum() == 7524)
assert((out_batch_np[0:2,0:4,1,1].flatten() == [37, 52, 67, 82, 127, 142, 157, 172]).all())
# test normal case with resize
data_in = nd.random.uniform(0, 255, (300, 200, 3)).astype(dtype)
out_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 1)(data_in)
data_expected = transforms.Resize(size=25, interpolation=1)(nd.slice(data_in, (0, 0, 0), (50, 100, 3)))
assert_almost_equal(out_nd.asnumpy(), data_expected.asnumpy())
# test 4D input with resize
data_bath_in = nd.random.uniform(0, 255, (3, 300, 200, 3)).astype(dtype)
out_batch_nd = transforms.CropResize(0, 0, 100, 50, (25, 25), 1)(data_bath_in)
for i in range(len(out_batch_nd)):
actual = transforms.Resize(size=25, interpolation=1)(nd.slice(data_bath_in[i], (0, 0, 0), (50, 100, 3))).asnumpy()
expected = out_batch_nd[i].asnumpy()
assert_almost_equal(expected, actual)
    # test that resize height and width must be greater than 0
transformer = transforms.CropResize(0, 0, 100, 50, (-25, 25), 1)
assertRaises(MXNetError, transformer, data_in)
    # test that crop height and width must be greater than 0
transformer = transforms.CropResize(0, 0, -100, -50)
assertRaises(MXNetError, transformer, data_in)
# test cropped area is bigger than input data
transformer = transforms.CropResize(150, 200, 200, 500)
assertRaises(MXNetError, transformer, data_in)
assertRaises(MXNetError, transformer, data_bath_in)
for dtype in ['uint8', 'float32', 'float64']:
_test_crop_resize_with_diff_type(dtype)
# test nd.image.crop backward
def test_crop_backward(test_nd_arr, TestCase):
a_np = test_nd_arr.asnumpy()
b_np = a_np[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))]
data = mx.sym.Variable('data')
crop_sym = mx.sym.image.crop(data, TestCase.x, TestCase.y, TestCase.width, TestCase.height)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[(slice(TestCase.y, TestCase.y + TestCase.height), slice(TestCase.x, TestCase.x + TestCase.width), slice(0, 3))] = b_np
check_symbolic_backward(crop_sym, [a_np], [b_np], [expected_in_grad])
TestCase = namedtuple('TestCase', ['x', 'y', 'width', 'height'])
test_list = [TestCase(0, 0, 3, 3), TestCase(2, 1, 1, 2), TestCase(0, 1, 3, 2)]
for dtype in ['uint8', 'float32', 'float64']:
data_in = nd.arange(60).reshape((5, 4, 3)).astype(dtype)
for test_case in test_list:
test_crop_backward(data_in, test_case)
@with_seed()
def test_flip_left_right():
data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
flip_in = data_in[:, ::-1, :]
data_trans = nd.image.flip_left_right(nd.array(data_in, dtype='uint8'))
assert_almost_equal(flip_in, data_trans.asnumpy())
@with_seed()
def test_flip_top_bottom():
data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
flip_in = data_in[::-1, :, :]
data_trans = nd.image.flip_top_bottom(nd.array(data_in, dtype='uint8'))
assert_almost_equal(flip_in, data_trans.asnumpy())
@with_seed()
def test_transformer():
from mxnet.gluon.data.vision import transforms
transform = transforms.Compose([
transforms.Resize(300),
transforms.Resize(300, keep_ratio=True),
transforms.CenterCrop(256),
transforms.RandomResizedCrop(224),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
transforms.RandomBrightness(0.1),
transforms.RandomContrast(0.1),
transforms.RandomSaturation(0.1),
transforms.RandomHue(0.1),
transforms.RandomLighting(0.1),
transforms.ToTensor(),
transforms.RandomRotation([-10., 10.]),
transforms.Normalize([0, 0, 0], [1, 1, 1])])
transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read()
@with_seed()
def test_rotate():
transformer = transforms.Rotate(10.)
assertRaises(TypeError, transformer, mx.nd.ones((3, 30, 60), dtype='uint8'))
single_image = mx.nd.ones((3, 30, 60), dtype='float32')
single_output = transformer(single_image)
assert same(single_output.shape, (3, 30, 60))
batch_image = mx.nd.ones((3, 3, 30, 60), dtype='float32')
batch_output = transformer(batch_image)
assert same(batch_output.shape, (3, 3, 30, 60))
input_image = nd.array([[[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.]]])
rotation_angles_expected_outs = [
(90., nd.array([[[0., 1., 0.],
[0., 0., 0.],
[0., 0., 0.]]])),
(180., nd.array([[[0., 0., 0.],
[1., 0., 0.],
[0., 0., 0.]]])),
(270., nd.array([[[0., 0., 0.],
[0., 0., 0.],
[0., 1., 0.]]])),
(360., nd.array([[[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.]]])),
]
for rot_angle, expected_result in rotation_angles_expected_outs:
transformer = transforms.Rotate(rot_angle)
ans = transformer(input_image)
print(ans, expected_result)
assert_almost_equal(ans, expected_result, atol=1e-6)
@with_seed()
def test_random_rotation():
# test exceptions for probability input outside of [0,1]
assertRaises(ValueError, transforms.RandomRotation, [-10, 10.], rotate_with_proba=1.1)
assertRaises(ValueError, transforms.RandomRotation, [-10, 10.], rotate_with_proba=-0.3)
# test `forward`
transformer = transforms.RandomRotation([-10, 10.])
assertRaises(TypeError, transformer, mx.nd.ones((3, 30, 60), dtype='uint8'))
single_image = mx.nd.ones((3, 30, 60), dtype='float32')
single_output = transformer(single_image)
assert same(single_output.shape, (3, 30, 60))
batch_image = mx.nd.ones((3, 3, 30, 60), dtype='float32')
batch_output = transformer(batch_image)
assert same(batch_output.shape, (3, 3, 30, 60))
# test identity (rotate_with_proba = 0)
transformer = transforms.RandomRotation([-100., 100.], rotate_with_proba=0.0)
data = mx.nd.random_normal(shape=(3, 30, 60))
assert_almost_equal(data, transformer(data))
@with_seed()
def test_random_transforms():
from mxnet.gluon.data.vision import transforms
tmp_t = transforms.Compose([transforms.Resize(300), transforms.RandomResizedCrop(224)])
transform = transforms.Compose([transforms.RandomApply(tmp_t, 0.5)])
img = mx.nd.ones((10, 10, 3), dtype='uint8')
iteration = 1000
num_apply = 0
for _ in range(iteration):
out = transform(img)
if out.shape[0] == 224:
num_apply += 1
assert_almost_equal(num_apply/float(iteration), 0.5, 0.1)
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
"""Support for monitoring Repetier Server Sensors."""
from datetime import datetime
import logging
import time
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import DEVICE_CLASS_TIMESTAMP
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import REPETIER_API, SENSOR_TYPES, UPDATE_SIGNAL
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Repetier Server sensors."""
if discovery_info is None:
return
sensor_map = {
"bed_temperature": RepetierTempSensor,
"extruder_temperature": RepetierTempSensor,
"chamber_temperature": RepetierTempSensor,
"current_state": RepetierSensor,
"current_job": RepetierJobSensor,
"job_end": RepetierJobEndSensor,
"job_start": RepetierJobStartSensor,
}
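    # SENSOR_TYPES[sensor_type] is indexed below as: [1] unit of measurement,
    # [2] icon, [3] name suffix and [4] device class (see the sensor properties).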
entities = []
for info in discovery_info:
printer_name = info["printer_name"]
api = hass.data[REPETIER_API][printer_name]
printer_id = info["printer_id"]
sensor_type = info["sensor_type"]
temp_id = info["temp_id"]
name = f"{info['name']}{SENSOR_TYPES[sensor_type][3]}"
if temp_id is not None:
_LOGGER.debug("%s Temp_id: %s", sensor_type, temp_id)
name = f"{name}{temp_id}"
sensor_class = sensor_map[sensor_type]
entity = sensor_class(api, temp_id, name, printer_id, sensor_type)
entities.append(entity)
add_entities(entities, True)
class RepetierSensor(SensorEntity):
"""Class to create and populate a Repetier Sensor."""
def __init__(self, api, temp_id, name, printer_id, sensor_type):
"""Init new sensor."""
self._api = api
self._attributes = {}
self._available = False
self._temp_id = temp_id
self._name = name
self._printer_id = printer_id
self._sensor_type = sensor_type
self._state = None
self._attr_device_class = SENSOR_TYPES[self._sensor_type][4]
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def extra_state_attributes(self):
"""Return sensor attributes."""
return self._attributes
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return SENSOR_TYPES[self._sensor_type][1]
@property
def icon(self):
"""Icon to use in the frontend."""
return SENSOR_TYPES[self._sensor_type][2]
@property
def should_poll(self):
"""Return False as entity is updated from the component."""
return False
@property
def native_value(self):
"""Return sensor state."""
return self._state
@callback
def update_callback(self):
"""Get new data and update state."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Connect update callbacks."""
self.async_on_remove(
async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self.update_callback)
)
def _get_data(self):
"""Return new data from the api cache."""
data = self._api.get_data(self._printer_id, self._sensor_type, self._temp_id)
if data is None:
_LOGGER.debug(
"Data not found for %s and %s", self._sensor_type, self._temp_id
)
self._available = False
return None
self._available = True
return data
def update(self):
"""Update the sensor."""
data = self._get_data()
if data is None:
return
state = data.pop("state")
_LOGGER.debug("Printer %s State %s", self._name, state)
self._attributes.update(data)
self._state = state
class RepetierTempSensor(RepetierSensor):
"""Represent a Repetier temp sensor."""
@property
def native_value(self):
"""Return sensor state."""
if self._state is None:
return None
return round(self._state, 2)
def update(self):
"""Update the sensor."""
data = self._get_data()
if data is None:
return
state = data.pop("state")
temp_set = data["temp_set"]
_LOGGER.debug("Printer %s Setpoint: %s, Temp: %s", self._name, temp_set, state)
self._attributes.update(data)
self._state = state
class RepetierJobSensor(RepetierSensor):
"""Represent a Repetier job sensor."""
@property
def native_value(self):
"""Return sensor state."""
if self._state is None:
return None
return round(self._state, 2)
class RepetierJobEndSensor(RepetierSensor):
"""Class to create and populate a Repetier Job End timestamp Sensor."""
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_TIMESTAMP
def update(self):
"""Update the sensor."""
data = self._get_data()
if data is None:
return
job_name = data["job_name"]
start = data["start"]
print_time = data["print_time"]
from_start = data["from_start"]
time_end = start + round(print_time, 0)
self._state = datetime.utcfromtimestamp(time_end).isoformat()
remaining = print_time - from_start
remaining_secs = int(round(remaining, 0))
_LOGGER.debug(
"Job %s remaining %s",
job_name,
time.strftime("%H:%M:%S", time.gmtime(remaining_secs)),
)
class RepetierJobStartSensor(RepetierSensor):
"""Class to create and populate a Repetier Job Start timestamp Sensor."""
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_TIMESTAMP
def update(self):
"""Update the sensor."""
data = self._get_data()
if data is None:
return
job_name = data["job_name"]
start = data["start"]
from_start = data["from_start"]
self._state = datetime.utcfromtimestamp(start).isoformat()
elapsed_secs = int(round(from_start, 0))
_LOGGER.debug(
"Job %s elapsed %s",
job_name,
time.strftime("%H:%M:%S", time.gmtime(elapsed_secs)),
)
|
|
import numpy as np
import matplotlib
import matplotlib.pylab as plt
import healpy as hp
import string
import yt
import os
import glob
from PIL import Image as PIL_Image
from images2gif import writeGif
from scipy.special import sph_harm,sph_jn
import beatbox
from beatbox.multiverse import Multiverse
# ===================================================================
def set_k_filter(self):
"""
Define a filter over the k space for the modes between kmin and kmax
"""
#Define lower & upper bounds for the filter
Universe.high_k_cutoff = Universe.truncated_nmax*Universe.Deltak
Universe.low_k_cutoff = Universe.truncated_nmin*Universe.Deltak
# Define the filter
low_k_filter = (~(Universe.n < Universe.truncated_nmin)).astype(int)
high_k_filter = (~(Universe.n > Universe.truncated_nmax)).astype(int)
Universe.kfilter = high_k_filter*low_k_filter
return
def populate_response_matrix(self):
"""
Populate the R matrix for the default range of l and n, or
    over the range specified above
"""
truncated_nmax = Universe.truncated_nmax
truncated_nmin = Universe.truncated_nmin
truncated_lmax = Universe.truncated_lmax
truncated_lmin = Universe.truncated_lmin
lms = Universe.lms
kfilter = Universe.kfilter
# Initialize R matrix:
NY = (truncated_lmax + 1)**2 - (truncated_lmin)**2
# Find the indices of the non-zero elements of the filter
ind = np.where(Universe.kfilter>0)
# The n index spans 2x that length, 1st half for the cos coefficients, 2nd half
# for the sin coefficients
NN = 2*len(ind[1])
R_long = np.zeros([NY,NN], dtype=np.complex128)
k, theta, phi = Universe.k[ind], np.arctan2(Universe.ky[ind],Universe.kx[ind]), np.arccos(Universe.kz[ind]/Universe.k[ind])
# We need to fix the 'nan' theta element that came from having ky=0
theta[np.isnan(theta)] = np.pi/2.0
# Get ready to loop over y
y = 0
A = [sph_jn(truncated_lmax,ki)[0] for ki in k]
# Loop over y, computing elements of R_yn
for i in lms:
l = i[0]
m = i[1]
trigpart = np.cos(np.pi*l/2.0)
B = np.asarray([A[ki][l] for ki in range(len(k))])
R_long[y,:NN/2] = 4.0 * np.pi * sph_harm(m,l,theta,phi).reshape(NN/2)*B.reshape(NN/2) * trigpart
trigpart = np.sin(np.pi*l/2.0)
R_long[y,NN/2:] = 4.0 * np.pi * sph_harm(m,l,theta,phi).reshape(NN/2)*B.reshape(NN/2)* trigpart
y = y+1
Universe.R = np.zeros([NY,len(ind[1])], dtype=np.complex128)
Universe.R = np.append(R_long[:,0:len(ind[1])/2], R_long[:,len(ind[1]):3*len(ind[1])/2], axis=1)
return
def get_number_of_fns(self):
'''
Get the number of fn modes.
'''
ind = np.where(Universe.kfilter>0)
fn_length = len(ind[1])
Universe.numfn = fn_length
return fn_length
# ====================================================================
class Universe(object):
"""
A simple model universe in a box.
"""
# ====================================================================
#Initialize the class variables
PIXSCALE = 0.1
BOXSIZE = 4.0
# Real space: define a coordinate grid:
NPIX = int(BOXSIZE/PIXSCALE) + 1
Nj = np.complex(0.0,NPIX)
x, y, z = np.mgrid[-BOXSIZE/2.0+BOXSIZE/(2*float(NPIX)):BOXSIZE/2.0-BOXSIZE/(2*float(NPIX)):Nj, -BOXSIZE/2.0+BOXSIZE/(2*float(NPIX)):BOXSIZE/2.0-BOXSIZE/(2*float(NPIX)):Nj, -BOXSIZE/2.0+BOXSIZE/(2*float(NPIX)):BOXSIZE/2.0-BOXSIZE/(2*float(NPIX)):Nj]
print beatbox.Multiverse.truncated_nmin
    # Define the truncated range of modes (in n and l) we want in our Universe:
try:
truncated_nmax = beatbox.Multiverse.truncated_nmax
truncated_nmin = beatbox.Multiverse.truncated_nmin
truncated_lmax = beatbox.Multiverse.truncated_lmax
truncated_lmin = beatbox.Multiverse.truncated_lmin
except NameError:
truncated_nmax = 2
truncated_nmin = 1
truncated_lmax = 8
truncated_lmin = 0
    # If only truncated_lmax is provided, calculate the largest truncated_nmax we can reconstruct
if (truncated_lmax is not None) and (truncated_nmax is None):
truncated_nmax = int(np.floor((3.0*(truncated_lmax+1)**2.0/(4.0*np.pi))**(1.0/3.0)))
# Else define a default value for truncated_nmax if not already done
elif truncated_nmax is None:
truncated_nmax = 6
    # If only truncated_nmax is provided, calculate the truncated_lmax needed for no information
# from the 3D map to be lost
if (truncated_nmax is not None) and (truncated_lmax is None):
truncated_lmax = int(np.ceil(-0.5+2.0*truncated_nmax**(3.0/2.0)*np.sqrt(np.pi/3.0)))
    # Make a y_max-long tuple of l and m pairs
if None not in (truncated_lmin, truncated_lmax):
lms = [(l, m) for l in range(truncated_lmin,truncated_lmax+1) for m in range(-l, l+1)]
# Fourier space: define a coordinate grid:
# The nmax we need for the resolution we want in our Universe is:
nmax = int(BOXSIZE/(2*PIXSCALE))
Deltak = 2.0*np.pi/BOXSIZE
kmax = nmax*Deltak
kx, ky, kz = np.meshgrid(np.linspace(-kmax,kmax,NPIX),np.linspace(-kmax,kmax,NPIX),np.linspace(-kmax,kmax,NPIX), indexing='ij')
k = np.sqrt(np.power(kx, 2)+np.power(ky,2)+np.power(kz,2))
nx, ny, nz = np.meshgrid(np.linspace(-nmax,nmax,NPIX),np.linspace(-nmax,nmax,NPIX),np.linspace(-nmax,nmax,NPIX), indexing='ij');
n = np.sqrt(np.power(nx, 2)+np.power(ny,2)+np.power(nz,2));
# Define the computer Fourier coordinates, used for iFFT
kmax_for_iFFt = 1/(2*PIXSCALE)
Deltak_for_iFFT = (1/BOXSIZE)
kx_for_iFFT = nx/BOXSIZE
ky_for_iFFT = ny/BOXSIZE
kz_for_iFFT = nz/BOXSIZE
# Define filter in k-space, that contains the modes we want:
kfilter = None
set_Universe_k_filter = set_k_filter
#Define and populate the R matrix:
R = None
populate_Universe_R = populate_response_matrix
numfn = None
get_numfn = get_number_of_fns
#==========================================================
def __init__(self):
# The potential map (pure real):
self.phi = self.x * 0.0
# The CMB temperature map:
self.Tmap = None
self.NSIDE = None
return
def __str__(self):
return "an empty model universe, containing a grid 41x41x41 pixels (and corresponding k grid in Fourrier space), a k filter and the corresponding R matrix mapping between those k values and a range of l (given by the Multiverse)"
# ----------------------------------------------------------------
def read_in_CMB_T_map(self,from_this=None):
if from_this is None:
print "No CMB T map file supplied."
self.Tmapfile = None
else:
self.Tmapfile = from_this
self.Tmap = hp.read_map(from_this)
self.NSIDE = hp.npix2nside(len(self.Tmap))
return
def write_CMB_T_map(self, from_this=None, to_this='my_map'):
if from_this is None:
print "No CMB T map supplied"
else:
self.Tmapfile=to_this+".fits"
hp.write_map(self.Tmapfile, from_this)
return
    def show_CMB_T_map(self,Tmap=None, max=100, title = "CMB gravitational potential fluctuations as seen from inside the LSS", from_perspective_of = "observer", cmap=None):
if Tmap is None:
self.NSIDE = 256
self.Tmap = hp.alm2map(self.alm,self.NSIDE)
else:
self.Tmap = Tmap
if from_perspective_of == "observer":
dpi = 300
figsize_inch = 60, 40
fig = plt.figure(figsize=figsize_inch, dpi=dpi)
# Sky map:
hp.mollview(self.Tmap, rot=(-90,0,0), min=-max, max=max, title=title + ", $\ell_{max}=$%d " % self.truncated_lmax, cmap=cmap, unit="$\mu$K")
plt.savefig(title+".png", dpi=dpi, bbox_inches="tight")
else:
# Interactive "external" view ([like this](http://zonca.github.io/2013/03/interactive-3d-plot-of-sky-map.html)) pass
# beatbox.zoncaview(self.Tmap)
# This did not work, sadly. Maybe we can find a 3D
# spherical surface plot routine using matplotlib? For
# now, just use the healpix vis.
R = (0.0,0.0,0.0) # (lon,lat,psi) to specify center of map and rotation to apply
    hp.orthview(self.Tmap,rot=R,flip='geo',half_sky=True,title="CMB gravitational potential fluctuations as seen from outside the LSS, $\ell_{max}$=%d" % self.truncated_lmax)
print "Ahem - we can't visualize maps on the surface of the sphere yet, sorry."
return
def decompose_T_map_into_spherical_harmonics(self,lmax=None):
"""
See healpy documentation at https://healpy.readthedocs.org/en/latest/generated/healpy.sphtfunc.map2alm.html
    self.alm is a 1D numpy array of type complex128.
Indexing is described at https://healpy.readthedocs.org/en/latest/generated/healpy.sphtfunc.Alm.html
"""
if lmax is None:
self.lmax = 3*self.NSIDE - 1
else:
self.lmax = lmax
self.mmax = self.lmax
self.alm = hp.sphtfunc.map2alm(self.Tmap,lmax=self.lmax,mmax=self.mmax)
return
def show_one_spherical_harmonic_of_CMB_T_map(self,l=1,m=1,max=20):
"""
To do this we need to make a healpy-format alm array, with
just one non-zero complex value in it, which we extract
from the parent alm array. Since healpy only returns positive
m coefficients, we just ask to see component with that |m|.
"""
projected_alm = self.alm * 0.0
i = hp.Alm.getidx(self.lmax, l, np.abs(m)) # Note |m| here
projected_alm[i] = self.alm[i]
projected_map = hp.alm2map(projected_alm,self.NSIDE)
hp.mollview(projected_map)
return
def show_lowest_spherical_harmonics_of_CMB_T_map(self,lmax=10,max=20, cmap=None, title=None):
"""
To do this, we construct a healpy-formatted alm array based on
a subset of the parent one, again observing the positive m-only
convention.
"""
truncated_alm = self.alm * 0.0
i = []
for l in range(lmax+1):
for m in range(l+1):
i.append(hp.Alm.getidx(self.lmax, l, m))
print "Displaying sky map of the l = ",l," and lower spherical harmonics only..."
truncated_alm[i] = self.alm[i]
self.truncated_map = hp.alm2map(truncated_alm, 256)
dpi = 300
figsize_inch = 60, 40
fig = plt.figure(figsize=figsize_inch, dpi=dpi)
hp.mollview(self.truncated_map,rot=(-90,0,0),min=-max,max=max, cmap=cmap, unit="$10^{-6}c^2$", title=title)
plt.savefig("lmax"+str(lmax)+".png", dpi=dpi, bbox_inches="tight")
return
def get_alm(self,l=None,m=None,lms=None):
"""
hp.map2alm only returns the positive m coefficients - we need
to derive the negative ones ourselves if we are going to
do anything with them outside healpy. See
http://stackoverflow.com/questions/30888908/healpy-map2alm-function-does-not-return-expected-number-of-alm-values?lq=1
for discussion.
"""
if (l is None or m is None) and lms is None:
return None
elif l is None and m is None:
ay = np.zeros(len(lms),dtype=np.complex128)
for i in lms:
if i[1] >= 0:
index = hp.Alm.getidx(self.lmax, i[0], i[1])
prefactor = 1.0
value = self.alm[index]
else:
index = hp.Alm.getidx(self.lmax, i[0], -i[1])
prefactor = (-1.0)**i[1]
value = np.conjugate(self.alm[index])
ay[i[0]**2+i[0]+i[1]-(lms[0][0])**2] = prefactor * value
return ay
elif m >= 0:
index = hp.Alm.getidx(self.lmax, l, m)
prefactor = 1.0
value = self.alm[index]
else:
index = hp.Alm.getidx(self.lmax, l, -m)
prefactor = (-1.0)**m
value = np.conjugate(self.alm[index])
return prefactor * value
def put_alm(self,value,l=None,m=None,lms=None):
'''
Re-arranges the value or vector of a_y values into the
correct order to be used by healpy as a_lm.
If lms is given, len(lms) must equal len(value), while
if l and m are specified, value must be a scalar.
'''
if (l is None or m is None) and lms is None:
return None
elif l is None and m is None:
if len(lms) != len(value):
    print 'a_y and (l, m) are of unequal lengths, cannot proceed'
return
index = np.zeros(len(lms), dtype=int)
count = 0
for i in lms:
index[count] = hp.Alm.getidx(max(lms)[0], i[0], i[1])
count = count+1
lmax = max(lms)[0]
mmax = max(lms)[1]
self.alm = np.zeros(mmax*(2*lmax+1-mmax)/2+lmax+1, dtype=np.complex128)
# Throw away the negative indices (which correspond to the negative m's)
# since the maps are real, negative m coefficients can be deduced
# from the positive ones.
index_positive = index[~(index<0)]
ind1 = np.arange(len(value))
self.alm[index_positive] = value[ind1[~(index<0)]]
return
index = hp.Alm.getidx(self.truncated_lmax, l, m)
self.alm[index] = value
return
def alm2ay(self, truncated_lmax=None, truncated_lmin=None, usedefault=1):
"""
Read its own a_lm array, and return the corresponding
a_y array (in the correct order).
    The conversion between the y index and (l, m) is:
    (l+1)**2 - (2l+1)/2 + 1/2 + m = l**2 + 2*l + 1 - l - 1/2 + 1/2 + m = l**2 + l + 1 + m
    and the first element has index 0, so subtract 1: y = l**2 + l + m.
    Finally, subtract the lmin**2 elements that come before lmin, so
    y = l**2 + l + m - lmin**2
"""
if usedefault == 1:
truncated_lmax = self.truncated_lmax
truncated_lmin = self.truncated_lmin
lms = self.lms
    # Make a y_max-long tuple of l and m pairs
else:
lms = [(l, m) for l in range(truncated_lmin,truncated_lmax+1) for m in range(-l, l+1)]
ay = np.zeros((truncated_lmax+1)**2-(truncated_lmin)**2,dtype=np.complex128)
ay = self.get_alm(lms=lms)
self.ay=ay
return ay
def ay2alm(self, ay,truncated_lmax=None, truncated_lmin=None, usedefault=1):
"""
Repackage the a_y array into healpy-readable a_lm's
"""
if usedefault == 1:
    truncated_lmax = self.truncated_lmax
    truncated_lmin = self.truncated_lmin
lms=self.lms
    # Make a y_max-long tuple of l and m pairs
else:
lms = [(l, m) for l in range(truncated_lmin,truncated_lmax+1) for m in range(-l, l+1)]
self.put_alm(ay, lms=lms)
return
def ay2ayreal_for_inference(self,value):
'''
Reorganize the ays so that only independent measurements are kept,
and split the real and imaginary values into different elements.
    The negative m values are dependent on the positive m, so they must
    be discarded, and all but the m=0 values are complex.
Therefore, we replace the positive m values by their respective real
part, and the negative m values by the imaginary part of the
corresponding positive m. This way, each l retains 2l+1 independent
real degrees of freedom.
'''
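    # Example for l = 1: the three real numbers kept are Im(a_{1,1}) (stored in
    # the m = -1 slot), a_{1,0} (already real) and Re(a_{1,1}) (the m = +1 slot).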
    #Select the m values out of the lms tuples
m = np.array([m[1] for m in self.lms])
#Find the indices of the positive ms
pos_ind = (m>0)
#Find the indices of the m=0
zero_ind = (m==0)
#Find the indices of the negative ms
neg_ind = (m<0)
ay_real = np.zeros(len(self.lms), dtype=np.float)
ay_real[pos_ind] = value[pos_ind].real
ay_real[neg_ind] = value[pos_ind].imag
ay_real[zero_ind] = value[zero_ind].astype(np.float)
return ay_real
def ayreal2ay_for_mapping(self,ay_real):
    #Select the m values out of the lms tuples
m = np.array([m[1] for m in self.lms])
#Find the indices of the positive ms
pos_ind = (m>0)
#Find the indices of the m=0
zero_ind = (m==0)
#Find the indices of the negative ms
neg_ind = (m<0)
ay = np.zeros(len(self.lms), dtype=np.complex128)
ay[pos_ind] = ay_real[pos_ind].real+1j*ay_real[neg_ind]
ay[neg_ind] = ((ay_real[pos_ind].T-1j*ay_real[neg_ind].T) * (-1.)**m[neg_ind]).T
ay[zero_ind] = ay_real[zero_ind].astype(np.complex128)
self.ay=ay
return
def write_out_spherical_harmonic_coefficients(self,lmax=10):
outfile = string.join(string.split(self.Tmapfile,'.')[0:-1],'.') + '_alm_lmax' + str(lmax) + '.txt'
f = open(outfile, 'w')
f.write("# l m alm_real alm_imag\n")
count = 0
for l in range(lmax+1):
for m in range(-l,l+1):
alm = self.get_alm(l,m)
line = " {0:d} {1:d} {2:g} {3:g}\n".format(l,m,float(np.real(alm)),float(np.imag(alm)))
f.write(line)
count += 1
f.close()
print count,"alm's (lmax =",lmax,") written to",outfile
return
# ----------------------------------------------------------------
def populate_instance_response_matrix(self,truncated_nmax=None, truncated_nmin=None,truncated_lmax=None, truncated_lmin=None, usedefault=1):
"""
Populate the R matrix for the default range of l and n, or
    over the range specified above
"""
if usedefault == 1:
truncated_nmax = self.truncated_nmax
truncated_nmin = self.truncated_nmin
truncated_lmax = self.truncated_lmax
truncated_lmin = self.truncated_lmin
lms = self.lms
kfilter = self.kfilter
else:
low_k_cutoff = truncated_nmin*self.Deltak
high_k_cutoff = truncated_nmax*self.Deltak
self.set_instance_k_filter(truncated_nmax=truncated_nmax,truncated_nmin=truncated_nmin)
lms = [(l, m) for l in range(truncated_lmin,truncated_lmax+1) for m in range(-l, l+1)]
# Initialize R matrix:
NY = (truncated_lmax + 1)**2-(truncated_lmin)**2
# Find the indices of the non-zero elements of the filter
ind = np.where(self.kfilter>0)
# The n index spans 2x that length, 1st half for the cos coefficients, 2nd half
# for the sin coefficients
NN = 2*len(ind[1])
R_long = np.zeros([NY,NN], dtype=np.complex128)
# In case we need n1, n2, n3 at some point...:
# n1, n2, n3 = self.kx[ind]/self.Deltak , self.ky[ind]/self.Deltak, self.kz[ind]/self.Deltak
k, theta, phi = self.k[ind], np.arctan2(self.ky[ind],self.kx[ind]), np.arccos(self.kz[ind]/self.k[ind])
# We need to fix the 'nan' theta element that came from having ky=0
theta[np.isnan(theta)] = np.pi/2.0
# Get ready to loop over y
y = 0
A = [sph_jn(truncated_lmax,ki)[0] for ki in k]
# Loop over y, computing elements of R_yn
for i in lms:
l = i[0]
m = i[1]
trigpart = np.cos(np.pi*l/2.0)
B = np.asarray([A[ki][l] for ki in range(len(k))])
R_long[y,:NN/2] = 4.0 * np.pi * sph_harm(m,l,theta,phi).reshape(NN/2)*B.reshape(NN/2) * trigpart
trigpart = np.sin(np.pi*l/2.0)
R_long[y,NN/2:] = 4.0 * np.pi * sph_harm(m,l,theta,phi).reshape(NN/2)*B.reshape(NN/2)* trigpart
y = y+1
self.R = np.zeros([NY,len(ind[1])], dtype=np.complex128)
self.R = np.append(R_long[:,0:len(ind[1])/2], R_long[:,len(ind[1]):3*len(ind[1])/2], axis=1)
return
# ----------------------------------------------------------------
def load_mathematica_data(self):
f= open("data/f_ns.txt", 'r')
data = f.read()
f.close()
columns = data.split()
f_n=np.zeros(len(columns))
for count in range(int(len(columns))):
f_n[count] = float(columns[count])
g= open("data/fncoordinates.txt", 'r')
data2 = g.read()
g.close()
columns2 = data2.split()
k_vec=np.zeros(len(columns2))
for count2 in range(int(len(columns2))):
k_vec[count2] = float(columns2[count2])
k_x=k_vec[0::3]
k_y=k_vec[1::3]
k_z=k_vec[2::3]
return f_n, k_x, k_y, k_z
# ----------------------------------------------------------------
def set_instance_k_filter(self,truncated_nmax=None,truncated_nmin=None):
"""
Define a filter over the k space for the modes between kmin and kmax
"""
#Make sure we have lower & upper bounds for the filter
if truncated_nmax is None:
self.high_k_cutoff = self.truncated_nmax*self.Deltak
else:
self.truncated_nmax = truncated_nmax
self.high_k_cutoff = truncated_nmax*self.Deltak
if truncated_nmin is None:
self.low_k_cutoff=self.truncated_nmin*self.Deltak
else:
self.truncated_nmin = truncated_nmin
self.low_k_cutoff = truncated_nmin*self.Deltak
# Define the filter
low_k_filter = (~(self.n < self.truncated_nmin)).astype(int)
high_k_filter = (~(self.n > self.truncated_nmax)).astype(int)
self.kfilter = high_k_filter*low_k_filter
return
def generate_a_random_potential_field(self,truncated_nmax=6,truncated_nmin=2,n_s=0.97,kstar=0.02,PSnorm=2.43e-9,Pdist=1,Pmax=2*np.pi,Pvar=0.0, printout=1, do_fft=1):
    # Is this really necessary, since the filter definition moved up into the __init__ function?
# Set the k filter:
if (beatbox.Universe.kfilter is None) or (truncated_nmax != beatbox.Universe.truncated_nmax) or (truncated_nmin != beatbox.Universe.truncated_nmin):
self.set_instance_k_filter(truncated_nmax=truncated_nmax,truncated_nmin=truncated_nmin)
# Define the constants that go in the power spectrum
# scalar spectral index
self.n_s = n_s
# power spectrum normalization
self.PSnorm = PSnorm
# Change units of the pivot scale kstar from Mpc^-1 to normalize the smallest k
# mode to 1 (i.e. the radius of the CMB photosphere at 13.94Gpc)
self.kstar = kstar*1.394e4
# Draw Gaussian random Fourier coefficients with a k^{-3+(n_s-1)} power spectrum:
self.Power_Spectrum = self.PSnorm*10000*np.power((self.k/self.kstar) ,(-3+(self.n_s-1)))
self.Power_Spectrum[np.isinf(self.Power_Spectrum)] = 10**-9
fn_Norm = np.random.rayleigh(np.sqrt(self.Power_Spectrum/2.))*self.kfilter
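    # (For the default uniform phases drawn below, a Rayleigh(sqrt(P/2)) amplitude
    # yields complex coefficients whose total variance equals the power spectrum
    # P at each k.)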
# Draw the phases for the modes: use p=1 for a uniform distribution in [0,Pmax],
# and p=0 for a Gaussian distribution with mean Pmax and variance Pvar
self.Pdist = Pdist
self.Pvar = Pvar
self.Pmax = Pmax
if Pdist == 1:
fn_Phase = np.random.uniform(0, Pmax*np.ones(self.k.shape,dtype=np.float_) )*self.kfilter
else:
fn_Phase = np.random.normal(Pmax, np.sqrt(Pvar)*np.ones(self.k.shape,dtype=np.float_) )*self.kfilter
self.fn_Phase = fn_Phase
self.fn_Norm = fn_Norm
# Need to ensure that f_-k = f^*_k
# FT = fn_R + fn_I*1j
FT = fn_Norm*np.cos(fn_Phase)+fn_Norm*np.sin(fn_Phase)*1j
self.FT = FT
X = np.concatenate((np.append(FT[:self.nmax, self.nmax ,self.nmax ], 0), np.conjugate(FT[:self.nmax, self.nmax ,self.nmax ])[::-1]), axis=0)
Z = np.concatenate( ( FT[:, :self.nmax ,self.nmax ], X.reshape(2*self.nmax+1,1), np.conjugate(FT[:, :self.nmax ,self.nmax ])[::-1,::-1]), axis=1 )
self.fngrid = np.concatenate( (FT[:,:,:self.nmax], Z.reshape(2*self.nmax+1,2*self.nmax+1,1), np.conjugate( FT[:,:,:self.nmax])[::-1,::-1,::-1] ), axis=2 )
    if printout == 1:
print "Generated ",self.fngrid[~(self.fngrid[:,:,:] == 0)].size," potential Fourier coefficients"
if Pdist == 1:
print " with phases uniformly distributed between 0 and ", Pmax
else:
print " with phases sampled from a Gaussian distribution with mean ", Pmax," and variance ", Pvar
# Evaluate it on our Phi grid:
if do_fft == 1:
self.evaluate_potential_given_fourier_coefficients(printout=printout)
return
def evaluate_potential_given_fourier_coefficients(self,printout=1):
self.phi = np.zeros(self.x.shape,dtype=np.float_)
ComplexPhi = np.zeros(self.x.shape,dtype=np.complex128)
#THIS PART DID THE iFFT MANUALLY
# for i in range((2*self.nmax+1)**3):
# phase = self.kx.reshape((2*self.nmax+1)**3,1)[i] * self.x + self.ky.reshape((2*self.nmax+1)**3,1)[i] * self.y + self.kz.reshape((2*self.nmax+1)**3,1)[i] * self.z
# ComplexPhi += self.fngrid.reshape((2*self.nmax+1)**3,1)[i] * (np.cos(phase)+np.sin(phase)*1j)
#Now use iFFT to invert the Fourier coefficients f_n to a real space potential
ComplexPhi = np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(self.fngrid* self.Deltak_for_iFFT**3)))
# Throw out the residual imaginary part of the potential [< O(10^-16)]
self.phi = ComplexPhi.real*(self.kx_for_iFFT.shape[0])**3
    if printout == 1:
print "Built potential grid, with dimensions ",self.phi.shape,\
" and mean value ", round(np.mean(self.phi),4),"+/-",round(np.std(self.phi),7)
return
def rearrange_fn_from_grid_to_vector(self):
'''
It's easiest to generate a potential from the prior on a 3D
grid, so we can use the iFFT. For the linear algebra in the
    inference, we need the Fourier coefficients arranged in a
vector.
'''
ind = np.where(self.kfilter>0)
fn_long = np.zeros(2*len(ind[1]))
fn_long[:len(ind[1])] = (self.fngrid[ind]).real
fn_long[len(ind[1]):] = (self.fngrid[ind]).imag
self.fn = np.zeros(len(ind[1]))
self.fn[:len(ind[1])/2] = fn_long[:len(ind[1])/2]
self.fn[len(ind[1])/2:] = fn_long[len(ind[1]):3*len(ind[1])/2]
return
def rearrange_fn_from_vector_to_grid(self):
'''
    It's easiest to generate a potential from the prior on a 3D
    grid, so we can use the iFFT. This method takes the Fourier
    coefficient vector used in the inference and rearranges it
    back onto the 3D grid.
'''
self.fn=np.squeeze(self.fn)
ind = np.where(self.kfilter>0)
fn_long = np.zeros((2*len(ind[1])))
fn_long[:len(ind[1])/2] = self.fn[:len(ind[1])/2]
fn_long[len(ind[1])-1:len(ind[1])/2-1 :-1] = self.fn[:len(ind[1])/2]
fn_long[len(ind[1]):3*len(ind[1])/2] = self.fn[len(ind[1])/2:]
fn_long[:3*len(ind[1])/2-1 :-1] = -self.fn[:len(ind[1])/2]
self.fngrid = np.zeros(self.kfilter.shape, dtype=np.complex128)
self.fngrid[ind]=fn_long[:len(ind[1])] + 1j*fn_long[len(ind[1]):]
return
def get_ordered_fn_indices(self):
'''
    Get the indices of the Fourier modes in the vector used
for the inference and sort them by increasing k value.
'''
ind = np.where(self.kfilter>0)
k, theta, phi = self.k[ind], np.arctan2(self.ky[ind], self.kx[ind]), np.arccos(self.kz[ind]/self.k[ind])
kvec_long = np.zeros(2*len(ind[1]))
kvec_long[:len(ind[1])] = k
kvec_long[len(ind[1]):] = k
kvec = np.zeros(len(ind[1]))
kvec[:len(ind[1])/2] = kvec_long[:len(ind[1])/2]
kvec[len(ind[1])/2:] = kvec_long[len(ind[1]):3*len(ind[1])/2]
ind_for_ordered_fn = np.argsort(kvec)
return ind_for_ordered_fn
def get_instance_numfn(self):
'''
Get the number of fn modes.
'''
ind = np.where(self.kfilter>0)
fn_length = len(ind[1])
return fn_length
def transform_3D_potential_into_alm(self, truncated_nmax=None, truncated_nmin=None,truncated_lmax=None, truncated_lmin=None, usedefault=1, fn=None):
'''
From the f_n on a 3D grid, rearrange the Fourier coefficients
in a vector and generate the R matrix. From these, calculate the a_y
    and finally rearrange them into an a_lm vector usable by healpy to
    make a T map.
    The method can do this either for the harmonics corresponding to the
full range of n values of the 3D potential (if usedefault=1), or else
for the specified values. If truncated_nmax is too large for the
specified truncated_lmax, some information will be lost.
'''
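    # Typical usage (a sketch, assuming `U` is a Universe instance with U.fngrid
    # already populated, e.g. by generate_a_random_potential_field):
    #   U.transform_3D_potential_into_alm()   # default n and l ranges
    #   U.show_CMB_T_map()                    # visualize the resulting map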
# Make a vector out of the fn grid of Fourier coefficients
if fn is None:
self.rearrange_fn_from_grid_to_vector()
if usedefault == 1:
# Populate the R matrix
if beatbox.Universe.R is None:
self.populate_instance_response_matrix(truncated_nmax=truncated_nmax, truncated_nmin=truncated_nmin,truncated_lmax=truncated_lmax, truncated_lmin=truncated_lmin,usedefault=usedefault)
# Calculate the a_y matrix
ay = np.dot(self.R,self.fn)
self.ay = ay
# Reorganize a_y into a_lm
self.ay2alm(ay, usedefault=usedefault)
else:
# Populate the R matrix
self.populate_instance_response_matrix(truncated_nmax=truncated_nmax, truncated_nmin=truncated_nmin,truncated_lmax=truncated_lmax, truncated_lmin=truncated_lmin, usedefault=0)
# Calculate the a_y matrix
ay = np.dot(self.R,self.fn)
self.ay = ay
# Reorganize a_y into a_lm
self.ay2alm(ay,truncated_lmax=truncated_lmax, truncated_lmin=truncated_lmin, usedefault=0)
return
def show_potential_with_yt(self,output='',angle=np.pi/4.0, N_layer=5, alpha_norm=5.0, cmap='BrBG', Proj=0, Slice=0, gifmaking=0, show3D=0, continoursshade = 50.0, boxoutput='scratch/opac_phi3D_Gauss_phases_mean', slicerad=1):
"""
Visualize the gravitational potential using yt. We're after something
like http://yt-project.org/doc/_images/vr_sample.jpg - described
at http://yt-project.org/doc/visualizing/volume_rendering.html
"""
# Load the potential field into a yt data structure,
# offsetting such that minimum value is zero.
# First get extrema of phi array:
mi = np.min(self.phi)
ma = np.max(self.phi)
print mi, ma
# Symmetrize to put zero at center of range:
ma = np.max(np.abs([mi,ma]))
mi = -ma
# Offset to make minimum value zero:
offset = ma
ma = 2.0*ma
mi = 0.0
# Size of the box containing the phi
# Physical -2 to 2 box
# bbox = np.array([[-2, 2], [-2, 2], [-2, 2]])
# Physical box from the iFFT
bbox = np.array([[np.min(self.x), np.max(self.x)], [np.min(self.y), np.max(self.y)], [np.min(self.z), np.max(self.z)]])
# Apply offset and store phi array in a yt data structure,
# I'm putting some random density units here
# (seems to be needed to display properly):
xnorm=np.sqrt(self.x**2 + self.y**2 + self.z**2);
    if (Slice != 1) and (Proj != 1):
indgtr = (~(xnorm < 0.9)).astype(int)
indsmlr = (~(xnorm > 1.1)).astype(int)
ind = indgtr*indsmlr
sphere = np.ones(self.phi.shape)
sphere = 5.*ind
#sphere = 0.0007*ind
negsphere = -self.phi*ind
else:
sphere = np.zeros(self.phi.shape)
negsphere = np.zeros(self.phi.shape)
#self.phi[0,0,200]=-40
#self.phi[-1,-1,200]=20
#phiprime=self.phi
#phiprime[np.where(self.phi<-18)]=-20
# ds = yt.load_uniform_grid((dict(density=(self.phi+sphere, 'g/cm**3'), Xnorm=(xnorm, 'g/cm**3'))), self.phi.shape, bbox=bbox, nprocs=1)
ds = yt.load_uniform_grid((dict(density=(self.phi+offset+sphere, 'g/cm**3'), Xnorm=(xnorm, 'g/cm**3'))), self.phi.shape, bbox=bbox, nprocs=1)
field = 'density'
#Check that the loaded field is recognized by yt
# print ds.field_list
# Here's Sam's gist, from https://gist.github.com/samskillman/0e574d1a4f67d3a3b1b1
# im, sc = yt.volume_render(ds, field='phi')
# sc.annotate_domain(ds)
# sc.annotate_axes()
# im = sc.render()
# im.write_png(output, background='white')
# volume_render is not yet available, though.
# Following the example at http://yt-project.org/doc/visualizing/volume_rendering.html
# Set minimum and maximum of plotting range (in proper yt units):
dd = ds.all_data()
mi2, ma2 = dd.quantities.extrema(field)
#print "Extrema of ds phi:",mi,ma, mi2, ma2
use_log = False
# Instantiate the ColorTransferFunction.
# tf = yt.ColorTransferFunction((mi2, ma2))
# tf.grey_opacity=True
# Add some isopotential surface layers:
# tf.add_layers(N_layer, 0.0000005*(ma2 - mi2) / N_layer, alpha=alpha_norm*np.ones(N_layer,dtype='float64'), colormap = cmap)
# Instantiate the ColorTransferFunction using the transfer function helper.
from IPython.core.display import Image
from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper
tfh = yt.TransferFunctionHelper(ds)
tfh.set_field('density')
tfh.set_log(False)
tfh.set_bounds()
tfh.build_transfer_function()
tfh.tf.grey_opacity=True
#For small units, wide Gaussians:
tfh.tf.add_layers(N_layer, w=0.0005*(ma2 - mi2) /N_layer, mi=0.2*ma, ma=ma-0.2*ma, alpha=alpha_norm*np.ones(N_layer,dtype='float64'), col_bounds=[0.2*ma,ma-0.2*ma] , colormap=cmap)
#For big units, small Gaussians
#tfh.tf.add_layers(N_layer, w=0.00000005*(ma2 - mi2) /N_layer, mi=0.3*ma, ma=ma-0.2*ma, alpha=alpha_norm*np.ones(N_layer,dtype='float64'), col_bounds=[0.3*ma,ma-0.3*ma] , colormap=cmap)
    if (Slice != 1) and (Proj != 1):
tfh.tf.map_to_colormap(5., 10.0, colormap='jet', scale=continoursshade)
#tfh.tf.map_to_colormap(0.001, 0.0014, colormap='jet', scale=continoursshade)
#tfh.tf.add_layers(1, w=0.001*ma2, mi=0.0108, ma=0.012, colormap='Pastel1', col_bounds=[0.01, 0.012])
# Check if the transfer function captures the data properly:
densityplot1 = tfh.plot('densityplot1')
densityplot2 = tfh.plot('densityplot2', profile_field='cell_mass')
# Set up the camera parameters: center, looking direction, width, resolution
c = (np.max(self.x)+np.min(self.x))/2.0
Lx = np.sqrt(2.0)*np.cos(angle)
Ly = np.sqrt(2.0)*np.sin(angle)
Lz = 0.75
L = np.array([Lx, Ly, Lz])
W = ds.quan(1.6, 'unitary')
N = 512
# Create a camera object
cam = ds.camera(c, L, W, N, tfh.tf, fields=[field], log_fields = [use_log], no_ghost = False)
cam.transfer_function = tfh.tf
if self.Pdist == 1:
im1 = cam.snapshot('scratch/opac_phi3D_Uniform_phases_0-'+str(self.Pmax)+'.png', clip_ratio=5)
else:
im1 = cam.snapshot('scratch/'+boxoutput+str(self.Pmax)+'_var'+str(self.Pvar)+'.png', clip_ratio=5)
im1.write_png('scratch/transparent_bg.png', background=[0.,0.,0.,0.])
im1.write_png('scratch/white1_bg.png', background=[1.,1.,1.,1.])
nim = cam.draw_grids(im1)
#im=cam.snapshot
#nim = cam.draw_box(im, np.array([0.25,0.25,0.25]), np.array([0.75,0.75,0.75]))
if show3D == 1:
nim.write_png(boxoutput)
cam.show()
# Make a color bar with the colormap.
# cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
self.cam = cam
if gifmaking == 1:
# Add the domain box to the image:
nim = cam.draw_domain(im1)
# Save the image to a file:
nim.write_png(output)
if Proj == 1:
s = yt.ProjectionPlot(ds, "z", "density")
    # this still doesn't work:
s.annotate_sphere([0., 0., 0.], radius=(1, 'kpc'),
circle_args={'color':'red', "linewidth": 3})
s.show()
s.save('phi')
if Slice == 1:
w = yt.SlicePlot(ds, "z", "density", center=[0,0,slicerad])
w.set_cmap(field="density", cmap=cmap)
circrad = np.sqrt(1-slicerad*slicerad)
w.annotate_sphere([0., 0., 0.], radius=(circrad, 'cm'),
circle_args={'color':'red',"linewidth": 3})
w.show()
w.save('phi')
return
def show_potential_from_all_angles_with_yt(self,output='phi.gif'):
# Create 36 frames for the animated gif, one for each angle:
steps = 36
angles = np.arange(steps)*np.pi/np.float(steps)/2.0+np.pi/4
# current bug: the frames jump at pi/4, 3pi/4 etc..
# book-keeping:
folder = 'frames/'
os.system("rm -rf "+folder)
os.system("mkdir -p "+folder)
# Now create the individual frames:
for k,angle in enumerate(angles):
framefile = folder+str(k).zfill(3)
print "Making frame",k,": ",framefile,"at viewing angle",angle
self.show_potential_with_yt(output=framefile,angle=angle, N_layer=6, alpha_norm=5.0, cmap='BrBG', Proj=0, Slice=0, gifmaking=1)
# Create an animated gif of all the frames:
images = [PIL_Image.open(framefile) for framefile in glob.glob(folder+'*.png')]
writeGif(output, images, duration=0.2)
return
def make_gif_from_frames_with_yt(self,folder='../frames/', output='phi.gif'):
# Create an animated gif of all the frames:
images = [PIL_Image.open(framefile) for framefile in glob.glob(folder+'*.png')]
writeGif(output, images, duration=0.2)
return
# ====================================================================
"""
Response matrix from Roger's mathematica notebook:
# Construct the klst:
nmax = 6;
klst = {};
Do[
If[0 < n1^2 + n2^2 + n3^2 <= nmax^2, klst = Append[klst, {n1, n2, n3}]],
{n1, -nmax, nmax}, {n2, -nmax, nmax}, {n3, -nmax, nmax}
];
NN = Length[klst];
# Set size of box, via separation in k space:
    \[CapitalDelta]k = .5 \[Pi];
# Construct llst, an array of l's and m's for use in Spherical Harmonics:
# Note that the monopole and dipole are ignored!
lmax = 10;
llst = {};
Do[
If[1 < l <= lmax, llst = Append[llst, {l, m}]], {l, 2, lmax}, {m, -l, l}
];
llst; # Not sure what this line does.
L = Length[llst];
# Construct R matrix:
R = Chop[ # Clean out rounding errors (esp in imaginary parts)
Table[4. \[Pi] I^llst[[y, 1]] # i^l - imaginary i!
SphericalHarmonicY[llst[[y, 1]],
llst[[y, 2]],
ArcCos[klst[[n, 3]]/Norm[klst[[n]]]], # theta'
If[klst[[n, 1]] == klst[[n, 2]] == 0, 0, ArcTan[klst[[n, 1]], klst[[n, 2]]]]] # phi'
\[Conjugate] # Take complex conjugate of the Ylm
SphericalBesselJ[llst[[y, 1]], \[CapitalDelta]k Norm[klst[[n]]]], # Norm gives the length of the k vector
{y, 1, L}, # for y in range 1 to L
{n, 1, NN} # for n in range 1 to NN
] # End of Table command
];
# Write it out:
(*Export["myn.txt",R]*)
"""
|
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from niftynet.application.base_application import BaseApplication
from niftynet.engine.application_factory import \
ApplicationNetFactory, InitializerFactory, OptimiserFactory
from niftynet.engine.application_variables import \
CONSOLE, NETWORK_OUTPUT, TF_SUMMARIES
from niftynet.engine.sampler_grid_v2 import GridSampler
from niftynet.engine.sampler_resize_v2 import ResizeSampler
from niftynet.engine.sampler_uniform_v2 import UniformSampler
from niftynet.engine.sampler_weighted_v2 import WeightedSampler
from niftynet.engine.sampler_balanced_v2 import BalancedSampler
from niftynet.engine.windows_aggregator_grid import GridSamplesAggregator
from niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator
from niftynet.io.image_reader import ImageReader
from niftynet.layer.crop import CropLayer
from niftynet.layer.histogram_normalisation import \
HistogramNormalisationLayer
from niftynet.layer.loss_regression import LossFunction
from niftynet.layer.mean_variance_normalisation import \
MeanVarNormalisationLayer
from niftynet.layer.pad import PadLayer
from niftynet.layer.post_processing import PostProcessingLayer
from niftynet.layer.rand_flip import RandomFlipLayer
from niftynet.layer.rand_rotation import RandomRotationLayer
from niftynet.layer.rand_spatial_scaling import RandomSpatialScalingLayer
from niftynet.layer.rgb_histogram_equilisation import \
RGBHistogramEquilisationLayer
from niftynet.evaluation.regression_evaluator import RegressionEvaluator
from niftynet.layer.rand_elastic_deform import RandomElasticDeformationLayer
from niftynet.engine.windows_aggregator_identity import WindowAsImageAggregator
SUPPORTED_INPUT = set(['image', 'output', 'weight', 'sampler', 'inferred'])
class RegressionApplication(BaseApplication):
REQUIRED_CONFIG_SECTION = "REGRESSION"
def __init__(self, net_param, action_param, action):
BaseApplication.__init__(self)
tf.logging.info('starting regression application')
self.action = action
self.net_param = net_param
self.action_param = action_param
self.data_param = None
self.regression_param = None
self.SUPPORTED_SAMPLING = {
'uniform': (self.initialise_uniform_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'weighted': (self.initialise_weighted_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'resize': (self.initialise_resize_sampler,
self.initialise_resize_sampler,
self.initialise_resize_aggregator),
'balanced': (self.initialise_balanced_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
}
def initialise_dataset_loader(
self, data_param=None, task_param=None, data_partitioner=None):
self.data_param = data_param
self.regression_param = task_param
# initialise input image readers
if self.is_training:
reader_names = ('image', 'output', 'weight', 'sampler')
elif self.is_inference:
# in the inference process use `image` input only
reader_names = ('image',)
elif self.is_evaluation:
reader_names = ('image', 'output', 'inferred')
else:
tf.logging.fatal(
'Action `%s` not supported. Expected one of %s',
self.action, self.SUPPORTED_PHASES)
raise ValueError
try:
reader_phase = self.action_param.dataset_to_infer
except AttributeError:
reader_phase = None
file_lists = data_partitioner.get_file_lists_by(
phase=reader_phase, action=self.action)
self.readers = [
ImageReader(reader_names).initialise(
data_param, task_param, file_list) for file_list in file_lists]
# initialise input preprocessing layers
mean_var_normaliser = MeanVarNormalisationLayer(image_name='image') \
if self.net_param.whitening else None
histogram_normaliser = HistogramNormalisationLayer(
image_name='image',
modalities=vars(task_param).get('image'),
model_filename=self.net_param.histogram_ref_file,
norm_type=self.net_param.norm_type,
cutoff=self.net_param.cutoff,
name='hist_norm_layer') \
if (self.net_param.histogram_ref_file and
self.net_param.normalisation) else None
rgb_normaliser = RGBHistogramEquilisationLayer(
image_name='image',
            name='rgb_norm_layer') if self.net_param.rgb_normalisation else None
normalisation_layers = []
if histogram_normaliser is not None:
normalisation_layers.append(histogram_normaliser)
if mean_var_normaliser is not None:
normalisation_layers.append(mean_var_normaliser)
if rgb_normaliser is not None:
normalisation_layers.append(rgb_normaliser)
volume_padding_layer = [PadLayer(
image_name=SUPPORTED_INPUT,
border=self.net_param.volume_padding_size,
mode=self.net_param.volume_padding_mode,
pad_to=self.net_param.volume_padding_to_size)
]
# initialise training data augmentation layers
augmentation_layers = []
if self.is_training:
train_param = self.action_param
if train_param.random_flipping_axes != -1:
augmentation_layers.append(RandomFlipLayer(
flip_axes=train_param.random_flipping_axes))
if train_param.scaling_percentage:
augmentation_layers.append(RandomSpatialScalingLayer(
min_percentage=train_param.scaling_percentage[0],
max_percentage=train_param.scaling_percentage[1],
antialiasing=train_param.antialiasing,
isotropic=train_param.isotropic_scaling))
            if train_param.rotation_angle:
                rotation_layer = RandomRotationLayer()
                rotation_layer.init_uniform_angle(
                    train_param.rotation_angle)
                augmentation_layers.append(rotation_layer)
if train_param.do_elastic_deformation:
spatial_rank = list(self.readers[0].spatial_ranks.values())[0]
augmentation_layers.append(RandomElasticDeformationLayer(
spatial_rank=spatial_rank,
num_controlpoints=train_param.num_ctrl_points,
std_deformation_sigma=train_param.deformation_sigma,
proportion_to_augment=train_param.proportion_to_deform))
# only add augmentation to first reader (not validation reader)
self.readers[0].add_preprocessing_layers(
volume_padding_layer + normalisation_layers + augmentation_layers)
for reader in self.readers[1:]:
reader.add_preprocessing_layers(
volume_padding_layer + normalisation_layers)
def initialise_uniform_sampler(self):
self.sampler = [[UniformSampler(
reader=reader,
window_sizes=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_weighted_sampler(self):
self.sampler = [[WeightedSampler(
reader=reader,
window_sizes=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_resize_sampler(self):
self.sampler = [[ResizeSampler(
reader=reader,
window_sizes=self.data_param,
batch_size=self.net_param.batch_size,
shuffle=self.is_training,
smaller_final_batch_mode=self.net_param.smaller_final_batch_mode,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_grid_sampler(self):
self.sampler = [[GridSampler(
reader=reader,
window_sizes=self.data_param,
batch_size=self.net_param.batch_size,
spatial_window_size=self.action_param.spatial_window_size,
window_border=self.action_param.border,
smaller_final_batch_mode=self.net_param.smaller_final_batch_mode,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_balanced_sampler(self):
self.sampler = [[BalancedSampler(
reader=reader,
window_sizes=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_grid_aggregator(self):
self.output_decoder = GridSamplesAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
window_border=self.action_param.border,
interp_order=self.action_param.output_interp_order,
postfix=self.action_param.output_postfix,
fill_constant=self.action_param.fill_constant)
def initialise_resize_aggregator(self):
self.output_decoder = ResizeSamplesAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
window_border=self.action_param.border,
interp_order=self.action_param.output_interp_order,
postfix=self.action_param.output_postfix)
def initialise_identity_aggregator(self):
self.output_decoder = WindowAsImageAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
postfix=self.action_param.output_postfix)
def initialise_sampler(self):
if self.is_training:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][0]()
elif self.is_inference:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][1]()
def initialise_aggregator(self):
if self.net_param.force_output_identity_resizing:
self.initialise_identity_aggregator()
else:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][2]()
def initialise_network(self):
w_regularizer = None
b_regularizer = None
reg_type = self.net_param.reg_type.lower()
decay = self.net_param.decay
if reg_type == 'l2' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l2_regularizer(decay)
b_regularizer = regularizers.l2_regularizer(decay)
elif reg_type == 'l1' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l1_regularizer(decay)
b_regularizer = regularizers.l1_regularizer(decay)
self.net = ApplicationNetFactory.create(self.net_param.name)(
num_classes=1,
w_initializer=InitializerFactory.get_initializer(
name=self.net_param.weight_initializer),
b_initializer=InitializerFactory.get_initializer(
name=self.net_param.bias_initializer),
w_regularizer=w_regularizer,
b_regularizer=b_regularizer,
acti_func=self.net_param.activation_function)
def connect_data_and_network(self,
outputs_collector=None,
gradients_collector=None):
def switch_sampler(for_training):
with tf.name_scope('train' if for_training else 'validation'):
sampler = self.get_sampler()[0][0 if for_training else -1]
return sampler.pop_batch_op()
if self.is_training:
self.patience = self.action_param.patience
self.mode = self.action_param.early_stopping_mode
if self.action_param.validation_every_n > 0:
data_dict = tf.cond(tf.logical_not(self.is_validation),
lambda: switch_sampler(for_training=True),
lambda: switch_sampler(for_training=False))
else:
data_dict = switch_sampler(for_training=True)
image = tf.cast(data_dict['image'], tf.float32)
net_args = {'is_training': self.is_training,
'keep_prob': self.net_param.keep_prob}
net_out = self.net(image, **net_args)
with tf.name_scope('Optimiser'):
optimiser_class = OptimiserFactory.create(
name=self.action_param.optimiser)
self.optimiser = optimiser_class.get_instance(
learning_rate=self.action_param.lr)
loss_func = LossFunction(loss_type=self.action_param.loss_type)
weight_map = data_dict.get('weight', None)
            border = self.regression_param.loss_border
            if border is None or tf.reduce_sum(tf.abs(border)) == 0:
data_loss = loss_func(
prediction=net_out,
ground_truth=data_dict['output'],
weight_map=weight_map)
else:
crop_layer = CropLayer(border)
weight_map = None if weight_map is None else crop_layer(weight_map)
data_loss = loss_func(
prediction=crop_layer(net_out),
ground_truth=crop_layer(data_dict['output']),
weight_map=weight_map)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if self.net_param.decay > 0.0 and reg_losses:
reg_loss = tf.reduce_mean(
[tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
loss = data_loss + reg_loss
else:
loss = data_loss
# Get all vars
to_optimise = tf.trainable_variables()
vars_to_freeze = \
self.action_param.vars_to_freeze or \
self.action_param.vars_to_restore
if vars_to_freeze:
import re
var_regex = re.compile(vars_to_freeze)
# Only optimise vars that are not frozen
to_optimise = \
[v for v in to_optimise if not var_regex.search(v.name)]
tf.logging.info(
"Optimizing %d out of %d trainable variables, "
"the other variables are fixed (--vars_to_freeze %s)",
len(to_optimise),
len(tf.trainable_variables()),
vars_to_freeze)
self.total_loss = loss
grads = self.optimiser.compute_gradients(
loss, var_list=to_optimise, colocate_gradients_with_ops=True)
# collecting gradients variables
gradients_collector.add_to_collection([grads])
# collecting output variables
outputs_collector.add_to_collection(
var=self.total_loss, name='total_loss',
average_over_devices=True, collection=CONSOLE)
outputs_collector.add_to_collection(
var=self.total_loss, name='total_loss',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
outputs_collector.add_to_collection(
var=data_loss, name='loss',
average_over_devices=False, collection=CONSOLE)
outputs_collector.add_to_collection(
var=data_loss, name='loss',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
elif self.is_inference:
data_dict = switch_sampler(for_training=False)
image = tf.cast(data_dict['image'], tf.float32)
net_args = {'is_training': self.is_training,
'keep_prob': self.net_param.keep_prob}
net_out = self.net(image, **net_args)
net_out = PostProcessingLayer('IDENTITY')(net_out)
outputs_collector.add_to_collection(
var=net_out, name='window',
average_over_devices=False, collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=data_dict['image_location'], name='location',
average_over_devices=False, collection=NETWORK_OUTPUT)
self.initialise_aggregator()
def interpret_output(self, batch_output):
if self.is_inference:
return self.output_decoder.decode_batch(
{'window_reg':batch_output['window']}, batch_output['location'])
return True
def initialise_evaluator(self, eval_param):
self.eval_param = eval_param
self.evaluator = RegressionEvaluator(self.readers[0],
self.regression_param,
eval_param)
def add_inferred_output(self, data_param, task_param):
return self.add_inferred_output_like(data_param, task_param, 'output')
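# Hedged note on the dispatch above: SUPPORTED_SAMPLING maps the configured
# `window_sampling` mode to a (training sampler, inference sampler, aggregator)
# triple of initialisers. initialise_sampler() picks index 0 while training and
# index 1 at inference, and initialise_aggregator() calls index 2 unless the
# identity aggregator is forced. For example, window_sampling='resize' uses
# ResizeSampler in both phases and assembles outputs with ResizeSamplesAggregator.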
|
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path, subprocess
from ..mesonlib import EnvironmentException, version_compare
from .compilers import (
GCC_STANDARD,
d_dmd_buildtype_args,
d_gdc_buildtype_args,
d_ldc_buildtype_args,
get_gcc_soname_args,
gnu_color_args,
Compiler,
CompilerArgs,
)
d_feature_args = {'gcc': {'unittest': '-funittest',
'version': '-fversion',
'import_dir': '-J'
},
'llvm': {'unittest': '-unittest',
'version': '-d-version',
'import_dir': '-J'
},
'dmd': {'unittest': '-unittest',
'version': '-version',
'import_dir': '-J'
}
}
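# Illustrative mapping driven by the table above (the feature values are hedged
# examples, not taken from a real meson.build): for
#   d_features = {'unittest': True, 'versions': ['FeatureA'], 'import_dirs': [inc]}
# get_feature_args() below would emit roughly
#   gdc (id 'gcc'):  ['-funittest', '-fversion=FeatureA', '-J<dir>']
#   ldc (id 'llvm'): ['-unittest',  '-d-version=FeatureA', '-J<dir>']
#   dmd (id 'dmd'):  ['-unittest',  '-version=FeatureA',   '-J<dir>']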
class DCompiler(Compiler):
def __init__(self, exelist, version, is_cross, **kwargs):
self.language = 'd'
super().__init__(exelist, version, **kwargs)
self.id = 'unknown'
self.is_cross = is_cross
def sanity_check(self, work_dir, environment):
source_name = os.path.join(work_dir, 'sanity.d')
output_name = os.path.join(work_dir, 'dtest')
with open(source_name, 'w') as ofile:
ofile.write('''void main() {
}
''')
pc = subprocess.Popen(self.exelist + self.get_output_args(output_name) + [source_name], cwd=work_dir)
pc.wait()
if pc.returncode != 0:
raise EnvironmentException('D compiler %s can not compile programs.' % self.name_string())
if subprocess.call(output_name) != 0:
raise EnvironmentException('Executables created by D compiler %s are not runnable.' % self.name_string())
def needs_static_linker(self):
return True
def name_string(self):
return ' '.join(self.exelist)
def get_linker_exelist(self):
return self.exelist[:]
def get_preprocess_only_args(self):
return ['-E']
def get_compile_only_args(self):
return ['-c']
def depfile_for_object(self, objfile):
return objfile + '.' + self.get_depfile_suffix()
def get_depfile_suffix(self):
return 'deps'
def get_pic_args(self):
return ['-fPIC']
def get_std_shared_lib_link_args(self):
return ['-shared']
def get_soname_args(self, prefix, shlib_name, suffix, soversion, is_shared_module):
# FIXME: Make this work for Windows, MacOS and cross-compiling
return get_gcc_soname_args(GCC_STANDARD, prefix, shlib_name, suffix, soversion, is_shared_module)
def get_feature_args(self, kwargs, build_to_src):
res = []
if 'unittest' in kwargs:
unittest = kwargs.pop('unittest')
unittest_arg = d_feature_args[self.id]['unittest']
if not unittest_arg:
raise EnvironmentException('D compiler %s does not support the "unittest" feature.' % self.name_string())
if unittest:
res.append(unittest_arg)
if 'versions' in kwargs:
versions = kwargs.pop('versions')
if not isinstance(versions, list):
versions = [versions]
version_arg = d_feature_args[self.id]['version']
if not version_arg:
raise EnvironmentException('D compiler %s does not support the "feature versions" feature.' % self.name_string())
for v in versions:
res.append('{0}={1}'.format(version_arg, v))
if 'import_dirs' in kwargs:
import_dirs = kwargs.pop('import_dirs')
if not isinstance(import_dirs, list):
import_dirs = [import_dirs]
import_dir_arg = d_feature_args[self.id]['import_dir']
if not import_dir_arg:
raise EnvironmentException('D compiler %s does not support the "string import directories" feature.' % self.name_string())
for idir_obj in import_dirs:
basedir = idir_obj.get_curdir()
for idir in idir_obj.get_incdirs():
                    # Avoid superfluous '/.' at the end of paths when idir is '.'
if idir not in ('', '.'):
expdir = os.path.join(basedir, idir)
else:
expdir = basedir
srctreedir = os.path.join(build_to_src, expdir)
res.append('{0}{1}'.format(import_dir_arg, srctreedir))
if kwargs:
raise EnvironmentException('Unknown D compiler feature(s) selected: %s' % ', '.join(kwargs.keys()))
return res
def get_buildtype_linker_args(self, buildtype):
return []
def get_std_exe_link_args(self):
return []
def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
# This method is to be used by LDC and DMD.
# GDC can deal with the verbatim flags.
if not rpath_paths and not install_rpath:
return []
paths = ':'.join([os.path.join(build_dir, p) for p in rpath_paths])
if build_rpath != '':
paths += ':' + build_rpath
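        # Pad the build-time rpath with 'X' up to the length of the install
        # rpath, presumably so the rpath string can later be rewritten in place
        # at install time without resizing the binary (hedged explanation,
        # inferred from the padding logic below).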
if len(paths) < len(install_rpath):
padding = 'X' * (len(install_rpath) - len(paths))
if not paths:
paths = padding
else:
paths = paths + ':' + padding
return ['-L-rpath={}'.format(paths)]
def _get_compiler_check_args(self, env, extra_args, dependencies, mode='compile'):
if extra_args is None:
extra_args = []
elif isinstance(extra_args, str):
extra_args = [extra_args]
if dependencies is None:
dependencies = []
elif not isinstance(dependencies, list):
dependencies = [dependencies]
# Collect compiler arguments
args = CompilerArgs(self)
for d in dependencies:
# Add compile flags needed by dependencies
args += d.get_compile_args()
if mode == 'link':
# Add link flags needed to find dependencies
args += d.get_link_args()
if mode == 'compile':
# Add DFLAGS from the env
args += env.coredata.get_external_args(self.language)
elif mode == 'link':
# Add LDFLAGS from the env
args += env.coredata.get_external_link_args(self.language)
# extra_args must override all other arguments, so we add them last
args += extra_args
return args
def compiles(self, code, env, extra_args=None, dependencies=None, mode='compile'):
args = self._get_compiler_check_args(env, extra_args, dependencies, mode)
with self.compile(code, args, mode) as p:
return p.returncode == 0
def has_multi_arguments(self, args, env):
return self.compiles('int i;\n', env, extra_args=args)
@classmethod
def translate_args_to_nongnu(cls, args):
dcargs = []
# Translate common arguments to flags the LDC/DMD compilers
# can understand.
# The flags might have been added by pkg-config files,
# and are therefore out of the user's control.
for arg in args:
if arg == '-pthread':
continue
if arg.startswith('-Wl,'):
linkargs = arg[arg.index(',') + 1:].split(',')
for la in linkargs:
dcargs.append('-L' + la.strip())
continue
elif arg.startswith('-link-defaultlib') or arg.startswith('-linker'):
# these are special arguments to the LDC linker call,
# arguments like "-link-defaultlib-shared" do *not*
# denote a library to be linked, but change the default
# Phobos/DRuntime linking behavior, while "-linker" sets the
# default linker.
dcargs.append(arg)
continue
elif arg.startswith('-l'):
# translate library link flag
dcargs.append('-L' + arg)
continue
elif arg.startswith('-L/') or arg.startswith('-L./'):
            # We need to handle cases where '-L' was added by e.g. a pkg-config
            # file to select a linker search path. We cannot, however,
            # unconditionally prefix every '-L' with '-L', because the user may
            # have passed the flag deliberately so that this compiler forwards
            # it straight to the linker. Hence, we guess whether the flag was
            # intended as a linker search path.
dcargs.append('-L' + arg)
continue
dcargs.append(arg)
return dcargs
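# Hedged example of the translation above (the input flags are made up):
#   ['-pthread', '-Wl,--as-needed,-O1', '-lphobos2', '-L/usr/lib']
# becomes
#   ['-L--as-needed', '-L-O1', '-L-lphobos2', '-L-L/usr/lib']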
class GnuDCompiler(DCompiler):
def __init__(self, exelist, version, is_cross, **kwargs):
DCompiler.__init__(self, exelist, version, is_cross, **kwargs)
self.id = 'gcc'
default_warn_args = ['-Wall', '-Wdeprecated']
self.warn_args = {'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
self.base_options = ['b_colorout', 'b_sanitize', 'b_staticpic']
self._has_color_support = version_compare(self.version, '>=4.9')
# dependencies were implemented before, but broken - support was fixed in GCC 7.1+
# (and some backported versions)
self._has_deps_support = version_compare(self.version, '>=7.1')
def get_colorout_args(self, colortype):
if self._has_color_support:
return gnu_color_args[colortype][:]
return []
def get_dependency_gen_args(self, outtarget, outfile):
if not self._has_deps_support:
return []
return ['-MD', '-MQ', outtarget, '-MF', outfile]
def get_output_args(self, target):
return ['-o', target]
def get_linker_output_args(self, target):
return ['-o', target]
def get_include_args(self, path, is_system):
return ['-I' + path]
def get_warn_args(self, level):
return self.warn_args[level]
def get_werror_args(self):
return ['-Werror']
def get_linker_search_args(self, dirname):
return ['-L' + dirname]
def get_buildtype_args(self, buildtype):
return d_gdc_buildtype_args[buildtype]
def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
return self.build_unix_rpath_args(build_dir, from_dir, rpath_paths, build_rpath, install_rpath)
class LLVMDCompiler(DCompiler):
def __init__(self, exelist, version, is_cross, **kwargs):
DCompiler.__init__(self, exelist, version, is_cross, **kwargs)
self.id = 'llvm'
self.base_options = ['b_coverage', 'b_colorout']
def get_colorout_args(self, colortype):
if colortype == 'always':
return ['-enable-color']
return []
def get_dependency_gen_args(self, outtarget, outfile):
# LDC using the -deps flag returns a non-Makefile dependency-info file, which
# the backends can not use. So we disable this feature for now.
return []
def get_output_args(self, target):
return ['-of', target]
def get_linker_output_args(self, target):
return ['-of', target]
def get_include_args(self, path, is_system):
return ['-I' + path]
def get_warn_args(self, level):
if level == '2' or level == '3':
return ['-wi', '-dw']
else:
return ['-wi']
def get_werror_args(self):
return ['-w']
def get_coverage_args(self):
return ['-cov']
def get_buildtype_args(self, buildtype):
return d_ldc_buildtype_args[buildtype]
def get_pic_args(self):
return ['-relocation-model=pic']
def get_linker_search_args(self, dirname):
# -L is recognized as "add this to the search path" by the linker,
# while the compiler recognizes it as "pass to linker". So, the first
# -L is for the compiler, telling it to pass the second -L to the linker.
return ['-L-L' + dirname]
@classmethod
def unix_args_to_native(cls, args):
return cls.translate_args_to_nongnu(args)
class DmdDCompiler(DCompiler):
def __init__(self, exelist, version, is_cross, **kwargs):
DCompiler.__init__(self, exelist, version, is_cross, **kwargs)
self.id = 'dmd'
self.base_options = ['b_coverage', 'b_colorout']
def get_colorout_args(self, colortype):
if colortype == 'always':
return ['-color=on']
return []
def get_dependency_gen_args(self, outtarget, outfile):
        # DMD's -deps flag, like LDC's, emits a non-Makefile dependency-info
        # file which the backends can not use, so this feature is disabled for now.
return []
def get_output_args(self, target):
return ['-of' + target]
def get_werror_args(self):
return ['-w']
def get_linker_output_args(self, target):
return ['-of' + target]
def get_include_args(self, path, is_system):
return ['-I' + path]
def get_warn_args(self, level):
return ['-wi']
def get_coverage_args(self):
return ['-cov']
def get_linker_search_args(self, dirname):
# -L is recognized as "add this to the search path" by the linker,
# while the compiler recognizes it as "pass to linker". So, the first
# -L is for the compiler, telling it to pass the second -L to the linker.
return ['-L-L' + dirname]
def get_buildtype_args(self, buildtype):
return d_dmd_buildtype_args[buildtype]
def get_std_shared_lib_link_args(self):
return ['-shared', '-defaultlib=libphobos2.so']
@classmethod
def unix_args_to_native(cls, args):
return cls.translate_args_to_nongnu(args)
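# Hedged note: the compiler detection that chooses between these classes lives
# elsewhere in meson (not in this file); roughly, gdc maps to GnuDCompiler,
# ldc2 to LLVMDCompiler and dmd to DmdDCompiler, which is why e.g.
# get_output_args('obj') yields ['-o', 'obj'] for gdc but ['-of', 'obj'] for
# ldc2 and ['-ofobj'] for dmd.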
|
|
# Copyright (c) 2009-2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Gabe Black
# William Wang
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice, PioDevice, IsaFake, BadAddr, DmaDevice
from Pci import PciConfigAll
from Ethernet import NSGigE, IGbE_e1000, IGbE_igb
from Ide import *
from Platform import Platform
from Terminal import Terminal
from Uart import Uart
from SimpleMemory import SimpleMemory
from Gic import *
class AmbaDevice(BasicPioDevice):
type = 'AmbaDevice'
abstract = True
cxx_header = "dev/arm/amba_device.hh"
amba_id = Param.UInt32("ID of AMBA device for kernel detection")
class AmbaIntDevice(AmbaDevice):
type = 'AmbaIntDevice'
abstract = True
cxx_header = "dev/arm/amba_device.hh"
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
int_num = Param.UInt32("Interrupt number that connects to GIC")
int_delay = Param.Latency("100ns",
"Time between action and interrupt generation by device")
class AmbaDmaDevice(DmaDevice):
type = 'AmbaDmaDevice'
abstract = True
cxx_header = "dev/arm/amba_device.hh"
pio_addr = Param.Addr("Address for AMBA slave interface")
pio_latency = Param.Latency("10ns", "Time between action and write/read result by AMBA DMA Device")
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
int_num = Param.UInt32("Interrupt number that connects to GIC")
amba_id = Param.UInt32("ID of AMBA device for kernel detection")
class A9SCU(BasicPioDevice):
type = 'A9SCU'
cxx_header = "dev/arm/a9scu.hh"
class RealViewCtrl(BasicPioDevice):
type = 'RealViewCtrl'
cxx_header = "dev/arm/rv_ctrl.hh"
proc_id0 = Param.UInt32(0x0C000000, "Processor ID, SYS_PROCID")
proc_id1 = Param.UInt32(0x0C000222, "Processor ID, SYS_PROCID1")
idreg = Param.UInt32(0x00000000, "ID Register, SYS_ID")
class AmbaFake(AmbaDevice):
type = 'AmbaFake'
cxx_header = "dev/arm/amba_fake.hh"
    ignore_access = Param.Bool(False, "Ignore reads/writes to this device (e.g. IsaFake + AMBA)")
    amba_id = 0
class Pl011(Uart):
type = 'Pl011'
cxx_header = "dev/arm/pl011.hh"
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
int_num = Param.UInt32("Interrupt number that connects to GIC")
    end_on_eot = Param.Bool(False, "End the simulation when an EOT is received on the UART")
int_delay = Param.Latency("100ns", "Time between action and interrupt generation by UART")
class Sp804(AmbaDevice):
type = 'Sp804'
cxx_header = "dev/arm/timer_sp804.hh"
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
int_num0 = Param.UInt32("Interrupt number that connects to GIC")
clock0 = Param.Clock('1MHz', "Clock speed of the input")
int_num1 = Param.UInt32("Interrupt number that connects to GIC")
clock1 = Param.Clock('1MHz', "Clock speed of the input")
amba_id = 0x00141804
class CpuLocalTimer(BasicPioDevice):
type = 'CpuLocalTimer'
cxx_header = "dev/arm/timer_cpulocal.hh"
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num_timer = Param.UInt32("Interrupt number used per-cpu to GIC")
int_num_watchdog = Param.UInt32("Interrupt number for per-cpu watchdog to GIC")
# Override the default clock
clock = '1GHz'
class PL031(AmbaIntDevice):
type = 'PL031'
cxx_header = "dev/arm/rtc_pl031.hh"
time = Param.Time('01/01/2009', "System time to use ('Now' for actual time)")
amba_id = 0x00341031
class Pl050(AmbaIntDevice):
type = 'Pl050'
cxx_header = "dev/arm/kmi.hh"
vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer display")
is_mouse = Param.Bool(False, "Is this interface a mouse, if not a keyboard")
int_delay = '1us'
amba_id = 0x00141050
class Pl111(AmbaDmaDevice):
type = 'Pl111'
cxx_header = "dev/arm/pl111.hh"
pixel_clock = Param.Clock('24MHz', "Pixel clock")
vnc = Param.VncInput(Parent.any, "Vnc server for remote frame buffer display")
amba_id = 0x00141111
class RealView(Platform):
type = 'RealView'
cxx_header = "dev/arm/realview.hh"
system = Param.System(Parent.any, "system")
    pci_cfg_base = Param.Addr(0, "Base address of PCI Configuration Space")
mem_start_addr = Param.Addr(0, "Start address of main memory")
max_mem_size = Param.Addr('256MB', "Maximum amount of RAM supported by platform")
def setupBootLoader(self, mem_bus, cur_sys, loc):
self.nvmem = SimpleMemory(range = AddrRange(Addr('2GB'),
size = '64MB'),
zero = True)
self.nvmem.port = mem_bus.master
cur_sys.boot_loader = loc('boot.arm')
# Reference for memory map and interrupt number
# RealView Platform Baseboard Explore for Cortex-A9 User Guide (ARM DUI 0440A)
# Chapter 4: Programmer's Reference
class RealViewPBX(RealView):
uart = Pl011(pio_addr=0x10009000, int_num=44)
realview_io = RealViewCtrl(pio_addr=0x10000000)
gic = Pl390()
timer0 = Sp804(int_num0=36, int_num1=36, pio_addr=0x10011000)
timer1 = Sp804(int_num0=37, int_num1=37, pio_addr=0x10012000)
local_cpu_timer = CpuLocalTimer(int_num_timer=29, int_num_watchdog=30, pio_addr=0x1f000600)
clcd = Pl111(pio_addr=0x10020000, int_num=55)
kmi0 = Pl050(pio_addr=0x10006000, int_num=52)
kmi1 = Pl050(pio_addr=0x10007000, int_num=53, is_mouse=True)
a9scu = A9SCU(pio_addr=0x1f000000)
cf_ctrl = IdeController(disks=[], pci_func=0, pci_dev=7, pci_bus=2,
io_shift = 1, ctrl_offset = 2, Command = 0x1,
BAR0 = 0x18000000, BAR0Size = '16B',
BAR1 = 0x18000100, BAR1Size = '1B',
BAR0LegacyIO = True, BAR1LegacyIO = True)
l2x0_fake = IsaFake(pio_addr=0x1f002000, pio_size=0xfff)
flash_fake = IsaFake(pio_addr=0x40000000, pio_size=0x20000000,
fake_mem=True)
dmac_fake = AmbaFake(pio_addr=0x10030000)
uart1_fake = AmbaFake(pio_addr=0x1000a000)
uart2_fake = AmbaFake(pio_addr=0x1000b000)
uart3_fake = AmbaFake(pio_addr=0x1000c000)
smc_fake = AmbaFake(pio_addr=0x100e1000)
sp810_fake = AmbaFake(pio_addr=0x10001000, ignore_access=True)
watchdog_fake = AmbaFake(pio_addr=0x10010000)
gpio0_fake = AmbaFake(pio_addr=0x10013000)
gpio1_fake = AmbaFake(pio_addr=0x10014000)
gpio2_fake = AmbaFake(pio_addr=0x10015000)
ssp_fake = AmbaFake(pio_addr=0x1000d000)
sci_fake = AmbaFake(pio_addr=0x1000e000)
aaci_fake = AmbaFake(pio_addr=0x10004000)
mmc_fake = AmbaFake(pio_addr=0x10005000)
rtc = PL031(pio_addr=0x10017000, int_num=42)
# Attach I/O devices that are on chip and also set the appropriate
# ranges for the bridge
def attachOnChipIO(self, bus, bridge):
self.gic.pio = bus.master
self.l2x0_fake.pio = bus.master
self.a9scu.pio = bus.master
self.local_cpu_timer.pio = bus.master
# Bridge ranges based on excluding what is part of on-chip I/O
# (gic, l2x0, a9scu, local_cpu_timer)
bridge.ranges = [AddrRange(self.realview_io.pio_addr,
self.a9scu.pio_addr - 1),
AddrRange(self.flash_fake.pio_addr,
self.flash_fake.pio_addr + \
self.flash_fake.pio_size - 1)]
# Attach I/O devices to specified bus object. Can't do this
# earlier, since the bus object itself is typically defined at the
# System level.
def attachIO(self, bus):
self.uart.pio = bus.master
self.realview_io.pio = bus.master
self.timer0.pio = bus.master
self.timer1.pio = bus.master
self.clcd.pio = bus.master
self.clcd.dma = bus.slave
self.kmi0.pio = bus.master
self.kmi1.pio = bus.master
self.cf_ctrl.pio = bus.master
self.cf_ctrl.config = bus.master
self.cf_ctrl.dma = bus.slave
self.dmac_fake.pio = bus.master
self.uart1_fake.pio = bus.master
self.uart2_fake.pio = bus.master
self.uart3_fake.pio = bus.master
self.smc_fake.pio = bus.master
self.sp810_fake.pio = bus.master
self.watchdog_fake.pio = bus.master
self.gpio0_fake.pio = bus.master
self.gpio1_fake.pio = bus.master
self.gpio2_fake.pio = bus.master
self.ssp_fake.pio = bus.master
self.sci_fake.pio = bus.master
self.aaci_fake.pio = bus.master
self.mmc_fake.pio = bus.master
self.rtc.pio = bus.master
self.flash_fake.pio = bus.master
# Reference for memory map and interrupt number
# RealView Emulation Baseboard User Guide (ARM DUI 0143B)
# Chapter 4: Programmer's Reference
class RealViewEB(RealView):
uart = Pl011(pio_addr=0x10009000, int_num=44)
realview_io = RealViewCtrl(pio_addr=0x10000000)
gic = Pl390(dist_addr=0x10041000, cpu_addr=0x10040000)
timer0 = Sp804(int_num0=36, int_num1=36, pio_addr=0x10011000)
timer1 = Sp804(int_num0=37, int_num1=37, pio_addr=0x10012000)
clcd = Pl111(pio_addr=0x10020000, int_num=23)
kmi0 = Pl050(pio_addr=0x10006000, int_num=20)
kmi1 = Pl050(pio_addr=0x10007000, int_num=21, is_mouse=True)
l2x0_fake = IsaFake(pio_addr=0x1f002000, pio_size=0xfff, warn_access="1")
flash_fake = IsaFake(pio_addr=0x40000000, pio_size=0x20000000-1,
fake_mem=True)
dmac_fake = AmbaFake(pio_addr=0x10030000)
uart1_fake = AmbaFake(pio_addr=0x1000a000)
uart2_fake = AmbaFake(pio_addr=0x1000b000)
uart3_fake = AmbaFake(pio_addr=0x1000c000)
smcreg_fake = IsaFake(pio_addr=0x10080000, pio_size=0x10000-1)
smc_fake = AmbaFake(pio_addr=0x100e1000)
sp810_fake = AmbaFake(pio_addr=0x10001000, ignore_access=True)
watchdog_fake = AmbaFake(pio_addr=0x10010000)
gpio0_fake = AmbaFake(pio_addr=0x10013000)
gpio1_fake = AmbaFake(pio_addr=0x10014000)
gpio2_fake = AmbaFake(pio_addr=0x10015000)
ssp_fake = AmbaFake(pio_addr=0x1000d000)
sci_fake = AmbaFake(pio_addr=0x1000e000)
aaci_fake = AmbaFake(pio_addr=0x10004000)
mmc_fake = AmbaFake(pio_addr=0x10005000)
rtc_fake = AmbaFake(pio_addr=0x10017000, amba_id=0x41031)
# Attach I/O devices that are on chip and also set the appropriate
# ranges for the bridge
def attachOnChipIO(self, bus, bridge):
self.gic.pio = bus.master
self.l2x0_fake.pio = bus.master
# Bridge ranges based on excluding what is part of on-chip I/O
# (gic, l2x0)
bridge.ranges = [AddrRange(self.realview_io.pio_addr,
self.gic.cpu_addr - 1),
AddrRange(self.flash_fake.pio_addr, Addr.max)]
# Attach I/O devices to specified bus object. Can't do this
# earlier, since the bus object itself is typically defined at the
# System level.
def attachIO(self, bus):
self.uart.pio = bus.master
self.realview_io.pio = bus.master
self.timer0.pio = bus.master
self.timer1.pio = bus.master
self.clcd.pio = bus.master
self.clcd.dma = bus.slave
self.kmi0.pio = bus.master
self.kmi1.pio = bus.master
self.dmac_fake.pio = bus.master
self.uart1_fake.pio = bus.master
self.uart2_fake.pio = bus.master
self.uart3_fake.pio = bus.master
self.smc_fake.pio = bus.master
self.sp810_fake.pio = bus.master
self.watchdog_fake.pio = bus.master
self.gpio0_fake.pio = bus.master
self.gpio1_fake.pio = bus.master
self.gpio2_fake.pio = bus.master
self.ssp_fake.pio = bus.master
self.sci_fake.pio = bus.master
self.aaci_fake.pio = bus.master
self.mmc_fake.pio = bus.master
self.rtc_fake.pio = bus.master
self.flash_fake.pio = bus.master
self.smcreg_fake.pio = bus.master
class VExpress_EMM(RealView):
mem_start_addr = '2GB'
max_mem_size = '2GB'
pci_cfg_base = 0x30000000
uart = Pl011(pio_addr=0x1c090000, int_num=37)
realview_io = RealViewCtrl(proc_id0=0x14000000, proc_id1=0x14000000, pio_addr=0x1C010000)
gic = Pl390(dist_addr=0x2C001000, cpu_addr=0x2C002000)
local_cpu_timer = CpuLocalTimer(int_num_timer=29, int_num_watchdog=30, pio_addr=0x2C080000)
timer0 = Sp804(int_num0=34, int_num1=34, pio_addr=0x1C110000, clock0='1MHz', clock1='1MHz')
timer1 = Sp804(int_num0=35, int_num1=35, pio_addr=0x1C120000, clock0='1MHz', clock1='1MHz')
clcd = Pl111(pio_addr=0x1c1f0000, int_num=46)
kmi0 = Pl050(pio_addr=0x1c060000, int_num=44)
kmi1 = Pl050(pio_addr=0x1c070000, int_num=45, is_mouse=True)
cf_ctrl = IdeController(disks=[], pci_func=0, pci_dev=0, pci_bus=2,
io_shift = 2, ctrl_offset = 2, Command = 0x1,
BAR0 = 0x1C1A0000, BAR0Size = '256B',
BAR1 = 0x1C1A0100, BAR1Size = '4096B',
BAR0LegacyIO = True, BAR1LegacyIO = True)
pciconfig = PciConfigAll(size='256MB')
ethernet = IGbE_e1000(pci_bus=0, pci_dev=0, pci_func=0,
InterruptLine=1, InterruptPin=1)
ide = IdeController(disks = [], pci_bus=0, pci_dev=1, pci_func=0,
InterruptLine=2, InterruptPin=2)
vram = SimpleMemory(range = AddrRange(0x18000000, size='32MB'),
zero = True)
rtc = PL031(pio_addr=0x1C170000, int_num=36)
l2x0_fake = IsaFake(pio_addr=0x2C100000, pio_size=0xfff)
uart1_fake = AmbaFake(pio_addr=0x1C0A0000)
uart2_fake = AmbaFake(pio_addr=0x1C0B0000)
uart3_fake = AmbaFake(pio_addr=0x1C0C0000)
sp810_fake = AmbaFake(pio_addr=0x1C020000, ignore_access=True)
watchdog_fake = AmbaFake(pio_addr=0x1C0F0000)
aaci_fake = AmbaFake(pio_addr=0x1C040000)
lan_fake = IsaFake(pio_addr=0x1A000000, pio_size=0xffff)
usb_fake = IsaFake(pio_addr=0x1B000000, pio_size=0x1ffff)
mmc_fake = AmbaFake(pio_addr=0x1c050000)
def setupBootLoader(self, mem_bus, cur_sys, loc):
self.nvmem = SimpleMemory(range = AddrRange(0, size = '64MB'),
zero = True)
self.nvmem.port = mem_bus.master
cur_sys.boot_loader = loc('boot_emm.arm')
cur_sys.atags_addr = 0x80000100
# Attach I/O devices that are on chip and also set the appropriate
# ranges for the bridge
def attachOnChipIO(self, bus, bridge):
self.gic.pio = bus.master
self.local_cpu_timer.pio = bus.master
# Bridge ranges based on excluding what is part of on-chip I/O
# (gic, a9scu)
bridge.ranges = [AddrRange(0x2F000000, size='16MB'),
AddrRange(0x30000000, size='256MB'),
AddrRange(0x40000000, size='512MB'),
AddrRange(0x18000000, size='64MB'),
AddrRange(0x1C000000, size='64MB')]
# Attach I/O devices to specified bus object. Can't do this
# earlier, since the bus object itself is typically defined at the
# System level.
def attachIO(self, bus):
self.uart.pio = bus.master
self.realview_io.pio = bus.master
self.timer0.pio = bus.master
self.timer1.pio = bus.master
self.clcd.pio = bus.master
self.clcd.dma = bus.slave
self.kmi0.pio = bus.master
self.kmi1.pio = bus.master
self.cf_ctrl.pio = bus.master
self.cf_ctrl.dma = bus.slave
self.cf_ctrl.config = bus.master
self.rtc.pio = bus.master
bus.use_default_range = True
self.vram.port = bus.master
self.ide.pio = bus.master
self.ide.config = bus.master
self.ide.dma = bus.slave
self.ethernet.pio = bus.master
self.ethernet.config = bus.master
self.ethernet.dma = bus.slave
self.pciconfig.pio = bus.default
self.l2x0_fake.pio = bus.master
self.uart1_fake.pio = bus.master
self.uart2_fake.pio = bus.master
self.uart3_fake.pio = bus.master
self.sp810_fake.pio = bus.master
self.watchdog_fake.pio = bus.master
self.aaci_fake.pio = bus.master
self.lan_fake.pio = bus.master
self.usb_fake.pio = bus.master
self.mmc_fake.pio = bus.master
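# Hedged usage sketch (not part of this file): a full-system configuration
# script typically instantiates one of the platforms above and wires it to the
# memory and I/O buses roughly like this; `system`, `membus`, `iobus` and
# `loc_fn` (a helper that resolves binary file paths) are assumptions for
# illustration.
#
#   system.realview = VExpress_EMM()
#   system.realview.setupBootLoader(membus, system, loc_fn)
#   system.realview.attachOnChipIO(membus, system.bridge)
#   system.realview.attachIO(iobus)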
|
|
#!/usr/bin/env python
"""
@package mi.core.instrument.data_particle_generator Base data particle generator
@file mi/core/instrument/data_particle_generator.py
@author Steve Foley
@brief Contains logic to generate data particles to be exchanged between
the driver and agent. This involves a JSON interchange format
"""
import json
import time
import ntplib
import base64
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException
from mi.core.log import get_logger
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
log = get_logger()
class CommonDataParticleType(BaseEnum):
"""
This enum defines all the common particle types defined in the modules. Currently there is only one, but by
using an enum here we have the opportunity to define more common data particles.
"""
RAW = "raw"
class DataParticleKey(BaseEnum):
PKT_FORMAT_ID = "pkt_format_id"
PKT_VERSION = "pkt_version"
STREAM_NAME = "stream_name"
INTERNAL_TIMESTAMP = "internal_timestamp"
PORT_TIMESTAMP = "port_timestamp"
DRIVER_TIMESTAMP = "driver_timestamp"
PREFERRED_TIMESTAMP = "preferred_timestamp"
QUALITY_FLAG = "quality_flag"
VALUES = "values"
VALUE_ID = "value_id"
VALUE = "value"
BINARY = "binary"
NEW_SEQUENCE = "new_sequence"
class DataParticleValue(BaseEnum):
JSON_DATA = "JSON_Data"
ENG = "eng"
OK = "ok"
CHECKSUM_FAILED = "checksum_failed"
OUT_OF_RANGE = "out_of_range"
INVALID = "invalid"
QUESTIONABLE = "questionable"
class DataParticle(object):
"""
This class is responsible for storing and ultimately generating data
particles in the designated format from the associated inputs. It
fills in fields as necessary, and is a valid Data Particle
that can be sent up to the InstrumentAgent.
It is the intent that this class is subclassed as needed if an instrument must
modify fields in the outgoing packet. The hope is to have most of the superclass
code be called by the child class with just values overridden as needed.
"""
# data particle type is intended to be defined in each derived data particle class. This value should be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
quality_flag=DataParticleValue.OK,
new_sequence=None):
""" Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
def __eq__(self, arg):
"""
Equality check for testing purposes.
"""
if self.data_particle_type() != arg.data_particle_type():
log.debug('Data particle type does not match: %s %s', self.data_particle_type(), arg.data_particle_type())
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
generated1 = self.generate()
generated2 = arg.generate()
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
        if differing:
            log.error('Value mismatch between particle dictionaries: %r', differing)
            return False
        return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
@classmethod
def type(cls):
"""
return the data particle type
@return: data particle type
"""
return cls._data_particle_type
def set_internal_timestamp(self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_value(self, value_id, value):
"""
Set a content value, restricted as necessary
@param value_id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (value_id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(value_id, value))
def get_value(self, value_id):
""" Return a stored value
@param value_id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(value_id):
return self.contents[value_id]
else:
            raise NotImplementedException("Value %s not available in particle!" % value_id)
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
        @throws InstrumentDriverException if there is a problem with the inputs
"""
        # Do we want downstream processes to check this?
# for time in [DataParticleKey.INTERNAL_TIMESTAMP,
# DataParticleKey.DRIVER_TIMESTAMP,
# DataParticleKey.PORT_TIMESTAMP]:
# if not self._check_timestamp(self.contents[time]):
# raise SampleException("Invalid port agent timestamp in raw packet")
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
values = self._build_parsed_values()
if all([self.contents[DataParticleKey.PREFERRED_TIMESTAMP] == DataParticleKey.PORT_TIMESTAMP,
self.contents[DataParticleKey.PORT_TIMESTAMP] == 0,
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] is not None]):
self.contents[DataParticleKey.PREFERRED_TIMESTAMP] = DataParticleKey.INTERNAL_TIMESTAMP
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
@param sorted ignored, maintained only to avoid breaking drivers
@return A dictionary representing this particle
@throws InstrumentDriverException If there is a problem with the inputs
"""
return self.generate_dict()
def _build_parsed_values(self):
"""
        Build values of a parsed structure. Just the values are built
        so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws SampleException When there is a problem with the preferred
timestamp in the sample.
"""
if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:
raise SampleException("Missing preferred timestamp, %s, in particle" %
self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
# This should be handled downstream. Don't want to not publish data because
# the port agent stopped putting out timestamps
# if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:
# raise SampleException("Preferred timestamp, %s, is not defined" %
# self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
return True
def _encode_value(self, name, value, encoding_function):
"""
Encode a value using the encoding function, if it fails store the error in a queue
"""
encoded_val = None
try:
encoded_val = encoding_function(value)
except Exception:
log.error("Data particle error encoding. Name:%s Value:%s", name, value)
self._encoding_errors.append({name: value})
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
class RawDataParticleKey(BaseEnum):
PAYLOAD = "raw"
LENGTH = "length"
TYPE = "type"
CHECKSUM = "checksum"
class RawDataParticle(DataParticle):
"""
    This class is a common data particle for generating data particles of raw
data.
It essentially is a translation of the port agent packet
"""
_data_particle_type = CommonDataParticleType.RAW
def _build_parsed_values(self):
"""
Build a particle out of a port agent packet.
@returns A list that is ready to be added to the "values" tag before
the structure is JSONified
"""
port_agent_packet = self.raw_data
if not isinstance(port_agent_packet, dict):
raise SampleException("raw data not a dictionary")
for param in ["raw", "length", "type", "checksum"]:
if param not in port_agent_packet:
raise SampleException("raw data not a complete port agent packet. missing %s" % param)
payload = None
length = None
ptype = None
checksum = None
# Attempt to convert values
try:
payload = base64.b64encode(port_agent_packet.get("raw"))
except TypeError:
pass
try:
length = int(port_agent_packet.get("length"))
except TypeError:
pass
try:
ptype = int(port_agent_packet.get("type"))
except TypeError:
pass
try:
checksum = int(port_agent_packet.get("checksum"))
except TypeError:
pass
result = [{
DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
DataParticleKey.VALUE: payload,
DataParticleKey.BINARY: True},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
DataParticleKey.VALUE: length},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
DataParticleKey.VALUE: ptype},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
DataParticleKey.VALUE: checksum},
]
return result
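# Hedged usage sketch (the packet keys follow the checks in _build_parsed_values
# above; the payload bytes and values are made up):
#
#   packet = {"raw": "\x01\x02", "length": 2, "type": 1, "checksum": 3}
#   particle = RawDataParticle(packet,
#                              port_timestamp=ntplib.system_to_ntp_time(time.time()))
#   particle.generate()   # dict with stream_name "raw" and a base64-encoded payload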
|
|
#
# Copyright 2015 Naver Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import tempfile
import traceback
from fabric.colors import *
from fabric.api import *
from conf_mnode import *
from conf_dnode import *
from conf_cluster import *
from conf_redis import *
from conf_gateway import *
from conf_smr import *
TELNET_TIMEOUT = 300
def check_config():
# Check attributes
attrs = [
# Management node
"CONF_MASTER_IP",
"CONF_MASTER_PORT",
"CONF_MASTER_MGMT_CONS",
"LOCAL_BINARY_PATH",
"MIN_TIME_TO_ATTEMPT_MIG2PC",
"USERNAME",
"REMOTE_NBASE_ARC",
"REMOTE_BIN_DIR",
"REMOTE_PGS_DIR",
"REMOTE_GW_DIR",
"ARC_BASH_PROFILE",
"SHELL",
# Data node
"REDIS_VERSION",
"GW_VERSION",
"SMR_VERSION",
"GW_BASE_PORT",
"PGS_BASE_PORT",
"BGSAVE_BASE",
"ID_GAP",
"CRONSAVE_BASE_HOUR",
"CRONSAVE_BASE_MIN",
"NUM_WORKERS_PER_GATEWAY",
"NUM_CLNT_MIN",
"CLIENT_TIMEOUT"]
ok = True
for attr in attrs:
        if attr not in globals():
            print "%s is not defined in config." % magenta(attr)
            ok = False
        elif globals()[attr] is None:
print "%s is empty." % magenta(attr)
ok = False
return ok
# redis configuration to file
# parameter : conf = {"redis_config_key" : "redis_config_value", ...}
# return : path of temporary-redis-config-file
def make_redis_conf_file(conf):
try:
(fd, filename) = tempfile.mkstemp()
tfile = os.fdopen(fd, "w")
for e in sorted(conf.iteritems(), key=lambda (k,v): (k)):
k = e[0]
v = e[1]
if k == "cronsave":
for save in v:
tfile.write("%s %d %d" % (k, save[0], save[1]) + os.linesep)
elif k == "client-output-buffer-limit":
for o in v:
tfile.write("%s %s" % (k, o) + os.linesep)
else:
tfile.write("%s %s" % (k, v) + os.linesep)
tfile.close()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=3, file=sys.stdout)
return None
return filename
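# Illustrative input/output for make_redis_conf_file() (values are made up):
#   conf = {"port": "6379",
#           "cronsave": [(3, 0), (15, 30)],
#           "client-output-buffer-limit": ["normal 0 0 0"]}
# writes a temporary file containing, in key-sorted order,
#   client-output-buffer-limit normal 0 0 0
#   cronsave 3 0
#   cronsave 15 30
#   port 6379
# and returns its path (or None if writing failed).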
# make redis configuration
# return : conf = {"redis_config_key" : "redis_config_value", ...}
def make_redis_conf(cluster_name, smr_base_port, redis_port, cronsave_times):
try:
conf = {}
for e in REDIS_CONFIG:
if e[0] == 'cronsave':
conf["cronsave"] = cronsave_times
elif e[0] == "smr-local-port":
conf[e[0]] = str(smr_base_port)
elif e[0] == "port":
conf[e[0]] = str(redis_port)
else:
conf[e[0]] = e[1]
redis_conf = get_cluster_opt(cluster_name)("redis").v()
if redis_conf is not None:
for k, v in redis_conf.iteritems():
if k == "client-output-buffer-limit":
for confv in conf[k]:
find = False
for i in range(len(confv)):
if confv[i].split(" ")[0] == v.split(" ")[0]:
confv[i] = v
find = True
if find == False:
confv.append(v)
else:
conf[k] = v
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=3, file=sys.stdout)
return None
return conf
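# Usage sketch (illustrative values; cronsave_times is assumed to be a list
# of [hour, minute] pairs as consumed by make_redis_conf_file):
#   conf = make_redis_conf("my_cluster", 7000, 7009, [[3, 0]])
#   path = make_redis_conf_file(conf)
# "smr-local-port" and "port" are overridden with the given ports, and any
# per-cluster "redis" overrides from CLUSTER_CONFIG are merged on top.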
def get_cluster_conf(cluster_name):
conf = filter(lambda x: x['cluster_name'] == cluster_name, CLUSTER_CONFIG)
if len(conf) > 1:
warn(red('Too many configurations for %s' % cluster_name))
return None
elif len(conf) == 0:
return None
return conf[0]
class Traverser:
def __init__(self, d, lift, op):
self.d = d
self.lift = lift
self.op = op
def __call__(self, arg):
self.d = self.lift(self.d)
result = self.op(self.d, arg)
return Traverser(result, self.lift, self.op)
def v(self):
return self.d
def make_dict_traverser(dictionary):
return Traverser(dictionary,
lambda d: d if isinstance(d, dict) else {},
lambda d, k: d[k] if d.has_key(k) else None)
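# Usage sketch: Traverser allows chained, KeyError-free dictionary lookups.
#   t = make_dict_traverser({"smr": {"log_level": "INFO"}})
#   t("smr")("log_level").v()  # -> "INFO"
#   t("smr")("missing").v()    # -> None (missing keys yield None, not KeyError)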
def get_cluster_opt(cluster_name):
conf = get_cluster_conf(cluster_name)
# Set default configurations
if conf == None:
conf = {
"cluster_name" : cluster_name,
"smr" : SMR_CONFIG,
}
elif conf.has_key("smr") == False:
conf["smr"] = SMR_CONFIG
else:
for k, v in SMR_CONFIG.items():
if conf["smr"].has_key(k) == False:
conf["smr"][k] = v
return make_dict_traverser(conf)
def get_gw_additional_option():
if GW_ADDITIONAL_OPTION.has_key("opt"):
return GW_ADDITIONAL_OPTION["opt"]
else:
return ""
def make_bash_profile_file(arc_path):
try:
(fd, filename) = tempfile.mkstemp()
tfile = os.fdopen(fd, "w")
tfile.write("export NBASE_ARC_HOME=%s" % arc_path + os.linesep)
tfile.write("export PATH=$NBASE_ARC_HOME/bin:$PATH")
tfile.close()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=3, file=sys.stdout)
return None
return filename
|
|
from __future__ import absolute_import, unicode_literals
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import IntegerField, BooleanField
from django.forms.util import ErrorList
from django.forms.widgets import Media, HiddenInput
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.six.moves import xrange
from django.utils.translation import ugettext as _
__all__ = ('BaseFormSet', 'all_valid')
# special field names
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'
class ManagementForm(Form):
"""
``ManagementForm`` is used to keep track of how many form instances
are displayed on the page. If adding new forms via javascript, you should
increment the count field of this form as well.
"""
def __init__(self, *args, **kwargs):
self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
super(ManagementForm, self).__init__(*args, **kwargs)
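# For the default prefix ('form'), the management form renders hidden inputs
# named form-TOTAL_FORMS, form-INITIAL_FORMS and form-MAX_NUM_FORMS; any
# JavaScript that adds forms client-side must keep form-TOTAL_FORMS in sync.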
@python_2_unicode_compatible
class BaseFormSet(object):
"""
A collection of instances of the same Form class.
"""
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList):
self.is_bound = data is not None or files is not None
self.prefix = prefix or self.get_default_prefix()
self.auto_id = auto_id
self.data = data or {}
self.files = files or {}
self.initial = initial
self.error_class = error_class
self._errors = None
self._non_form_errors = None
# construct the forms in the formset
self._construct_forms()
def __str__(self):
return self.as_table()
def __iter__(self):
"""Yields the forms in the order they should be rendered"""
return iter(self.forms)
def __getitem__(self, index):
"""Returns the form at the given index, based on the rendering order"""
return self.forms[index]
def __len__(self):
return len(self.forms)
def __bool__(self):
"""All formsets have a management form which is not included in the length"""
return True
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
@property
def management_form(self):
"""Returns the ManagementForm instance for this FormSet."""
if self.is_bound:
form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
if not form.is_valid():
raise ValidationError('ManagementForm data is missing or has been tampered with')
else:
form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MAX_NUM_FORM_COUNT: self.max_num
})
return form
def total_form_count(self):
"""Returns the total number of forms in this FormSet."""
if self.is_bound:
return self.management_form.cleaned_data[TOTAL_FORM_COUNT]
else:
initial_forms = self.initial_form_count()
total_forms = initial_forms + self.extra
# Allow all existing related objects/inlines to be displayed,
# but don't allow extra beyond max_num.
if self.max_num is not None:
if initial_forms > self.max_num >= 0:
total_forms = initial_forms
elif total_forms > self.max_num >= 0:
total_forms = self.max_num
return total_forms
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if self.is_bound:
return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
else:
# Use the length of the initial data if it's there, 0 otherwise.
initial_forms = self.initial and len(self.initial) or 0
if self.max_num is not None and (initial_forms > self.max_num >= 0):
initial_forms = self.max_num
return initial_forms
def _construct_forms(self):
# instantiate all the forms and put them in self.forms
self.forms = []
for i in xrange(self.total_form_count()):
self.forms.append(self._construct_form(i))
def _construct_form(self, i, **kwargs):
"""
Instantiates and returns the i-th form instance in a formset.
"""
defaults = {
'auto_id': self.auto_id,
'prefix': self.add_prefix(i),
'error_class': self.error_class,
}
if self.is_bound:
defaults['data'] = self.data
defaults['files'] = self.files
if self.initial and 'initial' not in kwargs:
try:
defaults['initial'] = self.initial[i]
except IndexError:
pass
# Allow extra forms to be empty.
if i >= self.initial_form_count():
defaults['empty_permitted'] = True
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, i)
return form
@property
def initial_forms(self):
"""Return a list of all the initial forms in this formset."""
return self.forms[:self.initial_form_count()]
@property
def extra_forms(self):
"""Return a list of all the extra forms in this formset."""
return self.forms[self.initial_form_count():]
@property
def empty_form(self, **kwargs):
defaults = {
'auto_id': self.auto_id,
'prefix': self.add_prefix('__prefix__'),
'empty_permitted': True,
}
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, None)
return form
# Maybe this should just go away?
@property
def cleaned_data(self):
"""
Returns a list of form.cleaned_data dicts for every form in self.forms.
"""
if not self.is_valid():
raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
return [form.cleaned_data for form in self.forms]
@property
def deleted_forms(self):
"""
Returns a list of forms that have been marked for deletion. Raises an
AttributeError if deletion is not allowed.
"""
if not self.is_valid() or not self.can_delete:
raise AttributeError("'%s' object has no attribute 'deleted_forms'" % self.__class__.__name__)
# construct _deleted_form_indexes which is just a list of form indexes
# that have had their deletion widget set to True
if not hasattr(self, '_deleted_form_indexes'):
self._deleted_form_indexes = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
if self._should_delete_form(form):
self._deleted_form_indexes.append(i)
return [self.forms[i] for i in self._deleted_form_indexes]
@property
def ordered_forms(self):
"""
Returns a list of forms in the order specified by the incoming data.
Raises an AttributeError if ordering is not allowed.
"""
if not self.is_valid() or not self.can_order:
raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
# Construct _ordering, which is a list of (form_index, order_field_value)
# tuples. After constructing this list, we'll sort it by order_field_value
# so we have a way to get to the form indexes in the order specified
# by the form data.
if not hasattr(self, '_ordering'):
self._ordering = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
# don't add data marked for deletion to self.ordered_data
if self.can_delete and self._should_delete_form(form):
continue
self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
# After we're done populating self._ordering, sort it.
# A sort function to order things numerically ascending, but
# None should be sorted below anything else. Allowing None as
# a comparison value makes it so we can leave ordering fields
# blank.
def compare_ordering_key(k):
if k[1] is None:
return (1, 0) # +infinity, larger than any number
return (0, k[1])
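# e.g. ordering values [3, 1, None] sort as 1, 3, None -- forms with a
# blank/None ordering field always sink to the end.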
self._ordering.sort(key=compare_ordering_key)
# Return a list of form.cleaned_data dicts in the order specified by
# the form data.
return [self.forms[i[0]] for i in self._ordering]
@classmethod
def get_default_prefix(cls):
return 'form'
def non_form_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
form -- i.e., from formset.clean(). Returns an empty ErrorList if there
are none.
"""
if self._non_form_errors is not None:
return self._non_form_errors
return self.error_class()
@property
def errors(self):
"""
Returns a list of form.errors for every form in self.forms.
"""
if self._errors is None:
self.full_clean()
return self._errors
def _should_delete_form(self, form):
"""
Returns whether or not the form was marked for deletion.
"""
return form.cleaned_data.get(DELETION_FIELD_NAME, False)
def is_valid(self):
"""
Returns True if every form in self.forms is valid.
"""
if not self.is_bound:
return False
# We loop over every form.errors here rather than short circuiting on the
# first failure to make sure validation gets triggered for every form.
forms_valid = True
err = self.errors
for i in range(0, self.total_form_count()):
form = self.forms[i]
if self.can_delete:
if self._should_delete_form(form):
# This form is going to be deleted so any of its errors
# should not cause the entire formset to be invalid.
continue
forms_valid &= form.is_valid()
return forms_valid and not bool(self.non_form_errors())
def full_clean(self):
"""
Cleans all of self.data and populates self._errors.
"""
self._errors = []
if not self.is_bound: # Stop further processing.
return
for i in range(0, self.total_form_count()):
form = self.forms[i]
self._errors.append(form.errors)
# Give self.clean() a chance to do cross-form validation.
try:
self.clean()
except ValidationError as e:
self._non_form_errors = self.error_class(e.messages)
def clean(self):
"""
Hook for doing any extra formset-wide cleaning after Form.clean() has
been called on every form. Any ValidationError raised by this method
will not be associated with a particular form; it will be accessible
via formset.non_form_errors()
"""
pass
def has_changed(self):
"""
Returns true if data in any form differs from initial.
"""
return any(form.has_changed() for form in self)
def add_fields(self, form, index):
"""A hook for adding extra fields on to each form instance."""
if self.can_order:
# Only pre-fill the ordering field for initial forms.
if index is not None and index < self.initial_form_count():
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index+1, required=False)
else:
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
if self.can_delete:
form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)
def add_prefix(self, index):
return '%s-%s' % (self.prefix, index)
def is_multipart(self):
"""
Returns True if the formset needs to be multipart, i.e. it
has FileInput. Otherwise, False.
"""
return self.forms and self.forms[0].is_multipart()
@property
def media(self):
# All the forms on a FormSet are the same, so you only need to
# interrogate the first form for media.
if self.forms:
return self.forms[0].media
else:
return Media()
def as_table(self):
"Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
# XXX: there is no semantic division between forms here, there
# probably should be. It might make sense to render each form as a
# table row with each field as a td.
forms = ' '.join([form.as_table() for form in self])
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_p(self):
"Returns this formset rendered as HTML <p>s."
forms = ' '.join([form.as_p() for form in self])
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_ul(self):
"Returns this formset rendered as HTML <li>s."
forms = ' '.join([form.as_ul() for form in self])
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
can_delete=False, max_num=None):
"""Return a FormSet for the given form class."""
attrs = {'form': form, 'extra': extra,
'can_order': can_order, 'can_delete': can_delete,
'max_num': max_num}
return type(form.__name__ + str('FormSet'), (formset,), attrs)
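# Usage sketch (ArticleForm is a placeholder Form subclass, not defined here):
#   ArticleFormSet = formset_factory(ArticleForm, extra=2, can_delete=True)
#   formset = ArticleFormSet(initial=[{'title': 'First'}])
#   formset.total_form_count()  # -> 3 (1 initial form + 2 extra)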
def all_valid(formsets):
"""Returns true if every formset in formsets is valid."""
valid = True
for formset in formsets:
if not formset.is_valid():
valid = False
return valid
|
|
"""Class to hold all cover accessories."""
import logging
from pyhap.const import (
CATEGORY_GARAGE_DOOR_OPENER,
CATEGORY_WINDOW,
CATEGORY_WINDOW_COVERING,
)
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
DOMAIN,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
STATE_CLOSED,
STATE_CLOSING,
STATE_ON,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change_event
from .accessories import TYPES, HomeAccessory
from .const import (
ATTR_OBSTRUCTION_DETECTED,
CHAR_CURRENT_DOOR_STATE,
CHAR_CURRENT_POSITION,
CHAR_CURRENT_TILT_ANGLE,
CHAR_HOLD_POSITION,
CHAR_OBSTRUCTION_DETECTED,
CHAR_POSITION_STATE,
CHAR_TARGET_DOOR_STATE,
CHAR_TARGET_POSITION,
CHAR_TARGET_TILT_ANGLE,
CONF_LINKED_OBSTRUCTION_SENSOR,
HK_DOOR_CLOSED,
HK_DOOR_CLOSING,
HK_DOOR_OPEN,
HK_DOOR_OPENING,
HK_POSITION_GOING_TO_MAX,
HK_POSITION_GOING_TO_MIN,
HK_POSITION_STOPPED,
PROP_MAX_VALUE,
PROP_MIN_VALUE,
SERV_GARAGE_DOOR_OPENER,
SERV_WINDOW,
SERV_WINDOW_COVERING,
)
DOOR_CURRENT_HASS_TO_HK = {
STATE_OPEN: HK_DOOR_OPEN,
STATE_CLOSED: HK_DOOR_CLOSED,
STATE_OPENING: HK_DOOR_OPENING,
STATE_CLOSING: HK_DOOR_CLOSING,
}
# HomeKit only has two states for
# Target Door State:
# 0: Open
# 1: Closed
# Opening is mapped to 0 since the target is Open
# Closing is mapped to 1 since the target is Closed
DOOR_TARGET_HASS_TO_HK = {
STATE_OPEN: HK_DOOR_OPEN,
STATE_CLOSED: HK_DOOR_CLOSED,
STATE_OPENING: HK_DOOR_OPEN,
STATE_CLOSING: HK_DOOR_CLOSED,
}
_LOGGER = logging.getLogger(__name__)
@TYPES.register("GarageDoorOpener")
class GarageDoorOpener(HomeAccessory):
"""Generate a Garage Door Opener accessory for a cover entity.
The cover entity must be in the 'garage' device class
and support no more than open, close, and stop.
"""
def __init__(self, *args):
"""Initialize a GarageDoorOpener accessory object."""
super().__init__(*args, category=CATEGORY_GARAGE_DOOR_OPENER)
state = self.hass.states.get(self.entity_id)
serv_garage_door = self.add_preload_service(SERV_GARAGE_DOOR_OPENER)
self.char_current_state = serv_garage_door.configure_char(
CHAR_CURRENT_DOOR_STATE, value=0
)
self.char_target_state = serv_garage_door.configure_char(
CHAR_TARGET_DOOR_STATE, value=0, setter_callback=self.set_state
)
self.char_obstruction_detected = serv_garage_door.configure_char(
CHAR_OBSTRUCTION_DETECTED, value=False
)
self.linked_obstruction_sensor = self.config.get(CONF_LINKED_OBSTRUCTION_SENSOR)
if self.linked_obstruction_sensor:
self._async_update_obstruction_state(
self.hass.states.get(self.linked_obstruction_sensor)
)
self.async_update_state(state)
async def run(self):
"""Handle accessory driver started event.
Run inside the Home Assistant event loop.
"""
if self.linked_obstruction_sensor:
self._subscriptions.append(
async_track_state_change_event(
self.hass,
[self.linked_obstruction_sensor],
self._async_update_obstruction_event,
)
)
await super().run()
@callback
def _async_update_obstruction_event(self, event):
"""Handle state change event listener callback."""
self._async_update_obstruction_state(event.data.get("new_state"))
@callback
def _async_update_obstruction_state(self, new_state):
"""Handle linked obstruction sensor state change to update HomeKit value."""
if not new_state:
return
detected = new_state.state == STATE_ON
if self.char_obstruction_detected.value == detected:
return
self.char_obstruction_detected.set_value(detected)
_LOGGER.debug(
"%s: Set linked obstruction %s sensor to %d",
self.entity_id,
self.linked_obstruction_sensor,
detected,
)
def set_state(self, value):
"""Change garage state if call came from HomeKit."""
_LOGGER.debug("%s: Set state to %d", self.entity_id, value)
params = {ATTR_ENTITY_ID: self.entity_id}
if value == HK_DOOR_OPEN:
if self.char_current_state.value != value:
self.char_current_state.set_value(HK_DOOR_OPENING)
self.async_call_service(DOMAIN, SERVICE_OPEN_COVER, params)
elif value == HK_DOOR_CLOSED:
if self.char_current_state.value != value:
self.char_current_state.set_value(HK_DOOR_CLOSING)
self.async_call_service(DOMAIN, SERVICE_CLOSE_COVER, params)
@callback
def async_update_state(self, new_state):
"""Update cover state after state changed."""
hass_state = new_state.state
target_door_state = DOOR_TARGET_HASS_TO_HK.get(hass_state)
current_door_state = DOOR_CURRENT_HASS_TO_HK.get(hass_state)
if ATTR_OBSTRUCTION_DETECTED in new_state.attributes:
obstruction_detected = (
new_state.attributes[ATTR_OBSTRUCTION_DETECTED] is True
)
self.char_obstruction_detected.set_value(obstruction_detected)
if target_door_state is not None:
self.char_target_state.set_value(target_door_state)
if current_door_state is not None:
self.char_current_state.set_value(current_door_state)
class OpeningDeviceBase(HomeAccessory):
"""Generate a base Window accessory for a cover entity.
This class is used for WindowCoveringBasic and
WindowCovering
"""
def __init__(self, *args, category, service):
"""Initialize a OpeningDeviceBase accessory object."""
super().__init__(*args, category=category)
state = self.hass.states.get(self.entity_id)
self.features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
self._supports_stop = self.features & SUPPORT_STOP
self.chars = []
if self._supports_stop:
self.chars.append(CHAR_HOLD_POSITION)
self._supports_tilt = self.features & SUPPORT_SET_TILT_POSITION
if self._supports_tilt:
self.chars.extend([CHAR_TARGET_TILT_ANGLE, CHAR_CURRENT_TILT_ANGLE])
self.serv_cover = self.add_preload_service(service, self.chars)
if self._supports_stop:
self.char_hold_position = self.serv_cover.configure_char(
CHAR_HOLD_POSITION, setter_callback=self.set_stop
)
if self._supports_tilt:
self.char_target_tilt = self.serv_cover.configure_char(
CHAR_TARGET_TILT_ANGLE, setter_callback=self.set_tilt
)
self.char_current_tilt = self.serv_cover.configure_char(
CHAR_CURRENT_TILT_ANGLE, value=0
)
def set_stop(self, value):
"""Stop the cover motion from HomeKit."""
if value != 1:
return
self.async_call_service(
DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: self.entity_id}
)
def set_tilt(self, value):
"""Set tilt to value if call came from HomeKit."""
_LOGGER.info("%s: Set tilt to %d", self.entity_id, value)
# HomeKit sends values between -90 and 90.
# We'll have to normalize to [0,100]
value = round((value + 90) / 180.0 * 100.0)
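# e.g. HomeKit -90 -> 0, 0 -> 50, 90 -> 100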
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_TILT_POSITION: value}
self.async_call_service(DOMAIN, SERVICE_SET_COVER_TILT_POSITION, params, value)
@callback
def async_update_state(self, new_state):
"""Update cover position and tilt after state changed."""
# update tilt
current_tilt = new_state.attributes.get(ATTR_CURRENT_TILT_POSITION)
if isinstance(current_tilt, (float, int)):
# HomeKit sends values between -90 and 90.
# We'll have to normalize to [0,100]
current_tilt = (current_tilt / 100.0 * 180.0) - 90.0
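# e.g. Home Assistant 0 -> -90, 50 -> 0, 100 -> 90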
current_tilt = int(current_tilt)
self.char_current_tilt.set_value(current_tilt)
self.char_target_tilt.set_value(current_tilt)
class OpeningDevice(OpeningDeviceBase, HomeAccessory):
"""Generate a Window/WindowOpening accessory for a cover entity.
The cover entity must support: set_cover_position.
"""
def __init__(self, *args, category, service):
"""Initialize a WindowCovering accessory object."""
super().__init__(*args, category=category, service=service)
state = self.hass.states.get(self.entity_id)
self.char_current_position = self.serv_cover.configure_char(
CHAR_CURRENT_POSITION, value=0
)
target_args = {"value": 0}
if self.features & SUPPORT_SET_POSITION:
target_args["setter_callback"] = self.move_cover
else:
# If it's tilt-only, we lock the position state to 0 (closed)
# since CHAR_CURRENT_POSITION/CHAR_TARGET_POSITION are required
# by HomeKit but don't really exist for this entity.
_LOGGER.debug(
"%s does not support setting position, current position will be locked to closed",
self.entity_id,
)
target_args["properties"] = {PROP_MIN_VALUE: 0, PROP_MAX_VALUE: 0}
self.char_target_position = self.serv_cover.configure_char(
CHAR_TARGET_POSITION, **target_args
)
self.char_position_state = self.serv_cover.configure_char(
CHAR_POSITION_STATE, value=HK_POSITION_STOPPED
)
self.async_update_state(state)
def move_cover(self, value):
"""Move cover to value if call came from HomeKit."""
_LOGGER.debug("%s: Set position to %d", self.entity_id, value)
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_POSITION: value}
self.async_call_service(DOMAIN, SERVICE_SET_COVER_POSITION, params, value)
@callback
def async_update_state(self, new_state):
"""Update cover position and tilt after state changed."""
current_position = new_state.attributes.get(ATTR_CURRENT_POSITION)
if isinstance(current_position, (float, int)):
current_position = int(current_position)
self.char_current_position.set_value(current_position)
self.char_target_position.set_value(current_position)
position_state = _hass_state_to_position_start(new_state.state)
self.char_position_state.set_value(position_state)
super().async_update_state(new_state)
@TYPES.register("Window")
class Window(OpeningDevice):
"""Generate a Window accessory for a cover entity with DEVICE_CLASS_WINDOW.
The entity must support: set_cover_position.
"""
def __init__(self, *args):
"""Initialize a Window accessory object."""
super().__init__(*args, category=CATEGORY_WINDOW, service=SERV_WINDOW)
@TYPES.register("WindowCovering")
class WindowCovering(OpeningDevice):
"""Generate a WindowCovering accessory for a cover entity.
The entity must support: set_cover_position.
"""
def __init__(self, *args):
"""Initialize a WindowCovering accessory object."""
super().__init__(
*args, category=CATEGORY_WINDOW_COVERING, service=SERV_WINDOW_COVERING
)
@TYPES.register("WindowCoveringBasic")
class WindowCoveringBasic(OpeningDeviceBase, HomeAccessory):
"""Generate a Window accessory for a cover entity.
The cover entity must support: open_cover, close_cover,
stop_cover (optional).
"""
def __init__(self, *args):
"""Initialize a WindowCoveringBasic accessory object."""
super().__init__(
*args, category=CATEGORY_WINDOW_COVERING, service=SERV_WINDOW_COVERING
)
state = self.hass.states.get(self.entity_id)
self.char_current_position = self.serv_cover.configure_char(
CHAR_CURRENT_POSITION, value=0
)
self.char_target_position = self.serv_cover.configure_char(
CHAR_TARGET_POSITION, value=0, setter_callback=self.move_cover
)
self.char_position_state = self.serv_cover.configure_char(
CHAR_POSITION_STATE, value=HK_POSITION_STOPPED
)
self.async_update_state(state)
def move_cover(self, value):
"""Move cover to value if call came from HomeKit."""
_LOGGER.debug("%s: Set position to %d", self.entity_id, value)
if (
self._supports_stop
and value > 70
or not self._supports_stop
and value >= 50
):
service, position = (SERVICE_OPEN_COVER, 100)
elif value < 30 or not self._supports_stop:
service, position = (SERVICE_CLOSE_COVER, 0)
else:
service, position = (SERVICE_STOP_COVER, 50)
params = {ATTR_ENTITY_ID: self.entity_id}
self.async_call_service(DOMAIN, service, params)
# Snap the current/target position to the expected final position.
self.char_current_position.set_value(position)
self.char_target_position.set_value(position)
@callback
def async_update_state(self, new_state):
"""Update cover position after state changed."""
position_mapping = {STATE_OPEN: 100, STATE_CLOSED: 0}
hk_position = position_mapping.get(new_state.state)
if hk_position is not None:
if self.char_current_position.value != hk_position:
self.char_current_position.set_value(hk_position)
if self.char_target_position.value != hk_position:
self.char_target_position.set_value(hk_position)
position_state = _hass_state_to_position_start(new_state.state)
if self.char_position_state.value != position_state:
self.char_position_state.set_value(position_state)
super().async_update_state(new_state)
def _hass_state_to_position_start(state):
"""Convert hass state to homekit position state."""
if state == STATE_OPENING:
return HK_POSITION_GOING_TO_MAX
if state == STATE_CLOSING:
return HK_POSITION_GOING_TO_MIN
return HK_POSITION_STOPPED
|
|
import json
import time
import sys
import copy
from multiprocessing import Pool, cpu_count
import sqlite3
from collections import OrderedDict
from unidecode import unidecode
print "Working with " + str(cpu_count()) + " processors"
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i+n]
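# e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]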
def getComments_worker(ids):
conn = sqlite3.connect("ask.db")
c = conn.cursor()
comments = []
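# ids is either a single link_id string or a list of link_id strings; a
# string's first element is a single character, so len(ids[0]) > 1 means a
# list was passed. Note the queries below are built by string interpolation
# and assume trusted input.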
if len(ids[0])>1:
cmd = """select * from comments INDEXED BY idx1 where link_id in (%(comment_id)s) order by ups desc limit 100""" % {'comment_id':"'" + "','".join(ids) + "'"}
else:
cmd = """select * from comments INDEXED BY idx1 where link_id=='%(comment_id)s' order by ups desc limit 100""" % {'comment_id':ids}
print cmd
c.execute(cmd)
for row in c.fetchall():
comment = {}
comment['rowid'] = row[0]
comment['link_id'] = row[1]
comment['parent_id'] = row[2]
comment['subreddit'] = row[3]
comment['ups'] = row[4]
comment['downs'] = row[5]
cmd2 = """select * from com where rowid = %(row_id)s""" % {'row_id':row[0]}
c.execute(cmd2)
for row2 in c.fetchone():
comment['body'] = row2
if "[deleted]" != comment['body']:
comments.append(comment)
conn.close()
return comments
def search_normal(search_text):
conn = sqlite3.connect("ask.db")
start = time.time()
datas = {}
c = conn.cursor()
cmd = """select rowid,* from sub where title match '%(search_string)s'""" % {'search_string':search_text}
print cmd
c.execute(cmd)
for row in c.fetchall():
datas[row[0]] = {}
datas[row[0]]['title'] = row[1]
datas[row[0]]['selftext'] = row[2]
print "Matched submissions from title in ",
print time.time()-start
start = time.time()
cmd = """select rowid,* from sub where selftext match '%(search_string)s'""" % {'search_string':search_text}
print cmd
c.execute(cmd)
for row in c.fetchall():
datas[row[0]] = {}
datas[row[0]]['title'] = row[1]
datas[row[0]]['selftext'] = row[2]
print "Matched submissions from selftext in ",
print time.time()-start
start = time.time()
rowids = []
for rowid in datas:
rowids.append(str(rowid))
cmd = """select * from submissions where rowid in (%(rowids)s) order by ups desc limit 100""" % {'rowids':",".join(rowids)}
c.execute(cmd)
for row in c.fetchall():
rowid = int(row[0])
datas[rowid]['id'] = row[1]
datas[rowid]['subreddit'] = row[2]
datas[rowid]['created_utc'] = row[3]
datas[rowid]['ups'] = row[4]
datas[rowid]['downs'] = row[5]
datas[rowid]['url'] = row[6]
datas[rowid]['comments'] = []
print "Got content from submissions in ",
print time.time()-start
start = time.time()
reformated = OrderedDict()
for rowid in datas:
if 'id' in datas[rowid]:
reformated[datas[rowid]['id']] = datas[rowid]
datas = copy.deepcopy(reformated)
ids = []
for id_str in datas:
ids.append(str(id_str))
print "Got " + str(len(ids)) + " submissions to get comments from..."
N = cpu_count()
ids_partitions = ids #chunks(ids,int(len(ids)/N))
p = Pool(N)
for comments in p.map(getComments_worker, ids_partitions):
for comment in comments:
datas[comment['link_id']]['comments'].append(comment)
p.terminate()
print "Got relevant comments in ",
print time.time()-start
start = time.time()
print (len(datas))
datas2 = copy.deepcopy(datas)
for data in datas:
if len(datas[data]['comments'])==0:
datas2.pop(data)
print "Pruned comments in ",
print time.time()-start
start = time.time()
print (len(datas2))
#print json.dumps(datas,indent=4)
conn.close()
return datas2
def inverse_search(search_text):
conn = sqlite3.connect("ask.db")
start = time.time()
c = conn.cursor()
cmd = """select rowid,* from com where body match '%(search_string)s'""" % {'search_string':search_text}
print cmd
rowids = []
c.execute(cmd)
for row in c.fetchall():
rowids.append(str(row[0]))
rowids = list(set(rowids))
print "Matched comments from body in ",
print time.time()-start
start = time.time()
ids = []
cmd = """select link_id from comments where rowid in (%(rowids)s)""" % {'rowids':",".join(rowids)}
for row in c.execute(cmd):
ids.append(row[0])
print "Got link_ids from comments in ",
print time.time()-start
start = time.time()
cmd = """select * from submissions INDEXED BY idx2 WHERE id in (%(ids)s) and ups > 10 order by ups desc limit 160""" % {'ids':"'" + "','".join(ids) + "'"}
datas = OrderedDict()
newids = []
c.execute(cmd)
for row in c.fetchall():
str_id = row[1]
datas[str_id] = OrderedDict()
cmd2 = """select * from sub where rowid = %(row_id)s""" % {'row_id':row[0]}
c.execute(cmd2)
for row2 in c.fetchall():
datas[str_id]['title'] = row2[0]
datas[str_id]['selftext'] = row2[1]
newids.append(str_id)
datas[str_id]['subreddit'] = row[2]
datas[str_id]['created_utc'] = row[3]
datas[str_id]['ups'] = row[4]
datas[str_id]['downs'] = row[5]
datas[str_id]['url'] = row[6]
datas[str_id]['comments'] = []
print "Got info from submissions in ",
print time.time()-start
start = time.time()
ids = newids
N = cpu_count()
ids_partitions = ids#chunks(ids,2)
p = Pool(N)
for comments in p.map(getComments_worker, ids_partitions):
for comment in comments:
datas[comment['link_id']]['comments'].append(comment)
p.terminate()
print "Got relevant comments in ",
print time.time()-start
start = time.time()
print (len(datas))
datas2 = copy.deepcopy(datas)
for data in datas:
found = False
for i in range(len(datas[data]['comments'])):
if search_text in datas[data]['comments'][i]['body']:
found = True
if not found or len(datas[data]['comments'])==0:
print found
print len(datas[data]['comments'])
datas2.pop(data)
print "Pruned comments in ",
print time.time()-start
start = time.time()
print (len(datas2))
conn.close()
return datas2
#search_normal('darwin')
#inverse_search('darwin')
|
|
from sympy.core.evalf import PrecisionExhausted, complex_accuracy
from sympy import pi, I, Symbol, Add, Rational, exp, sqrt, sin, cos, \
fibonacci, Integral, oo, E, atan, log, integrate, floor, ceiling, \
factorial, binomial, Sum, zeta, Catalan, Pow, GoldenRatio, sympify, \
sstr, Function, Mul, Derivative
from sympy.mpmath.libmp.libmpf import from_float
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
n = Symbol('n')
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
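# e.g. NS('pi', 5) -> '3.1416' (evaluate to n significant digits, full precision string)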
def test_evalf_helpers():
assert complex_accuracy((from_float(2.0),None,35,None)) == 35
assert complex_accuracy((from_float(2.0),from_float(10.0),35,100)) == 37
assert complex_accuracy((from_float(2.0),from_float(1000.0),35,100)) == 43
assert complex_accuracy((from_float(2.0),from_float(10.0),100,35)) == 35
assert complex_accuracy((from_float(2.0),from_float(1000.0),100,35)) == 35
def test_evalf_basic():
assert NS('pi',15) == '3.14159265358979'
assert NS('2/3',10) == '0.6666666667'
assert NS('355/113-pi',6) == '2.66764e-7'
assert NS('16*atan(1/5)-4*atan(1/239)', 15) == '3.14159265358979'
def test_cancellation():
assert NS(Add(pi,Rational(1,10**1000),-pi,evaluate=False),15,maxn=1200) == '1.00000000000000e-1000'
def test_evalf_powers():
assert NS('pi**(10**20)',10) == '1.339148777e+49714987269413385435'
assert NS(pi**(10**100),10) == ('4.946362032e+4971498726941338543512682882'
'9089887365167832438044244613405349992494711208'
'95526746555473864642912223')
assert NS('2**(1/10**50)',15) == '1.00000000000000'
assert NS('2**(1/10**50)-1',15) == '6.93147180559945e-51'
# Evaluation of Rump's ill-conditioned polynomial
def test_evalf_rump():
a = 1335*y**6/4+x**2*(11*x**2*y**2-y**6-121*y**4-2)+11*y**8/2+x/(2*y)
assert NS(a, 15, subs={x:77617, y:33096}) == '-0.827396059946821'
def test_evalf_complex():
assert NS('2*sqrt(pi)*I',10) == '3.544907702*I'
assert NS('3+3*I',15) == '3.00000000000000 + 3.00000000000000*I'
assert NS('E+pi*I',15) == '2.71828182845905 + 3.14159265358979*I'
assert NS('pi * (3+4*I)',15) == '9.42477796076938 + 12.5663706143592*I'
assert NS('I*(2+I)',15) == '-1.00000000000000 + 2.00000000000000*I'
#assert NS('(pi+E*I)*(E+pi*I)',15) in ('.0e-15 + 17.25866050002*I', '.0e-17 + 17.25866050002*I', '-.0e-17 + 17.25866050002*I')
assert NS('(pi+E*I)*(E+pi*I)',15,chop=True) == '17.2586605000200*I'
def test_evalf_complex_powers():
assert NS('(E+pi*I)**100000000000000000') == \
'-3.58896782867793e+61850354284995199 + 4.58581754997159e+61850354284995199*I'
# XXX: rewrite if a+a*I simplification introduced in sympy
#assert NS('(pi + pi*I)**2') in ('.0e-15 + 19.7392088021787*I', '.0e-16 + 19.7392088021787*I')
assert NS('(pi + pi*I)**2', chop=True) == '19.7392088021787*I'
assert NS('(pi + 1/10**8 + pi*I)**2') == '6.2831853e-8 + 19.7392088650106*I'
assert NS('(pi + 1/10**12 + pi*I)**2') == '6.283e-12 + 19.7392088021850*I'
#assert NS('(pi + pi*I)**4') == '-389.63636413601 + .0e-14*I'
assert NS('(pi + pi*I)**4', chop=True) == '-389.636364136010'
assert NS('(pi + 1/10**8 + pi*I)**4') == '-389.636366616512 + 2.4805021e-6*I'
assert NS('(pi + 1/10**12 + pi*I)**4') == '-389.636364136258 + 2.481e-10*I'
assert NS('(10000*pi + 10000*pi*I)**4', chop=True) == '-3.89636364136010e+18'
def test_evalf_exponentiation():
assert NS(sqrt(-pi)) == '1.77245385090552*I'
assert NS(Pow(pi*I, Rational(1,2), evaluate=False)) == '1.25331413731550 + 1.25331413731550*I'
assert NS(pi**I) == '0.413292116101594 + 0.910598499212615*I'
assert NS(pi**(E+I/3)) == '20.8438653991931 + 8.36343473930031*I'
assert NS((pi+I/3)**(E+I/3)) == '17.2442906093590 + 13.6839376767037*I'
assert NS(exp(pi)) == '23.1406926327793'
assert NS(exp(pi+E*I)) == '-21.0981542849657 + 9.50576358282422*I'
assert NS(pi**pi) == '36.4621596072079'
assert NS((-pi)**pi) == '-32.9138577418939 - 15.6897116534332*I'
assert NS((-pi)**(-pi)) == '-0.0247567717232697 + 0.0118013091280262*I'
# An example from Smith, "Multiple Precision Complex Arithmetic and Functions"
def test_evalf_complex_cancellation():
A = Rational('63287/100000')
B = Rational('52498/100000')
C = Rational('69301/100000')
D = Rational('83542/100000')
F = Rational('2231321613/2500000000')
# XXX: the number of returned mantissa digits in the real part could
# change with the implementation. What matters is that the returned digits are
# correct.
assert NS((A+B*I)*(C+D*I),6) == '6.44862e-6 + 0.892529*I'
assert NS((A+B*I)*(C+D*I),10) == '6.447099821e-6 + 0.8925286452*I'
assert NS((A+B*I)*(C+D*I) - F*I, 5) in ('6.4471e-6 - .0e-15*I', '6.4471e-6 + .0e-15*I')
def test_evalf_logs():
assert NS("log(3+pi*I)", 15) == '1.46877619736226 + 0.808448792630022*I'
assert NS("log(pi*I)", 15) == '1.14472988584940 + 1.57079632679490*I'
def test_evalf_trig():
assert NS('sin(1)',15) == '0.841470984807897'
assert NS('cos(1)',15) == '0.540302305868140'
assert NS('sin(10**-6)',15) == '9.99999999999833e-7'
assert NS('cos(10**-6)',15) == '0.999999999999500'
assert NS('sin(E*10**100)',15) == '0.409160531722613'
# Some input near roots
assert NS(sin(exp(pi*sqrt(163))*pi), 15) == '-2.35596641936785e-12'
assert NS(sin(pi*10**100 + Rational(7,10**5), evaluate=False), 15, maxn=120) == \
'6.99999999428333e-5'
assert NS(sin(Rational(7,10**5), evaluate=False), 15) == \
'6.99999999428333e-5'
# Check detection of various false identities
def test_evalf_near_integers():
# Binet's formula
f = lambda n: ((1+sqrt(5))**n)/(2**n * sqrt(5))
assert NS(f(5000) - fibonacci(5000), 10, maxn=1500) == '5.156009964e-1046'
# Some near-integer identities from
# http://mathworld.wolfram.com/AlmostInteger.html
assert NS('sin(2017*2**(1/5))',15) == '-1.00000000000000'
assert NS('sin(2017*2**(1/5))',20) == '-0.99999999999999997857'
assert NS('1+sin(2017*2**(1/5))',15) == '2.14322287389390e-17'
assert NS('45 - 613*E/37 + 35/991', 15) == '6.03764498766326e-11'
def test_evalf_ramanujan():
assert NS(exp(pi*sqrt(163)) - 640320**3 - 744, 10) == '-7.499274028e-13'
# A related identity
A = 262537412640768744*exp(-pi*sqrt(163))
B = 196884*exp(-2*pi*sqrt(163))
C = 103378831900730205293632*exp(-3*pi*sqrt(163))
assert NS(1-A-B+C,10) == '1.613679005e-59'
# Input that for various reasons have failed at some point
def test_evalf_bugs():
assert NS(sin(1)+exp(-10**10),10) == NS(sin(1),10)
assert NS(exp(10**10)+sin(1),10) == NS(exp(10**10),10)
assert NS('log(1+1/10**50)',20) == '1.0000000000000000000e-50'
assert NS('log(10**100,10)',10) == '100.0000000'
assert NS('log(2)',10) == '0.6931471806'
assert NS('(sin(x)-x)/x**3', 15, subs={x:'1/10**50'}) == '-0.166666666666667'
assert NS(sin(1)+Rational(1,10**100)*I,15) == '0.841470984807897 + 1.00000000000000e-100*I'
assert x.evalf() == x
assert NS((1+I)**2*I,6) == '-2.00000 + 2.32831e-10*I'
d={n: (-1)**Rational(6,7), y: (-1)**Rational(4,7), x: (-1)**Rational(2,7)}
assert NS((x*(1+y*(1 + n))).subs(d).evalf(),6) == '0.346011 + 0.433884*I'
assert NS(((-I-sqrt(2)*I)**2).evalf()) == '-5.82842712474619'
assert NS((1+I)**2*I,15) == '-2.00000000000000 + 2.16840434497101e-19*I'
#1659 (1/2):
assert NS(pi.evalf(69) - pi) == '-4.43863937855894e-71'
#1659 (2/2): With the bug present, this still only fails if the
# terms are in the order given here. This is not generally the case,
# because the order depends on the hashes of the terms.
assert NS(20 - 5008329267844*n**25 - 477638700*n**37 - 19*n,
subs={n:.01}) == '19.8100000000000'
def test_evalf_integer_parts():
a = floor(log(8)/log(2) - exp(-1000), evaluate=False)
b = floor(log(8)/log(2), evaluate=False)
raises(PrecisionExhausted, "a.evalf()")
assert a.evalf(chop=True) == 3
assert a.evalf(maxn=500) == 2
raises(PrecisionExhausted, "b.evalf()")
raises(PrecisionExhausted, "b.evalf(maxn=500)")
assert b.evalf(chop=True) == 3
assert int(floor(factorial(50)/E,evaluate=False).evalf()) == \
11188719610782480504630258070757734324011354208865721592720336800L
assert int(ceiling(factorial(50)/E,evaluate=False).evalf()) == \
11188719610782480504630258070757734324011354208865721592720336801L
assert int(floor((GoldenRatio**999 / sqrt(5) + Rational(1,2))).evalf(1000)) == fibonacci(999)
assert int(floor((GoldenRatio**1000 / sqrt(5) + Rational(1,2))).evalf(1000)) == fibonacci(1000)
def test_evalf_trig_zero_detection():
a = sin(160*pi, evaluate=False)
t = a.evalf(maxn=100)
assert abs(t) < 1e-100
assert t._prec < 2
assert a.evalf(chop=True) == 0
raises(PrecisionExhausted, "a.evalf(strict=True)")
def test_evalf_divergent_series():
n = Symbol('n', integer=True)
raises(ValueError, 'Sum(1/n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum(n/(n**2+1), (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((-1)**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((-1)**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum(n**2, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum(2**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((-2)**n, (n, 1, oo)).evalf()')
def test_evalf_py_methods():
assert abs(float(pi+1) - 4.1415926535897932) < 1e-10
assert abs(complex(pi+1) - 4.1415926535897932) < 1e-10
assert abs(complex(pi+E*I) - (3.1415926535897931+2.7182818284590451j)) < 1e-10
raises(ValueError, "float(pi+x)")
raises(ValueError, "complex(pi+x)")
def test_evalf_power_subs_bugs():
assert (x**2).evalf(subs={x:0}) == 0
assert sqrt(x).evalf(subs={x:0}) == 0
assert (x**Rational(2,3)).evalf(subs={x:0}) == 0
assert (x**x).evalf(subs={x:0}) == 1
assert (3**x).evalf(subs={x:0}) == 1
assert exp(x).evalf(subs={x:0}) == 1
assert ((2+I)**x).evalf(subs={x:0}) == 1
assert (0**x).evalf(subs={x:0}) == 1
def test_evalf_arguments():
raises(TypeError, 'pi.evalf(method="garbage")')
def test_implemented_function_evalf():
from sympy.utilities.lambdify import implemented_function
f = Function('f')
x = Symbol('x')
f = implemented_function(f, lambda x: x + 1)
assert str(f(x)) == "f(x)"
assert str(f(2)) == "f(2)"
assert f(2).evalf() == 3
assert f(x).evalf() == f(x)
def test_evaluate_false():
for no in [[], 0, False, None]:
assert Add(3, 2, evaluate=no).is_Add
assert Mul(3, 2, evaluate=no).is_Mul
assert Pow(3, 2, evaluate=no).is_Pow
assert Pow(y, 2, evaluate=True) - Pow(y, 2, evaluate=True) == 0
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._lua_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
MODULES = {'basic': ('_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getmetatable',
'ipairs',
'load',
'loadfile',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawlen',
'rawset',
'select',
'setmetatable',
'tonumber',
'tostring',
'type',
'xpcall'),
'bit32': ('bit32.arshift',
'bit32.band',
'bit32.bnot',
'bit32.bor',
'bit32.btest',
'bit32.bxor',
'bit32.extract',
'bit32.lrotate',
'bit32.lshift',
'bit32.replace',
'bit32.rrotate',
'bit32.rshift'),
'coroutine': ('coroutine.create',
'coroutine.isyieldable',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'),
'debug': ('debug.debug',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.getuservalue',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.setuservalue',
'debug.traceback',
'debug.upvalueid',
'debug.upvaluejoin'),
'io': ('io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.stderr',
'io.stdin',
'io.stdout',
'io.tmpfile',
'io.type',
'io.write'),
'math': ('math.abs',
'math.acos',
'math.asin',
'math.atan',
'math.atan2',
'math.ceil',
'math.cos',
'math.cosh',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log',
'math.max',
'math.maxinteger',
'math.min',
'math.mininteger',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sin',
'math.sinh',
'math.sqrt',
'math.tan',
'math.tanh',
'math.tointeger',
'math.type',
'math.ult'),
'modules': ('package.config',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.searchers',
'package.searchpath',
'require'),
'os': ('os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'),
'string': ('string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.pack',
'string.packsize',
'string.rep',
'string.reverse',
'string.sub',
'string.unpack',
'string.upper'),
'table': ('table.concat',
'table.insert',
'table.move',
'table.pack',
'table.remove',
'table.sort',
'table.unpack'),
'utf8': ('utf8.char',
'utf8.charpattern',
'utf8.codepoint',
'utf8.codes',
'utf8.len',
'utf8.offset')}
if __name__ == '__main__': # pragma: no cover
import re
import sys
# urllib ends up wanting to import a module called 'math' -- if
# pygments/lexers is in the path, this ends badly.
for i in range(len(sys.path)-1, -1, -1):
if sys.path[i].endswith('/lexers'):
del sys.path[i]
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
import pprint
# You can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
# that recognize if a given function belongs to a specific module.
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">(Lua )?\1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(?!lua|LUA)([^:]+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().items():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
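# e.g. get_function_module('coroutine.resume') -> 'coroutine',
#      get_function_module('print') -> 'basic'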
def regenerate(filename, modules):
with open(filename) as fp:
content = fp.read()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(filename, 'w') as fp:
fp.write(header)
fp.write('MODULES = %s\n\n' % pprint.pformat(modules))
fp.write(footer)
def run():
version = get_newest_version()
functions = set()
for v in ('5.2', version):
print('> Downloading function index for Lua %s' % v)
f = get_lua_functions(v)
print('> %d functions found, %d new:' %
(len(f), len(set(f) - functions)))
functions |= set(f)
functions = sorted(functions)
modules = {}
for full_function_name in functions:
print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
modules = {k: tuple(v) for k, v in modules.items()}
regenerate(__file__, modules)
run()
|
|
# Copyright 2019 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This test suite attempts to test the things that require a real BIGIP
device and that unit testing cannot cover. For example, the unit tests can
cover the case in which the beginning of the URL is correct up to the
collection object, i.e. https://192.168.1.1/mgmt/tm/. They cannot test that
the collection objects after that point are correct,
i.e. https://192.168.1.1/mgmt/tm/boguscollection
'''
from distutils.version import LooseVersion
from icontrol.authtoken import iControlRESTTokenAuth
from icontrol.session import iControlRESTSession
from requests.exceptions import HTTPError
from requests.exceptions import SSLError
import os
import pytest
import time
@pytest.fixture
def modules():
result = [
'am', 'afm', 'apm', 'asm', 'avr', 'fps', 'gtm', 'ilx',
'lc', 'ltm', 'pem', 'sam', 'swg', 'vcmp'
]
return result
@pytest.fixture(autouse=True)
def skip_module_missing(request, modules, opt_bigip, opt_username, opt_password, opt_port):
if request.node.get_marker('skip_module_missing'):
marker = request.node.get_marker('skip_module_missing').args[0]
if marker in modules:
try:
from f5.bigip import ManagementRoot
except ImportError:
pytest.skip('Skipping test because I cannot determine if "{0}" is not provisioned'.format(marker))
mgmt = ManagementRoot(opt_bigip, opt_username, opt_password, port=opt_port, token=True)
provision = mgmt.tm.sys.provision
resource = getattr(provision, marker)
resource = resource.load()
result = resource.attrs
if str(result['level']) == 'none':
pytest.skip('Skipping test because "{0}" is not provisioned'.format(marker))
nat_data = {
'name': 'foo',
'partition': 'Common',
'originatingAddress': '192.168.1.1',
'translationAddress': '192.168.2.1',
}
topology_data = {
'name': 'ldns: subnet 192.168.110.0/24 server: subnet 192.168.100.0/24'
}
iapp_templ_data = {
"name": "test_templ",
"partition": "Common",
"actions": {
"definition":
{
"implementation": '''tmsh::create {
ltm pool /Common/test_serv.app/test_pool
load-balancing-mode least-connections-node
members replace-all-with {128.0.0.2:8080{address 128.0.0.2}}
}''',
"presentation": ""
}
}
}
iapp_serv_data = {
"name": "test_serv",
"partition": "Common",
"template": "/Common/test_templ"
}
iapp_templ_data_subpath_v11 = {
"name": "test_templ_subpath",
"partition": "Common",
"actions": {
"definition":
{
"implementation": '''tmsh::create { net vlan v102 }
tmsh::create { net self self.v102 address 192.168.1.5/24 vlan v102 }
tmsh::create { gtm datacenter dc1 }
tmsh::create { auth partition part1 }
tmsh::cd { /part1 }
tmsh::create { ltm virtual v1 destination 192.168.1.100:80 }
tmsh::cd { /Common }
tmsh::create { gtm server ltm11 addresses add { 192.168.1.5 } datacenter dc1
virtual-servers replace-all-with { /part1/v1 { destination 192.168.1.100:80 } } }
tmsh::cd { /part1 }
tmsh::create { gtm pool p1 members replace-all-with { /Common/ltm11:/part1/v1 } }''',
"presentation": ""
}
}
}
iapp_serv_data_subpath = {
"name": "test_serv_subpath",
"partition": "Common",
"template": "/Common/test_templ_subpath"
}
@pytest.fixture
def setup_subpath(request, ICR, BASE_URL):
app_templ_url = BASE_URL + 'sys/application/template/'
app_serv_url = BASE_URL + 'sys/application/service/'
def teardown_iapp():
try:
ICR.delete(
app_serv_url, uri_as_parts=True,
name='test_serv', partition='Common',
subPath='test_serv.app')
except Exception:
pass
try:
ICR.delete(
app_templ_url, uri_as_parts=True,
name='test_templ', partition='Common')
except Exception:
pass
teardown_iapp()
ICR.post(app_templ_url, json=iapp_templ_data)
try:
ICR.post(app_serv_url, json=iapp_serv_data)
except HTTPError as ex:
# The creation of an iapp service does cause a 404 error in bigip
# versions up to but excluding 12.0
if ex.response.status_code == 404:
pass
request.addfinalizer(teardown_iapp)
return app_serv_url
@pytest.fixture
def setup_subpath_alt(request, ICR, BASE_URL):
app_templ_url = BASE_URL + 'sys/application/template/'
app_serv_url = BASE_URL + 'sys/application/service/'
def teardown_iapp():
try:
ICR.delete(
app_serv_url, uri_as_parts=True,
name='test_serv_subpath', partition='Common',
subPath='test_serv_subpath.app')
except Exception:
pass
try:
ICR.delete(
app_templ_url, uri_as_parts=True,
name='test_templ_subpath', partition='Common')
except Exception:
pass
teardown_iapp()
ICR.post(app_templ_url, json=iapp_templ_data_subpath_v11)
try:
ICR.post(app_serv_url, json=iapp_serv_data_subpath)
except HTTPError as ex:
# The creation of an iapp service does cause a 404 error in bigip
# versions up to but excluding 12.0
if ex.response.status_code == 404:
pass
request.addfinalizer(teardown_iapp)
return app_serv_url
def teardown_nat(request, icr, url, name, partition):
'''Remove the nat object that we create during a test '''
def teardown():
icr.delete(url, uri_as_parts=True, name=name, partition=partition)
request.addfinalizer(teardown)
def teardown_topology(request, icr, url, name):
"""Remove the topology object that we create during a test."""
def teardown():
icr.delete(url, uri_as_parts=True, transform_name=True, name=name)
request.addfinalizer(teardown)
def invalid_url(func, url):
'''Reusable test to make sure that we get 404 for invalid URL '''
with pytest.raises(HTTPError) as err:
func(url)
return (err.value.response.status_code == 404 and
'Unexpected Error: Not Found for uri: ' + url
in str(err.value))
def invalid_credentials(user, password, url):
'''Reusable test to make sure that we get 401 for invalid creds '''
icr = iControlRESTSession(user, password)
with pytest.raises(HTTPError) as err:
icr.get(url)
return (err.value.response.status_code == 401 and
'401 Client Error: F5 Authorization Required' in str(err.value))
def invalid_token_credentials(user, password, url):
'''Reusable test to make sure that we get 401 for invalid token creds '''
icr = iControlRESTSession(user, password, token=True)
with pytest.raises(HTTPError) as err:
icr.get(url)
return (err.value.response.status_code == 401 and
'Authentication required!' in str(err.value))
def test_get_with_subpath(setup_subpath, ICR, BASE_URL):
# The iapp creates a pool. We should be able to get that pool with subPath
app_serv_url = setup_subpath
res = ICR.get(
app_serv_url, name='test_serv',
partition='Common', subPath='test_serv.app')
assert res.status_code == 200
pool_uri = BASE_URL + 'ltm/pool/'
pool_res = ICR.get(
pool_uri, name='test_pool',
partition='Common', subPath='test_serv.app')
assert pool_res.status_code == 200
data = pool_res.json()
assert data['items'][0]['subPath'] == 'test_serv.app'
assert data['items'][0]['name'] == 'test_pool'
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) >= LooseVersion(
'12.0.0'),
reason='No GTM Pool type, introduced in 12.0+'
)
def test_get_with_subpath_transform(setup_subpath_alt, ICR, BASE_URL):
app_serv_url = setup_subpath_alt
res = ICR.get(
app_serv_url, name='test_serv_subpath',
partition='Common', subPath='test_serv_subpath.app')
assert res.status_code == 200
pool_uri = BASE_URL + 'gtm/pool/~part1~p1/members/'
poolmem_res = ICR.get(pool_uri, name='v1', partition='Common', subPath='ltm11:/part1')
assert poolmem_res.status_code == 200
data = poolmem_res.json()
assert data['items'][0]['name'] == 'v1'
assert data['items'][0]['subPath'] == 'ltm11:/part1'
def test_get(ICR, GET_URL):
'''Test a GET request to a valid url
Pass: Returns a 200 with proper json
'''
response = ICR.get(GET_URL)
assert response.status_code == 200
assert response.json()
def test_get_invalid_url(ICR, FAKE_URL):
'''Test a GET to an invalid URL.
Pass: Returns a 404 with a proper message
'''
assert invalid_url(ICR.get, FAKE_URL)
def test_post(request, ICR, POST_URL):
'''Test a POST request to a valid url
Pass: Returns a 200 and the json object is set correctly
'''
teardown_nat(
request, ICR, POST_URL, nat_data['name'], nat_data['partition'])
response = ICR.post(POST_URL, json=nat_data)
response_data = response.json()
assert response.status_code == 200
assert(response_data['name'] == nat_data['name'])
assert(response_data['partition'] == nat_data['partition'])
assert(response_data['originatingAddress'] ==
nat_data['originatingAddress'])
assert(response_data['translationAddress'] ==
nat_data['translationAddress'])
def test_post_invalid_url(ICR, FAKE_URL):
'''Test a POST request to an invalid url.
Pass: Returns a 404 with a proper message
'''
assert invalid_url(ICR.post, FAKE_URL)
def test_put(request, ICR, POST_URL):
'''Test a PUT request to a valid url.
Pass: Returns a 200 and the json object is set correctly
'''
data = {'originatingAddress': '192.168.1.50'}
teardown_nat(
request, ICR, POST_URL, nat_data['name'], nat_data['partition'])
ICR.post(POST_URL, json=nat_data)
response = ICR.put(
POST_URL,
name=nat_data['name'],
partition=nat_data['partition'],
uri_as_parts=True,
json=data)
response_data = response.json()
assert response.status_code == 200
assert response_data['originatingAddress'] == data['originatingAddress']
assert response_data['name'] == nat_data['name']
assert response_data['partition'] == nat_data['partition']
assert response_data['translationAddress'] == \
nat_data['translationAddress']
def test_put_invalid_url(ICR, FAKE_URL):
'''Test a PUT request to an invalid url.
Pass: Return a 404 with a proper error message
'''
assert invalid_url(ICR.put, FAKE_URL)
def test_patch(request, ICR, POST_URL):
'''Test a PATCH request to a valid url.
Pass: Returns a 200 and the json object is set correctly
'''
data = {'originatingAddress': '192.168.1.50'}
teardown_nat(
request, ICR, POST_URL, nat_data['name'], nat_data['partition'])
ICR.post(POST_URL, json=nat_data)
response = ICR.patch(
POST_URL,
name=nat_data['name'],
partition=nat_data['partition'],
uri_as_parts=True,
json=data)
response_data = response.json()
assert response.status_code == 200
assert response_data['originatingAddress'] == data['originatingAddress']
assert response_data['name'] == nat_data['name']
assert response_data['partition'] == nat_data['partition']
assert response_data['translationAddress'] == \
nat_data['translationAddress']
def test_patch_invalid_url(ICR, FAKE_URL):
'''Test a PATCH request to an invalid url.
Pass: Return a 404 with a proper error message
'''
assert invalid_url(ICR.patch, FAKE_URL)
def test_delete(request, ICR, POST_URL):
'''Test a DELETE request to a valid url.
Pass: Return a 200 and the json is empty. Subsequent GET returns a 404
error because the object is no longer found.
'''
ICR.post(POST_URL, json=nat_data)
response = ICR.delete(
POST_URL,
name=nat_data['name'],
partition=nat_data['partition'],
uri_as_parts=True)
assert response.status_code == 200
with pytest.raises(ValueError):
response.json()
with pytest.raises(HTTPError) as err:
ICR.get(
POST_URL,
name=nat_data['name'],
partition=nat_data['partition'],
uri_as_parts=True)
assert err.value.response.status_code == 404
def test_delete_invalid_url(ICR, FAKE_URL):
'''Test a DELETE request to an invalid url.
Pass: Return a 404 with a proper error message
'''
assert invalid_url(ICR.delete, FAKE_URL)
def test_invalid_user(opt_password, GET_URL):
'''Test login with an invalid username and valid password
Pass: Returns 401 with authorization required message
'''
assert invalid_credentials('fakeuser', opt_password, GET_URL)
def test_invalid_password(opt_username, GET_URL):
'''Test login with a valid username and invalid password
Pass: Returns 401 with authorization required message
'''
assert invalid_credentials(opt_username, 'fakepassword', GET_URL)
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) == LooseVersion(
'11.5.4'),
reason='Endpoint does not exist in 11.5.4'
)
def test_token_auth(opt_username, opt_password, GET_URL):
icr = iControlRESTSession(opt_username, opt_password, token=True)
response = icr.get(GET_URL)
assert response.status_code == 200
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) == LooseVersion(
'11.5.4'),
reason='Endpoint does not exist in 11.5.4'
)
def test_token_auth_twice(opt_username, opt_password, GET_URL):
icr = iControlRESTSession(opt_username, opt_password, token=True)
assert icr.session.auth.attempts == 0
response = icr.get(GET_URL)
assert response.status_code == 200
assert icr.session.auth.attempts == 1
response = icr.get(GET_URL)
assert response.status_code == 200
# This token should still be valid, so we should reuse it.
assert icr.session.auth.attempts == 1
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) == LooseVersion(
'11.5.4'),
reason='Endpoint does not exist in 11.5.4'
)
def test_token_auth_expired(opt_username, opt_password, GET_URL):
icr = iControlRESTSession(opt_username, opt_password, token=True)
assert icr.session.auth.attempts == 0
response = icr.get(GET_URL)
assert response.status_code == 200
assert icr.session.auth.attempts == 1
assert icr.session.auth.expiration >= time.time()
# Artificially expire the token
icr.session.auth.expiration = time.time() - 1.0
# Since token is expired, we should get a new one.
response = icr.get(GET_URL)
assert response.status_code == 200
assert icr.session.auth.attempts == 2
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) == LooseVersion(
'11.5.4'),
reason='Endpoint does not exist in 11.5.4'
)
def test_token_invalid_user(opt_password, GET_URL):
assert invalid_token_credentials('fakeuser', opt_password, GET_URL)
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) == LooseVersion(
'11.5.4'),
reason='Endpoint does not exist in 11.5.4'
)
def test_token_invalid_password(opt_username, GET_URL):
assert invalid_token_credentials(opt_username, 'fakepassword', GET_URL)
# You must configure a user that has a non-admin role in a partition for
# test_nonadmin tests to be effective. For instance:
#
# auth user bob {
# description bob
# encrypted-password $6$LsSnHp7J$AIJ2IC8kS.YDrrn/sH6BsxQ...
# partition Common
# partition-access {
# bobspartition {
# role operator
# }
# }
# shell tmsh
# }
#
# Then run these tests with --nonadmin-username=bob --nonadmin-password=changeme
def test_nonadmin_token_auth(opt_nonadmin_username, opt_nonadmin_password,
GET_URL):
if not opt_nonadmin_username or not opt_nonadmin_password:
pytest.skip("No non-admin username/password configured")
icr = iControlRESTSession(opt_nonadmin_username,
opt_nonadmin_password,
token=True)
response = icr.get(GET_URL)
assert response.status_code == 200
def test_nonadmin_token_auth_invalid_password(opt_nonadmin_username,
GET_URL):
if not opt_nonadmin_username:
pytest.skip("No non-admin username/password configured")
assert invalid_token_credentials(
    opt_nonadmin_username, 'fakepassword', GET_URL)
def test_nonadmin_token_auth_invalid_username(opt_nonadmin_password,
GET_URL):
if not opt_nonadmin_password:
pytest.skip("No non-admin username/password configured")
assert invalid_token_credentials(
    'fakeuser', opt_nonadmin_password, GET_URL)
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) > LooseVersion('12.0.0'),
reason='Issue with spaces in the name parameter was resolved in '
'12.1.x and later; a separate test covers those releases'
)
@pytest.mark.skip_module_missing('gtm')
def test_get_special_name_11_x_12_0(request, ICR, BASE_URL):
"""Get the object with '/' characters in name
Due to a bug name kwarg needs to have space in front of "ldns" and
"server" key words when using GET method. We also need to catch and
ignore 404 response to POST due to a bug with topology creation in 11.5.4
"""
ending = 'gtm/topology/'
topology_url = BASE_URL + ending
load_name = ' ldns: subnet 192.168.110.0/24 server: subnet ' \
'192.168.100.0/24'
teardown_topology(request, ICR, topology_url, load_name)
try:
ICR.post(topology_url, json=topology_data)
except HTTPError as err:
if err.response.status_code == 404:
pass
else:
raise
response = ICR.get(topology_url, uri_as_parts=True, transform_name=True,
name=load_name)
assert response.status_code == 200
data = response.json()
assert data['name'] == load_name
assert data['kind'] == 'tm:gtm:topology:topologystate'
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
'12.1.0'),
reason='Issue with spaces in the name parameter was only resolved in '
'12.1.x and up; any lower version would fail this test'
)
@pytest.mark.skip_module_missing('gtm')
def test_get_special_name_12_1(request, ICR, BASE_URL):
"""Get the object with '/' characters in name
Since the blank space issue was fixed in 12.1.0,
this test had to change.
"""
ending = 'gtm/topology/'
topology_url = BASE_URL + ending
load_name = 'ldns: subnet 192.168.110.0/24 server: subnet ' \
'192.168.100.0/24'
teardown_topology(request, ICR, topology_url, load_name)
try:
ICR.post(topology_url, json=topology_data)
except HTTPError as err:
if err.response.status_code == 404:
pass
else:
raise
response = ICR.get(topology_url, uri_as_parts=True, transform_name=True,
name=load_name)
assert response.status_code == 200
data = response.json()
assert data['name'] == load_name
assert data['kind'] == 'tm:gtm:topology:topologystate'
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.1.0'),
reason='GTM must be provisioned for this test'
)
@pytest.mark.skip_module_missing('gtm')
def test_delete_special_name(request, ICR, BASE_URL):
"""Test a DELETE request to a valid url.
Pass: Return a 200 and the json is empty. Subsequent GET returns a 404
error because the object is no longer found.
"""
ending = 'gtm/topology/'
topology_url = BASE_URL + ending
try:
ICR.post(topology_url, json=topology_data)
except HTTPError as err:
if err.response.status_code == 404:
pass
else:
raise
response = ICR.delete(
topology_url,
name=topology_data['name'],
uri_as_parts=True,
transform_name=True)
assert response.status_code == 200
with pytest.raises(ValueError):
response.json()
with pytest.raises(HTTPError) as err:
ICR.get(
topology_url,
name=topology_data['name'],
uri_as_parts=True,
transform_name=True)
assert err.value.response.status_code == 404
def test_ssl_verify(opt_username, opt_password, GET_URL, opt_ca_bundle):
"""Test connection with a trusted certificate"""
if not opt_ca_bundle:
pytest.skip("No CA bundle configured")
icr = iControlRESTSession(opt_username, opt_password,
token=True, verify=opt_ca_bundle)
icr.get(GET_URL)
def test_ssl_verify_fail(opt_username, opt_password, GET_URL):
"""Test connection with an untrusted certificate"""
dir_path = os.path.dirname(os.path.realpath(__file__))
ca_bundle = '%s/dummy-ca-cert.pem' % dir_path
icr = iControlRESTSession(opt_username, opt_password,
verify=ca_bundle)
with pytest.raises(SSLError) as excinfo:
icr.get(GET_URL)
assert 'certificate verify failed' in str(excinfo.value)
def test_get_token_ssl_verify_fail(opt_username, opt_password, opt_bigip, opt_port):
"""Test token retrival with an untrusted certificate"""
dir_path = os.path.dirname(os.path.realpath(__file__))
ca_bundle = '%s/dummy-ca-cert.pem' % dir_path
icr = iControlRESTTokenAuth(opt_username, opt_password,
verify=ca_bundle)
with pytest.raises(SSLError) as excinfo:
icr.get_new_token('{0}:{1}'.format(opt_bigip, opt_port))
assert 'certificate verify failed' in str(excinfo.value)
def test_using_stashed_tokens(GET_URL, opt_bigip, opt_username, opt_password):
icr1 = iControlRESTSession(opt_username, opt_password, token='tmos')
icr2 = iControlRESTSession(opt_username, opt_password, token='tmos')
# Trigger token creation
icr1.get(GET_URL)
icr2.get(GET_URL)
# Ensure we have two completely different sessions here
assert icr1.token != icr2.token
# Ensure that both of them are valid
response = icr1.get(GET_URL)
assert response.status_code == 200
assert response.json()
response = icr2.get(GET_URL)
assert response.status_code == 200
assert response.json()
# Overwrite one session with another. This is illustrating the behavior
# one might see when loading a cookie from disk.
icr1.token = icr2.token
# Ensure we indeed overwrote the tokens
assert icr1.token == icr2.token
# Recheck to make sure that all web requests still work
response = icr1.get(GET_URL)
assert response.status_code == 200
assert response.json()
response = icr2.get(GET_URL)
assert response.status_code == 200
assert response.json()
# Create new object with no token data
icr3 = iControlRESTSession(opt_username, opt_password, token='tmos')
assert icr3.token is None
# Give token to new session
icr3.token = icr2.token
# Ensure the new object can talk
response = icr3.get(GET_URL)
assert response.status_code == 200
assert response.json()
# Ensure new object did not get new token but used existing one
assert icr3.token == icr2.token
# Provide the token via object instantiation
icr4 = iControlRESTSession(
opt_username, opt_password, token='tmos',
token_to_use=icr2.token
)
# Ensure the token was actually given
assert icr4.token == icr2.token
# Ensure the provided token works
response = icr4.get(GET_URL)
assert response.status_code == 200
assert response.json()
def test_using_tmos_token(GET_URL, opt_bigip, opt_username, opt_password):
icr1 = iControlRESTSession(opt_username, opt_password, token='tmos')
response = icr1.get(GET_URL)
assert response.status_code == 200
assert response.json()
def test_using_tmos_auth_provider(GET_URL, opt_bigip, opt_username, opt_password):
icr1 = iControlRESTSession(opt_username, opt_password, auth_provider='tmos')
response = icr1.get(GET_URL)
assert response.status_code == 200
assert response.json()
def test_debug_tracing(request, POST_URL, GET_URL, opt_bigip, opt_username, opt_password):
icr1 = iControlRESTSession(opt_username, opt_password, auth_provider='tmos')
icr1.debug = True
icr1.get(GET_URL)
response = icr1.post(POST_URL, json=nat_data)
response.json()
teardown_nat(request, icr1, POST_URL, nat_data['name'], nat_data['partition'])
assert len(icr1.debug_output) > 0
|
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to the OpenStack Images API.
"""
import argparse
import json
import logging
import os
from os.path import expanduser
import re
import sys
from keystoneclient.v2_0 import client as ksclient
import netaddr
import glanceclient
from glanceclient import exc
from glanceclient.common import utils
from glanceclient.openstack.common import strutils
class OpenStackImagesShell(object):
def get_base_parser(self):
parser = argparse.ArgumentParser(
prog='glance',
description=__doc__.strip(),
epilog='See "glance help COMMAND" '
'for help on a specific command.',
add_help=False,
formatter_class=HelpFormatter,
)
# Global arguments
parser.add_argument('-h', '--help',
action='store_true',
help=argparse.SUPPRESS,
)
parser.add_argument('--version',
action='version',
version=glanceclient.__version__)
parser.add_argument('-d', '--debug',
default=bool(utils.env('GLANCECLIENT_DEBUG')),
action='store_true',
help='Defaults to env[GLANCECLIENT_DEBUG]')
parser.add_argument('-v', '--verbose',
default=False, action="store_true",
help="Print more verbose output")
parser.add_argument('--get-schema',
default=False, action="store_true",
dest='get_schema',
help='Force retrieving the schema used to generate'
' portions of the help text rather than using'
' a cached copy. Ignored with api version 1')
parser.add_argument('-k', '--insecure',
default=False,
action='store_true',
help='Explicitly allow glanceclient to perform '
'\"insecure SSL\" (https) requests. The server\'s '
'certificate will not be verified against any '
'certificate authorities. This option should '
'be used with caution.')
parser.add_argument('--cert-file',
help='Path of certificate file to use in SSL '
'connection. This file can optionally be '
'prepended with the private key.')
parser.add_argument('--key-file',
help='Path of client key to use in SSL '
'connection. This option is not necessary '
'if your key is prepended to your cert file.')
parser.add_argument('--os-cacert',
metavar='<ca-certificate-file>',
dest='os_cacert',
default=utils.env('OS_CACERT'),
help='Path of CA TLS certificate(s) used to '
'verify the remote server\'s certificate. '
'Without this option glance looks for the '
'default system CA certificates.')
parser.add_argument('--ca-file',
dest='os_cacert',
help='DEPRECATED! Use --os-cacert.')
parser.add_argument('--timeout',
default=600,
help='Number of seconds to wait for a response')
parser.add_argument('--no-ssl-compression',
dest='ssl_compression',
default=True, action='store_false',
help='Disable SSL compression when using https.')
parser.add_argument('-f', '--force',
dest='force',
default=False, action='store_true',
help='Prevent select actions from requesting '
'user confirmation.')
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('--dry-run',
default=False,
action='store_true',
help='DEPRECATED! Only used for deprecated '
'legacy commands.')
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('--ssl',
dest='use_ssl',
default=False,
action='store_true',
help='DEPRECATED! Send a fully-formed endpoint '
'using --os-image-url instead.')
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-H', '--host',
metavar='ADDRESS',
help='DEPRECATED! Send a fully-formed endpoint '
'using --os-image-url instead.')
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-p', '--port',
dest='port',
metavar='PORT',
type=int,
default=9292,
help='DEPRECATED! Send a fully-formed endpoint '
'using --os-image-url instead.')
parser.add_argument('--os-username',
default=utils.env('OS_USERNAME'),
help='Defaults to env[OS_USERNAME]')
parser.add_argument('--os_username',
help=argparse.SUPPRESS)
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-I',
dest='os_username',
help='DEPRECATED! Use --os-username.')
parser.add_argument('--os-password',
default=utils.env('OS_PASSWORD'),
help='Defaults to env[OS_PASSWORD]')
parser.add_argument('--os_password',
help=argparse.SUPPRESS)
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-K',
dest='os_password',
help='DEPRECATED! Use --os-password.')
parser.add_argument('--os-tenant-id',
default=utils.env('OS_TENANT_ID'),
help='Defaults to env[OS_TENANT_ID]')
parser.add_argument('--os_tenant_id',
help=argparse.SUPPRESS)
parser.add_argument('--os-tenant-name',
default=utils.env('OS_TENANT_NAME'),
help='Defaults to env[OS_TENANT_NAME]')
parser.add_argument('--os_tenant_name',
help=argparse.SUPPRESS)
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-T',
dest='os_tenant_name',
help='DEPRECATED! Use --os-tenant-name.')
parser.add_argument('--os-auth-url',
default=utils.env('OS_AUTH_URL'),
help='Defaults to env[OS_AUTH_URL]')
parser.add_argument('--os_auth_url',
help=argparse.SUPPRESS)
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-N',
dest='os_auth_url',
help='DEPRECATED! Use --os-auth-url.')
parser.add_argument('--os-region-name',
default=utils.env('OS_REGION_NAME'),
help='Defaults to env[OS_REGION_NAME]')
parser.add_argument('--os_region_name',
help=argparse.SUPPRESS)
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-R',
dest='os_region_name',
help='DEPRECATED! Use --os-region-name.')
parser.add_argument('--os-auth-token',
default=utils.env('OS_AUTH_TOKEN'),
help='Defaults to env[OS_AUTH_TOKEN]')
parser.add_argument('--os_auth_token',
help=argparse.SUPPRESS)
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-A', '--auth_token',
dest='os_auth_token',
help='DEPRECATED! Use --os-auth-token.')
parser.add_argument('--os-image-url',
default=utils.env('OS_IMAGE_URL'),
help='Defaults to env[OS_IMAGE_URL]')
parser.add_argument('--os_image_url',
help=argparse.SUPPRESS)
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-U', '--url',
dest='os_image_url',
help='DEPRECATED! Use --os-image-url.')
parser.add_argument('--os-image-api-version',
default=utils.env(
'OS_IMAGE_API_VERSION', default='1'),
help='Defaults to env[OS_IMAGE_API_VERSION] or 1')
parser.add_argument('--os_image_api_version',
help=argparse.SUPPRESS)
parser.add_argument('--os-service-type',
default=utils.env('OS_SERVICE_TYPE'),
help='Defaults to env[OS_SERVICE_TYPE]')
parser.add_argument('--os_service_type',
help=argparse.SUPPRESS)
parser.add_argument('--os-endpoint-type',
default=utils.env('OS_ENDPOINT_TYPE'),
help='Defaults to env[OS_ENDPOINT_TYPE]')
parser.add_argument('--os_endpoint_type',
help=argparse.SUPPRESS)
#NOTE(bcwaldon): DEPRECATED
parser.add_argument('-S', '--os_auth_strategy',
help='DEPRECATED! This option is '
'completely ignored.')
return parser
def get_subcommand_parser(self, version):
parser = self.get_base_parser()
self.subcommands = {}
subparsers = parser.add_subparsers(metavar='<subcommand>')
submodule = utils.import_versioned_module(version, 'shell')
self._find_actions(subparsers, submodule)
self._find_actions(subparsers, self)
return parser
def _find_actions(self, subparsers, actions_module):
for attr in (a for a in dir(actions_module) if a.startswith('do_')):
# Subcommand names are hyphen-separated rather than underscore-separated.
command = attr[3:].replace('_', '-')
callback = getattr(actions_module, attr)
desc = callback.__doc__ or ''
help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
subparser = subparsers.add_parser(command,
help=help,
description=desc,
add_help=False,
formatter_class=HelpFormatter
)
subparser.add_argument('-h', '--help',
action='help',
help=argparse.SUPPRESS,
)
self.subcommands[command] = subparser
for (args, kwargs) in arguments:
subparser.add_argument(*args, **kwargs)
subparser.set_defaults(func=callback)
# TODO(dtroyer): move this into the common client support?
# Compatibility check to remove API version as the trailing component
# in a service endpoint; also removes a trailing '/'
def _strip_version(self, endpoint):
"""Strip version from the last component of endpoint if present."""
# Get rid of trailing '/' if present
if endpoint.endswith('/'):
endpoint = endpoint[:-1]
url_bits = endpoint.split('/')
# regex to match 'v1' or 'v2.0' etc
if re.match(r'v\d+\.?\d*', url_bits[-1]):
endpoint = '/'.join(url_bits[:-1])
return endpoint
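# For example (illustrative endpoints):
#   'http://glance.example.com:9292/v1/'  -> 'http://glance.example.com:9292'
#   'http://glance.example.com:9292/v2.0' -> 'http://glance.example.com:9292'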
def _get_ksclient(self, **kwargs):
"""Get an endpoint and auth token from Keystone.
:param username: name of user
:param password: user's password
:param tenant_id: unique identifier of tenant
:param tenant_name: name of tenant
:param auth_url: endpoint to authenticate against
"""
return ksclient.Client(username=kwargs.get('username'),
password=kwargs.get('password'),
tenant_id=kwargs.get('tenant_id'),
tenant_name=kwargs.get('tenant_name'),
auth_url=kwargs.get('auth_url'),
cacert=kwargs.get('cacert'),
insecure=kwargs.get('insecure'))
def _get_endpoint(self, client, **kwargs):
"""Get an endpoint using the provided keystone client."""
endpoint_kwargs = {
'service_type': kwargs.get('service_type') or 'image',
'endpoint_type': kwargs.get('endpoint_type') or 'publicURL',
}
if kwargs.get('region_name'):
endpoint_kwargs['attr'] = 'region'
endpoint_kwargs['filter_value'] = kwargs.get('region_name')
endpoint = client.service_catalog.url_for(**endpoint_kwargs)
return self._strip_version(endpoint)
def _get_image_url(self, args):
"""Translate the available url-related options into a single string.
Return the endpoint that should be used to talk to Glance if a
clear decision can be made. Otherwise, return None.
"""
if args.os_image_url:
return args.os_image_url
elif args.host:
# Check if it is legal ipv6 address, if so, need wrap it with '[]'
if netaddr.valid_ipv6(args.host):
args.host = '[%s]' % args.host
scheme = 'https' if args.use_ssl else 'http'
return '%s://%s:%s/' % (scheme, args.host, args.port)
else:
return None
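# For example (illustrative values): with --os-image-url unset, -H '::1' and
# the default port, the IPv6 host is wrapped and 'http://[::1]:9292/' is
# returned; if --os-image-url is given, that value is returned unchanged.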
def _get_endpoint_and_token(self, args, force_auth=False):
image_url = self._get_image_url(args)
auth_token = args.os_auth_token
auth_reqd = force_auth or (utils.is_authentication_required(args.func)
and not (auth_token and image_url))
if not auth_reqd:
endpoint = image_url
token = args.os_auth_token
else:
if not args.os_username:
raise exc.CommandError("You must provide a username via"
" either --os-username or "
"env[OS_USERNAME]")
if not args.os_password:
raise exc.CommandError("You must provide a password via"
" either --os-password or "
"env[OS_PASSWORD]")
if not (args.os_tenant_id or args.os_tenant_name):
raise exc.CommandError("You must provide a tenant_id via"
" either --os-tenant-id or "
"via env[OS_TENANT_ID]")
if not args.os_auth_url:
raise exc.CommandError("You must provide an auth url via"
" either --os-auth-url or "
"via env[OS_AUTH_URL]")
kwargs = {
'username': args.os_username,
'password': args.os_password,
'tenant_id': args.os_tenant_id,
'tenant_name': args.os_tenant_name,
'auth_url': args.os_auth_url,
'service_type': args.os_service_type,
'endpoint_type': args.os_endpoint_type,
'cacert': args.os_cacert,
'insecure': args.insecure,
'region_name': args.os_region_name,
}
_ksclient = self._get_ksclient(**kwargs)
token = args.os_auth_token or _ksclient.auth_token
endpoint = args.os_image_url or self._get_endpoint(_ksclient,
**kwargs)
return endpoint, token
def _get_versioned_client(self, api_version, args, force_auth=False):
endpoint, token = self._get_endpoint_and_token(args,
force_auth=force_auth)
kwargs = {
'token': token,
'insecure': args.insecure,
'timeout': args.timeout,
'cacert': args.os_cacert,
'cert_file': args.cert_file,
'key_file': args.key_file,
'ssl_compression': args.ssl_compression
}
client = glanceclient.Client(api_version, endpoint, **kwargs)
return client
def _cache_schema(self, options, home_dir='~/.glanceclient'):
homedir = expanduser(home_dir)
if not os.path.exists(homedir):
os.makedirs(homedir)
schema_file_path = homedir + os.sep + "image_schema.json"
if (not os.path.exists(schema_file_path)) or options.get_schema:
try:
client = self._get_versioned_client('2', options,
force_auth=True)
schema = client.schemas.get("image")
with open(schema_file_path, 'w') as f:
f.write(json.dumps(schema.raw()))
except Exception as e:
#NOTE(esheffield) do nothing here, we'll get a message later
#if the schema is missing
pass
def main(self, argv):
# Parse args once to find version
parser = self.get_base_parser()
(options, args) = parser.parse_known_args(argv)
# build available subcommands based on version
api_version = options.os_image_api_version
if api_version == '2':
self._cache_schema(options)
subcommand_parser = self.get_subcommand_parser(api_version)
self.parser = subcommand_parser
# Handle top-level --help/-h before attempting to parse
# a command off the command line
if options.help or not argv:
self.do_help(options)
return 0
# Parse args again and call whatever callback was selected
args = subcommand_parser.parse_args(argv)
# Short-circuit and deal with help command right away.
if args.func == self.do_help:
self.do_help(args)
return 0
LOG = logging.getLogger('glanceclient')
LOG.addHandler(logging.StreamHandler())
LOG.setLevel(logging.DEBUG if args.debug else logging.INFO)
client = self._get_versioned_client(api_version, args,
force_auth=False)
try:
args.func(client, args)
except exc.Unauthorized:
raise exc.CommandError("Invalid OpenStack Identity credentials.")
@utils.arg('command', metavar='<subcommand>', nargs='?',
help='Display help for <subcommand>')
def do_help(self, args):
"""
Display help about this program or one of its subcommands.
"""
if getattr(args, 'command', None):
if args.command in self.subcommands:
self.subcommands[args.command].print_help()
else:
raise exc.CommandError("'%s' is not a valid subcommand" %
args.command)
else:
self.parser.print_help()
class HelpFormatter(argparse.HelpFormatter):
def start_section(self, heading):
# Title-case the headings
heading = '%s%s' % (heading[0].upper(), heading[1:])
super(HelpFormatter, self).start_section(heading)
def main():
try:
OpenStackImagesShell().main(map(strutils.safe_decode, sys.argv[1:]))
except KeyboardInterrupt:
print >> sys.stderr, '... terminating glance client'
sys.exit(1)
except Exception as e:
print >> sys.stderr, utils.exception_to_str(e)
sys.exit(1)
|
|
"""
kombu.transport.SQS
===================
Amazon SQS transport module for Kombu. This package implements an AMQP-like
interface on top of Amazon's SQS service, with the goal of being optimized
for high performance and reliability.
The default settings for this module are currently focused on high
performance in task-queue situations where tasks are small, idempotent and
run very fast.
SQS Features supported by this transport:
Long Polling:
http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/
sqs-long-polling.html
Long polling is enabled by setting the `wait_time_seconds` transport
option to a number > 1. Amazon supports up to 20 seconds. This is
disabled for now, but will be enabled by default in the near future.
Batch API Actions:
http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/
sqs-batch-api.html
The default behavior of the SQS Channel.drain_events() method is to
request up to the 'prefetch_count' messages on every request to SQS.
These messages are stored locally in a deque object and passed back
to the Transport until the deque is empty, before triggering a new
API call to Amazon.
This behavior dramatically speeds up the rate that you can pull tasks
from SQS when you have short-running tasks (or a large number of workers).
When a Celery worker has multiple queues to monitor, it will pull down
up to 'prefetch_count' messages from queueA and work on them all before
moving on to queueB. If queueB is empty, it will wait until
'polling_interval' expires before moving back and checking on queueA.
"""
from __future__ import absolute_import
import collections
import socket
import string
from anyjson import loads, dumps
import boto
from boto import exception
from boto import sdb as _sdb
from boto import sqs as _sqs
from boto.sdb.domain import Domain
from boto.sdb.connection import SDBConnection
from boto.sqs.connection import SQSConnection
from boto.sqs.message import Message
from kombu.five import Empty, range, text_t
from kombu.log import get_logger
from kombu.utils import cached_property, uuid
from kombu.utils.encoding import bytes_to_str, safe_str
from kombu.transport.virtual import scheduling
from . import virtual
logger = get_logger(__name__)
# dots are replaced by dash, all other punctuation
# replaced by underscore.
CHARS_REPLACE_TABLE = dict((ord(c), 0x5f)
for c in string.punctuation if c not in '-_.')
CHARS_REPLACE_TABLE[0x2e] = 0x2d # '.' -> '-'
def maybe_int(x):
try:
return int(x)
except ValueError:
return x
BOTO_VERSION = tuple(maybe_int(part) for part in boto.__version__.split('.'))
W_LONG_POLLING = BOTO_VERSION >= (2, 8)
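# For example, boto.__version__ == '2.9.5' gives BOTO_VERSION == (2, 9, 5),
# while '2.8.0b1' gives (2, 8, '0b1'); both satisfy the >= (2, 8) check for
# long-polling support above.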
#: SQS bulk get supports a maximum of 10 messages at a time.
SQS_MAX_MESSAGES = 10
class Table(Domain):
"""Amazon SimpleDB domain describing the message routing table."""
# caches queues already bound, so we don't have to declare them again.
_already_bound = set()
def routes_for(self, exchange):
"""Iterator giving all routes for an exchange."""
return self.select("""WHERE exchange = '%s'""" % exchange)
def get_queue(self, queue):
"""Get binding for queue."""
qid = self._get_queue_id(queue)
if qid:
return self.get_item(qid)
def create_binding(self, queue):
"""Get binding item for queue.
Creates the item if it doesn't exist.
"""
item = self.get_queue(queue)
if item:
return item, item['id']
id = uuid()
return self.new_item(id), id
def queue_bind(self, exchange, routing_key, pattern, queue):
if queue not in self._already_bound:
binding, id = self.create_binding(queue)
binding.update(exchange=exchange,
routing_key=routing_key or '',
pattern=pattern or '',
queue=queue or '',
id=id)
binding.save()
self._already_bound.add(queue)
def queue_delete(self, queue):
"""delete queue by name."""
self._already_bound.discard(queue)
item = self._get_queue_item(queue)
if item:
self.delete_item(item)
def exchange_delete(self, exchange):
"""Delete all routes for `exchange`."""
for item in self.routes_for(exchange):
self.delete_item(item['id'])
def get_item(self, item_name):
"""Uses `consistent_read` by default."""
# Domain is an old-style class, can't use super().
for consistent_read in (False, True):
item = Domain.get_item(self, item_name, consistent_read)
if item:
return item
def select(self, query='', next_token=None,
consistent_read=True, max_items=None):
"""Uses `consistent_read` by default."""
query = """SELECT * FROM `%s` %s""" % (self.name, query)
return Domain.select(self, query, next_token,
consistent_read, max_items)
def _try_first(self, query='', **kwargs):
for c in (False, True):
for item in self.select(query, consistent_read=c, **kwargs):
return item
def get_exchanges(self):
return list(set(i['exchange'] for i in self.select()))
def _get_queue_item(self, queue):
return self._try_first("""WHERE queue = '%s' limit 1""" % queue)
def _get_queue_id(self, queue):
item = self._get_queue_item(queue)
if item:
return item['id']
class Channel(virtual.Channel):
Table = Table
default_region = 'us-east-1'
default_visibility_timeout = 1800 # 30 minutes.
default_wait_time_seconds = 0 # disabled see #198
domain_format = 'kombu%(vhost)s'
_sdb = None
_sqs = None
_queue_cache = {}
_noack_queues = set()
def __init__(self, *args, **kwargs):
super(Channel, self).__init__(*args, **kwargs)
# SQS blows up when you try to create a new queue if one already
# exists with a different visibility_timeout, so this prepopulates
# the queue_cache to protect us from recreating
# queues that are known to already exist.
queues = self.sqs.get_all_queues(prefix=self.queue_name_prefix)
for queue in queues:
self._queue_cache[queue.name] = queue
self._fanout_queues = set()
# The drain_events() method stores extra messages in a local
# Deque object. This allows multiple messages to be requested from
# SQS at once for performance, but maintains the same external API
# to the caller of the drain_events() method.
self._queue_message_cache = collections.deque()
def basic_consume(self, queue, no_ack, *args, **kwargs):
if no_ack:
self._noack_queues.add(queue)
return super(Channel, self).basic_consume(
queue, no_ack, *args, **kwargs
)
def basic_cancel(self, consumer_tag):
if consumer_tag in self._consumers:
queue = self._tag_to_queue[consumer_tag]
self._noack_queues.discard(queue)
return super(Channel, self).basic_cancel(consumer_tag)
def drain_events(self, timeout=None):
"""Return a single payload message from one of our queues.
:raises Empty: if no messages available.
"""
# If we're not allowed to consume or have no consumers, raise Empty
if not self._consumers or not self.qos.can_consume():
raise Empty()
message_cache = self._queue_message_cache
# Check if there are any items in our buffer. If there are any, pop
# off that queue first.
try:
return message_cache.popleft()
except IndexError:
pass
# At this point, go and get more messages from SQS
res, queue = self._poll(self.cycle, timeout=timeout)
message_cache.extend((r, queue) for r in res)
# Now try to pop off the queue again.
try:
return message_cache.popleft()
except IndexError:
raise Empty()
def _reset_cycle(self):
"""Reset the consume cycle.
:returns: a FairCycle object that points to our _get_bulk() method
rather than the standard _get() method. This allows for multiple
messages to be returned at once from SQS (based on the prefetch
limit).
"""
self._cycle = scheduling.FairCycle(
self._get_bulk, self._active_queues, Empty,
)
def entity_name(self, name, table=CHARS_REPLACE_TABLE):
"""Format AMQP queue name into a legal SQS queue name."""
return text_t(safe_str(name)).translate(table)
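# For example (with an illustrative name), entity_name('celery.pidbox/w@host')
# returns 'celery-pidbox_w_host': dots become dashes and all other
# punctuation becomes underscores, per CHARS_REPLACE_TABLE above.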
def _new_queue(self, queue, **kwargs):
"""Ensure a queue with given name exists in SQS."""
# Translate to SQS name for consistency with initial
# _queue_cache population.
queue = self.entity_name(self.queue_name_prefix + queue)
try:
return self._queue_cache[queue]
except KeyError:
q = self._queue_cache[queue] = self.sqs.create_queue(
queue, self.visibility_timeout,
)
return q
def queue_bind(self, queue, exchange=None, routing_key='',
arguments=None, **kwargs):
super(Channel, self).queue_bind(queue, exchange, routing_key,
arguments, **kwargs)
if self.typeof(exchange).type == 'fanout':
self._fanout_queues.add(queue)
def _queue_bind(self, *args):
"""Bind ``queue`` to ``exchange`` with routing key.
Route will be stored in SDB if so enabled.
"""
if self.supports_fanout:
self.table.queue_bind(*args)
def get_table(self, exchange):
"""Get routing table.
Retrieved from SDB if :attr:`supports_fanout`.
"""
if self.supports_fanout:
return [(r['routing_key'], r['pattern'], r['queue'])
for r in self.table.routes_for(exchange)]
return super(Channel, self).get_table(exchange)
def get_exchanges(self):
if self.supports_fanout:
return self.table.get_exchanges()
return super(Channel, self).get_exchanges()
def _delete(self, queue, *args):
"""delete queue by name."""
if self.supports_fanout:
self.table.queue_delete(queue)
super(Channel, self)._delete(queue)
self._queue_cache.pop(queue, None)
def exchange_delete(self, exchange, **kwargs):
"""Delete exchange by name."""
if self.supports_fanout:
self.table.exchange_delete(exchange)
super(Channel, self).exchange_delete(exchange, **kwargs)
def _has_queue(self, queue, **kwargs):
"""Return True if ``queue`` was previously declared."""
if self.supports_fanout:
return bool(self.table.get_queue(queue))
return super(Channel, self)._has_queue(queue)
def _put(self, queue, message, **kwargs):
"""Put message onto queue."""
q = self._new_queue(queue)
m = Message()
m.set_body(dumps(message))
q.write(m)
def _put_fanout(self, exchange, message, **kwargs):
"""Deliver fanout message to all queues in ``exchange``."""
for route in self.table.routes_for(exchange):
self._put(route['queue'], message, **kwargs)
def _get_from_sqs(self, queue, count=1):
"""Retrieve messages from SQS and returns the raw SQS message objects.
:returns: List of SQS message objects
"""
q = self._new_queue(queue)
if W_LONG_POLLING and queue not in self._fanout_queues:
return q.get_messages(
count, wait_time_seconds=self.wait_time_seconds,
)
else: # boto < 2.8
return q.get_messages(count)
def _message_to_python(self, message, queue_name, queue):
payload = loads(bytes_to_str(message.get_body()))
if queue_name in self._noack_queues:
queue.delete_message(message)
else:
payload['properties']['delivery_info'].update({
'sqs_message': message, 'sqs_queue': queue,
})
return payload
def _messages_to_python(self, messages, queue):
"""Convert a list of SQS Message objects into Payloads.
This method handles converting SQS Message objects into
Payloads, and appropriately updating the queue depending on
the 'ack' settings for that queue.
:param messages: A list of SQS Message objects.
:param queue: String name representing the queue they came from
:returns: A list of Payload objects
"""
q = self._new_queue(queue)
return [self._message_to_python(m, queue, q) for m in messages]
def _get_bulk(self, queue, max_if_unlimited=SQS_MAX_MESSAGES):
"""Try to retrieve multiple messages off ``queue``.
Where _get() returns a single Payload object, this method returns a
list of Payload objects. The number of objects returned is determined
by the total number of messages available in the queue and the
number of messages that the QoS object allows (based on the
prefetch_count).
.. note::
Ignores QoS limits so caller is responsible for checking
that we are allowed to consume at least one message from the
queue. get_bulk will then ask QoS for an estimate of
the number of extra messages that we can consume.
args:
queue: The queue name (string) to pull from
returns:
payloads: A list of payload objects returned
"""
# drain_events calls `can_consume` first, consuming
# a token, so we know that we are allowed to consume at least
# one message.
maxcount = self.qos.can_consume_max_estimate()
maxcount = max_if_unlimited if maxcount is None else max(maxcount, 1)
messages = self._get_from_sqs(
queue, count=min(maxcount, SQS_MAX_MESSAGES),
)
if messages:
return self._messages_to_python(messages, queue)
raise Empty()
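# Illustrative sizing (assuming kombu's usual QoS semantics, where
# can_consume_max_estimate() returns the remaining prefetch headroom or
# None when unlimited): with prefetch_count=30 and 25 messages unacked,
# _get_bulk() requests min(5, SQS_MAX_MESSAGES) == 5 messages; with no
# prefetch limit it requests the SQS maximum of 10.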
def _get(self, queue):
"""Try to retrieve a single message off ``queue``."""
messages = self._get_from_sqs(queue, count=1)
if messages:
return self._messages_to_python(messages, queue)[0]
raise Empty()
def _restore(self, message,
unwanted_delivery_info=('sqs_message', 'sqs_queue')):
for unwanted_key in unwanted_delivery_info:
# Remove objects that aren't JSON serializable (Issue #1108).
message.delivery_info.pop(unwanted_key, None)
return super(Channel, self)._restore(message)
def basic_ack(self, delivery_tag):
delivery_info = self.qos.get(delivery_tag).delivery_info
try:
queue = delivery_info['sqs_queue']
except KeyError:
pass
else:
queue.delete_message(delivery_info['sqs_message'])
super(Channel, self).basic_ack(delivery_tag)
def _size(self, queue):
"""Return the number of messages in a queue."""
return self._new_queue(queue).count()
def _purge(self, queue):
"""Delete all current messages in a queue."""
q = self._new_queue(queue)
# SQS is slow at registering messages, so run for a few
# iterations to ensure messages are deleted.
size = 0
for i in range(10):
size += q.count()
if not size:
break
q.clear()
return size
def close(self):
super(Channel, self).close()
for conn in (self._sqs, self._sdb):
if conn:
try:
conn.close()
except AttributeError as exc: # FIXME ???
if "can't set attribute" not in str(exc):
raise
def _get_regioninfo(self, regions):
if self.region:
for _r in regions:
if _r.name == self.region:
return _r
def _aws_connect_to(self, fun, regions):
conninfo = self.conninfo
region = self._get_regioninfo(regions)
return fun(region=region,
aws_access_key_id=conninfo.userid,
aws_secret_access_key=conninfo.password,
port=conninfo.port)
def _next_delivery_tag(self):
return uuid() # See #73
@property
def sqs(self):
if self._sqs is None:
self._sqs = self._aws_connect_to(SQSConnection, _sqs.regions())
return self._sqs
@property
def sdb(self):
if self._sdb is None:
self._sdb = self._aws_connect_to(SDBConnection, _sdb.regions())
return self._sdb
@property
def table(self):
name = self.entity_name(
self.domain_format % {'vhost': self.conninfo.virtual_host})
d = self.sdb.get_object(
'CreateDomain', {'DomainName': name}, self.Table)
d.name = name
return d
@property
def conninfo(self):
return self.connection.client
@property
def transport_options(self):
return self.connection.client.transport_options
@cached_property
def visibility_timeout(self):
return (self.transport_options.get('visibility_timeout') or
self.default_visibility_timeout)
@cached_property
def queue_name_prefix(self):
return self.transport_options.get('queue_name_prefix', '')
@cached_property
def supports_fanout(self):
return self.transport_options.get('sdb_persistence', False)
@cached_property
def region(self):
return self.transport_options.get('region') or self.default_region
@cached_property
def wait_time_seconds(self):
return self.transport_options.get('wait_time_seconds',
self.default_wait_time_seconds)
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
wait_time_seconds = 0
default_port = None
connection_errors = (
virtual.Transport.connection_errors +
(exception.SQSError, socket.error)
)
channel_errors = (
virtual.Transport.channel_errors + (exception.SQSDecodeError, )
)
driver_type = 'sqs'
driver_name = 'sqs'
|
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: anote.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = _descriptor.FileDescriptor(
name='anote.proto',
package='anote.proto',
serialized_pb='\n\x0b\x61note.proto\x12\x0b\x61note.proto\"\xfc\x01\n\x04Task\x12\x0f\n\x07task_id\x18\x01 \x01(\x05\x12\r\n\x05title\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\x0b\n\x03tag\x18\x04 \x03(\t\x12\x0f\n\x07project\x18\x05 \x01(\t\x12\x11\n\tparent_id\x18\x06 \x01(\x05\x12\x10\n\x08\x61ncestor\x18\x07 \x03(\x05\x12\x13\n\x0b\x64\x65scription\x18\x08 \x01(\t\x12\x0f\n\x07note_id\x18\t \x03(\x05\x12\x1f\n\x04note\x18\n \x03(\x0b\x32\x11.anote.proto.Note\x12\x13\n\x0b\x63reate_time\x18\x0b \x01(\x05\x12\x13\n\x0bupdate_time\x18\x0c \x01(\x05\x12\x10\n\x08position\x18\r \x01(\x05\"6\n\x04Note\x12\x0f\n\x07task_id\x18\x01 \x01(\x05\x12\x0f\n\x07note_id\x18\x02 \x01(\x05\x12\x0c\n\x04text\x18\x03 \x01(\t\"6\n\x03Tag\x12\r\n\x05title\x18\x01 \x01(\t\x12\x12\n\noccurrence\x18\x02 \x01(\x05\x12\x0c\n\x04hide\x18\x03 \x01(\x08')
_TASK = _descriptor.Descriptor(
name='Task',
full_name='anote.proto.Task',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='anote.proto.Task.task_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='title', full_name='anote.proto.Task.title', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='anote.proto.Task.status', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag', full_name='anote.proto.Task.tag', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='project', full_name='anote.proto.Task.project', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parent_id', full_name='anote.proto.Task.parent_id', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ancestor', full_name='anote.proto.Task.ancestor', index=6,
number=7, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='anote.proto.Task.description', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='note_id', full_name='anote.proto.Task.note_id', index=8,
number=9, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='note', full_name='anote.proto.Task.note', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='create_time', full_name='anote.proto.Task.create_time', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='update_time', full_name='anote.proto.Task.update_time', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='anote.proto.Task.position', index=12,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=29,
serialized_end=281,
)
_NOTE = _descriptor.Descriptor(
name='Note',
full_name='anote.proto.Note',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='anote.proto.Note.task_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='note_id', full_name='anote.proto.Note.note_id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text', full_name='anote.proto.Note.text', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=283,
serialized_end=337,
)
_TAG = _descriptor.Descriptor(
name='Tag',
full_name='anote.proto.Tag',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='anote.proto.Tag.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='occurrence', full_name='anote.proto.Tag.occurrence', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hide', full_name='anote.proto.Tag.hide', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=339,
serialized_end=393,
)
_TASK.fields_by_name['note'].message_type = _NOTE
DESCRIPTOR.message_types_by_name['Task'] = _TASK
DESCRIPTOR.message_types_by_name['Note'] = _NOTE
DESCRIPTOR.message_types_by_name['Tag'] = _TAG
class Task(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TASK
# @@protoc_insertion_point(class_scope:anote.proto.Task)
class Note(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _NOTE
# @@protoc_insertion_point(class_scope:anote.proto.Note)
class Tag(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TAG
# @@protoc_insertion_point(class_scope:anote.proto.Tag)
# @@protoc_insertion_point(module_scope)
|
|
"""This component provides select entities for UniFi Protect."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
import logging
from typing import Any, Final
from pyunifiprotect.data import (
Camera,
DoorbellMessageType,
IRLEDMode,
Light,
LightModeEnableType,
LightModeType,
Liveview,
RecordingMode,
Viewer,
)
from pyunifiprotect.data.devices import LCDMessage
from homeassistant.components.select import SelectEntity, SelectEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity import EntityCategory
from homeassistant.util.dt import utcnow
from .const import (
DOMAIN,
SERVICE_SET_DOORBELL_MESSAGE,
SET_DOORBELL_LCD_MESSAGE_SCHEMA,
TYPE_EMPTY_VALUE,
)
from .data import ProtectData
from .entity import ProtectDeviceEntity, async_all_device_entities
from .models import ProtectRequiredKeysMixin
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
_KEY_IR = "infrared"
_KEY_REC_MODE = "recording_mode"
_KEY_VIEWER = "viewer"
_KEY_LIGHT_MOTION = "light_motion"
_KEY_DOORBELL_TEXT = "doorbell_text"
_KEY_PAIRED_CAMERA = "paired_camera"
INFRARED_MODES = [
{"id": IRLEDMode.AUTO.value, "name": "Auto"},
{"id": IRLEDMode.ON.value, "name": "Always Enable"},
{"id": IRLEDMode.AUTO_NO_LED.value, "name": "Auto (Filter Only, no LED's)"},
{"id": IRLEDMode.OFF.value, "name": "Always Disable"},
]
LIGHT_MODE_MOTION = "On Motion - Always"
LIGHT_MODE_MOTION_DARK = "On Motion - When Dark"
LIGHT_MODE_DARK = "When Dark"
LIGHT_MODE_OFF = "Manual"
LIGHT_MODES = [LIGHT_MODE_MOTION, LIGHT_MODE_DARK, LIGHT_MODE_OFF]
LIGHT_MODE_TO_SETTINGS = {
LIGHT_MODE_MOTION: (LightModeType.MOTION.value, LightModeEnableType.ALWAYS.value),
LIGHT_MODE_MOTION_DARK: (
LightModeType.MOTION.value,
LightModeEnableType.DARK.value,
),
LIGHT_MODE_DARK: (LightModeType.WHEN_DARK.value, LightModeEnableType.DARK.value),
LIGHT_MODE_OFF: (LightModeType.MANUAL.value, None),
}
MOTION_MODE_TO_LIGHT_MODE = [
{"id": LightModeType.MOTION.value, "name": LIGHT_MODE_MOTION},
{"id": f"{LightModeType.MOTION.value}Dark", "name": LIGHT_MODE_MOTION_DARK},
{"id": LightModeType.WHEN_DARK.value, "name": LIGHT_MODE_DARK},
{"id": LightModeType.MANUAL.value, "name": LIGHT_MODE_OFF},
]
DEVICE_RECORDING_MODES = [
{"id": mode.value, "name": mode.value.title()} for mode in list(RecordingMode)
]
DEVICE_CLASS_LCD_MESSAGE: Final = "unifiprotect__lcd_message"
@dataclass
class ProtectSelectEntityDescription(ProtectRequiredKeysMixin, SelectEntityDescription):
"""Describes UniFi Protect Select entity."""
ufp_options: list[dict[str, Any]] | None = None
ufp_enum_type: type[Enum] | None = None
ufp_set_function: str | None = None
CAMERA_SELECTS: tuple[ProtectSelectEntityDescription, ...] = (
ProtectSelectEntityDescription(
key=_KEY_REC_MODE,
name="Recording Mode",
icon="mdi:video-outline",
entity_category=EntityCategory.CONFIG,
ufp_options=DEVICE_RECORDING_MODES,
ufp_enum_type=RecordingMode,
ufp_value="recording_settings.mode",
ufp_set_function="set_recording_mode",
),
ProtectSelectEntityDescription(
key=_KEY_IR,
name="Infrared Mode",
icon="mdi:circle-opacity",
entity_category=EntityCategory.CONFIG,
ufp_required_field="feature_flags.has_led_ir",
ufp_options=INFRARED_MODES,
ufp_enum_type=IRLEDMode,
ufp_value="isp_settings.ir_led_mode",
ufp_set_function="set_ir_led_model",
),
ProtectSelectEntityDescription(
key=_KEY_DOORBELL_TEXT,
name="Doorbell Text",
icon="mdi:card-text",
entity_category=EntityCategory.CONFIG,
device_class=DEVICE_CLASS_LCD_MESSAGE,
ufp_required_field="feature_flags.has_lcd_screen",
ufp_value="lcd_message",
),
)
LIGHT_SELECTS: tuple[ProtectSelectEntityDescription, ...] = (
ProtectSelectEntityDescription(
key=_KEY_LIGHT_MOTION,
name="Light Mode",
icon="mdi:spotlight",
entity_category=EntityCategory.CONFIG,
ufp_options=MOTION_MODE_TO_LIGHT_MODE,
ufp_value="light_mode_settings.mode",
),
ProtectSelectEntityDescription(
key=_KEY_PAIRED_CAMERA,
name="Paired Camera",
icon="mdi:cctv",
entity_category=EntityCategory.CONFIG,
ufp_value="camera_id",
),
)
VIEWER_SELECTS: tuple[ProtectSelectEntityDescription, ...] = (
ProtectSelectEntityDescription(
key=_KEY_VIEWER,
name="Liveview",
icon="mdi:view-dashboard",
entity_category=None,
ufp_value="liveview",
ufp_set_function="set_liveview",
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: entity_platform.AddEntitiesCallback,
) -> None:
"""Set up number entities for UniFi Protect integration."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
entities: list[ProtectDeviceEntity] = async_all_device_entities(
data,
ProtectSelects,
camera_descs=CAMERA_SELECTS,
light_descs=LIGHT_SELECTS,
viewer_descs=VIEWER_SELECTS,
)
async_add_entities(entities)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_DOORBELL_MESSAGE,
SET_DOORBELL_LCD_MESSAGE_SCHEMA,
"async_set_doorbell_message",
)
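# Illustrative service call (a sketch; the actual service name comes from
# SERVICE_SET_DOORBELL_MESSAGE and the accepted fields from
# SET_DOORBELL_LCD_MESSAGE_SCHEMA in const.py, so the names below are
# assumptions):
#
#   service: unifiprotect.set_doorbell_message
#   target:
#     entity_id: select.front_doorbell_doorbell_text
#   data:
#     message: "BE RIGHT THERE"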
class ProtectSelects(ProtectDeviceEntity, SelectEntity):
"""A UniFi Protect Select Entity."""
def __init__(
self,
data: ProtectData,
device: Camera | Light | Viewer,
description: ProtectSelectEntityDescription,
) -> None:
"""Initialize the unifi protect select entity."""
assert description.ufp_value is not None
self.device: Camera | Light | Viewer = device
self.entity_description: ProtectSelectEntityDescription = description
super().__init__(data)
self._attr_name = f"{self.device.name} {self.entity_description.name}"
options = description.ufp_options
if options is not None:
self._attr_options = [item["name"] for item in options]
self._hass_to_unifi_options: dict[str, Any] = {
item["name"]: item["id"] for item in options
}
self._unifi_to_hass_options: dict[Any, str] = {
item["id"]: item["name"] for item in options
}
self._async_set_dynamic_options()
@callback
def _async_update_device_from_protect(self) -> None:
super()._async_update_device_from_protect()
        # entities with categories are not exposed to voice assistants and are safe to update dynamically
if self.entity_description.entity_category is not None:
_LOGGER.debug(
"Updating dynamic select options for %s", self.entity_description.name
)
self._async_set_dynamic_options()
@callback
def _async_set_dynamic_options(self) -> None:
"""Options that do not actually update dynamically.
This is due to possible downstream platforms dependencies on these options.
"""
if self.entity_description.ufp_options is not None:
return
if self.entity_description.key == _KEY_VIEWER:
options = [
{"id": item.id, "name": item.name}
for item in self.data.api.bootstrap.liveviews.values()
]
elif self.entity_description.key == _KEY_DOORBELL_TEXT:
default_message = (
self.data.api.bootstrap.nvr.doorbell_settings.default_message_text
)
messages = self.data.api.bootstrap.nvr.doorbell_settings.all_messages
built_messages = (
{"id": item.type.value, "name": item.text} for item in messages
)
options = [
{"id": "", "name": f"Default Message ({default_message})"},
*built_messages,
]
elif self.entity_description.key == _KEY_PAIRED_CAMERA:
options = [{"id": TYPE_EMPTY_VALUE, "name": "Not Paired"}]
for camera in self.data.api.bootstrap.cameras.values():
options.append({"id": camera.id, "name": camera.name})
self._attr_options = [item["name"] for item in options]
self._hass_to_unifi_options = {item["name"]: item["id"] for item in options}
self._unifi_to_hass_options = {item["id"]: item["name"] for item in options}
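    # Hedged illustration of the two lookup tables built above, using made-up
    # option dicts rather than a live bootstrap:
    #
    #     options = [{"id": "always", "name": "Always"}, {"id": "off", "name": "Off"}]
    #     {item["name"]: item["id"] for item in options}  # -> {"Always": "always", "Off": "off"}
    #     {item["id"]: item["name"] for item in options}  # -> {"always": "Always", "off": "Off"}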
@property
def current_option(self) -> str:
"""Return the current selected option."""
assert self.entity_description.ufp_value is not None
unifi_value = get_nested_attr(self.device, self.entity_description.ufp_value)
if unifi_value is None:
unifi_value = TYPE_EMPTY_VALUE
elif isinstance(unifi_value, Liveview):
unifi_value = unifi_value.id
elif self.entity_description.key == _KEY_LIGHT_MOTION:
assert isinstance(self.device, Light)
            # extra handling to distinguish "On Motion" always vs. only when dark
if (
self.device.light_mode_settings.mode == LightModeType.MOTION
and self.device.light_mode_settings.enable_at
== LightModeEnableType.DARK
):
unifi_value = f"{LightModeType.MOTION.value}Dark"
elif self.entity_description.key == _KEY_DOORBELL_TEXT:
assert isinstance(unifi_value, LCDMessage)
return unifi_value.text
return self._unifi_to_hass_options.get(unifi_value, unifi_value)
async def async_select_option(self, option: str) -> None:
"""Change the Select Entity Option."""
if isinstance(self.device, Light):
if self.entity_description.key == _KEY_LIGHT_MOTION:
lightmode, timing = LIGHT_MODE_TO_SETTINGS[option]
_LOGGER.debug("Changing Light Mode to %s", option)
await self.device.set_light_settings(
LightModeType(lightmode),
enable_at=None if timing is None else LightModeEnableType(timing),
)
return
unifi_value = self._hass_to_unifi_options[option]
if self.entity_description.key == _KEY_PAIRED_CAMERA:
if unifi_value == TYPE_EMPTY_VALUE:
unifi_value = None
camera = self.data.api.bootstrap.cameras.get(unifi_value)
await self.device.set_paired_camera(camera)
_LOGGER.debug("Changed Paired Camera to to: %s", option)
return
unifi_value = self._hass_to_unifi_options[option]
if isinstance(self.device, Camera):
if self.entity_description.key == _KEY_DOORBELL_TEXT:
if unifi_value.startswith(DoorbellMessageType.CUSTOM_MESSAGE.value):
await self.device.set_lcd_text(
DoorbellMessageType.CUSTOM_MESSAGE, text=option
)
elif unifi_value == TYPE_EMPTY_VALUE:
await self.device.set_lcd_text(None)
else:
await self.device.set_lcd_text(DoorbellMessageType(unifi_value))
_LOGGER.debug("Changed Doorbell LCD Text to: %s", option)
return
if self.entity_description.ufp_enum_type is not None:
unifi_value = self.entity_description.ufp_enum_type(unifi_value)
elif self.entity_description.key == _KEY_VIEWER:
unifi_value = self.data.api.bootstrap.liveviews[unifi_value]
_LOGGER.debug("%s set to: %s", self.entity_description.key, option)
assert self.entity_description.ufp_set_function
coro = getattr(self.device, self.entity_description.ufp_set_function)
await coro(unifi_value)
async def async_set_doorbell_message(self, message: str, duration: str) -> None:
"""Set LCD Message on Doorbell display."""
if self.entity_description.key != _KEY_DOORBELL_TEXT:
raise HomeAssistantError("Not a doorbell text select entity")
assert isinstance(self.device, Camera)
reset_at = None
timeout_msg = ""
if duration.isnumeric():
reset_at = utcnow() + timedelta(minutes=int(duration))
timeout_msg = f" with timeout of {duration} minute(s)"
_LOGGER.debug(
'Setting message for %s to "%s"%s', self.device.name, message, timeout_msg
)
await self.device.set_lcd_text(
DoorbellMessageType.CUSTOM_MESSAGE, message, reset_at=reset_at
)
|
|
"""
This module provides functions for generating some basic figures. The code can
be used as is, or serve as an example for writing your own code.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import ConnectionPatch
# from . import plotting_util
from .plotting_util import (prepare_data, simple_kde, group_density,
make_grid, make_legend, plot_envelope,
simple_density, do_titles, do_ylabels, TIME,
PlotType, get_color, Density, LegendEnum)
from ..util import EMAError, get_module_logger
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
__all__ = ['lines',
'envelopes',
'kde_over_time',
'multiple_densities']
_logger = get_module_logger(__name__)
TIME_LABEL = 'Time'
def envelopes(experiments,
outcomes,
outcomes_to_show=None,
group_by=None,
grouping_specifiers=None,
density=None,
fill=False,
legend=True,
titles={},
ylabels={},
log=False):
    '''Make envelope plots.
    An envelope shows the minimum and maximum value for a set of runs over
    time. It is thus to be used in case of time series data. The function
    will try to find a result labeled "TIME". If this is present, these
    values will be used on the X-axis. In case of Vensim models, TIME is
    present by default.
Parameters
----------
experiments : DataFrame
outcomes : dict
    outcomes_to_show : str or list of str, optional
group_by : str, optional
                name of the column in the experiments to group results by.
Alternatively, `index` can be used to use indexing
arrays as the basis for grouping.
grouping_specifiers : iterable or dict, optional
set of categories to be used as a basis for
grouping by. Grouping_specifiers is only
meaningful if group_by is provided as well.
In case of grouping by index, the grouping
specifiers should be in a dictionary where
the key denotes the name of the group.
density : {None, HIST, KDE, VIOLIN, BOXPLOT}, optional
fill : bool, optional
legend : bool, optional
titles : dict, optional
a way for controlling whether each of the axes should have a
title. There are three possibilities. If set to None, no title
will be shown for any of the axes. If set to an empty dict,
the default, the title is identical to the name of the outcome of
interest. If you want to override these default names, provide a
dict with the outcome of interest as key and the desired title as
value. This dict need only contain the outcomes for which you
want to use a different title.
ylabels : dict, optional
way for controlling the ylabels. Works identical to titles.
log : bool, optional
log scale density plot
Returns
-------
Figure : Figure instance
axes : dict
dict with outcome as key, and axes as value. Density axes' are
indexed by the outcome followed by _density.
Note
----
    The current implementation is limited to seven different categories in
    case of group_by, categories, and/or discretization. This limit is due to
    the colors specified in COLOR_LIST.
Examples
--------
>>> import util as util
>>> data = util.load_results(r'1000 flu cases.cPickle')
>>> envelopes(data, group_by='policy')
    will show an envelope for three different policies, for all the
    outcomes of interest, while
    >>> envelopes(data, group_by='policy',
                  grouping_specifiers=['static policy', 'adaptive policy'])
    will only show results for the two specified policies, ignoring any results
    associated with 'no policy'.
'''
_logger.debug("generating envelopes")
prepared_data = prepare_data(experiments, outcomes,
outcomes_to_show, group_by,
grouping_specifiers,
filter_scalar=True)
outcomes, outcomes_to_show, time, grouping_labels = prepared_data
figure, grid = make_grid(outcomes_to_show, density)
# do the plotting
axes_dict = {}
for i, outcome_to_plot in enumerate(outcomes_to_show):
ax = figure.add_subplot(grid[i, 0])
axes_dict[outcome_to_plot] = ax
ax_d = None
if density:
ax_d = figure.add_subplot(grid[i, 1], sharey=ax)
axes_dict[outcome_to_plot + "_density"] = ax_d
if group_by:
group_by_envelopes(outcomes, outcome_to_plot, time, density,
ax, ax_d, fill, grouping_labels, log)
else:
single_envelope(outcomes, outcome_to_plot, time, density,
ax, ax_d, fill, log)
if ax_d:
for tl in ax_d.get_yticklabels():
tl.set_visible(False)
ax.set_xlabel(TIME_LABEL)
do_ylabels(ax, ylabels, outcome_to_plot)
do_titles(ax, titles, outcome_to_plot)
if legend and group_by:
gs1 = grid[0, 0]
for ax in figure.axes:
gs2 = ax._subplotspec
if all((gs1._gridspec == gs2._gridspec,
gs1.num1 == gs2.num1,
gs1.num2 == gs2.num2)):
break
if fill:
make_legend(grouping_labels, ax, alpha=0.3,
legend_type=LegendEnum.PATCH)
else:
make_legend(grouping_labels, ax, legend_type=LegendEnum.LINE)
return figure, axes_dict
def group_by_envelopes(outcomes, outcome_to_plot, time, density, ax,
ax_d, fill, group_labels, log):
''' Helper function responsible for generating an envelope plot
based on a grouping.
Parameters
----------
outcomes : dict
a dictionary containing the various outcomes to plot
outcome_to_plot : str
the specific outcome to plot
time : str
the name of the time dimension
density : {None, HIST, KDE, VIOLIN, BOXPLOT}
ax : Axes instance
the ax on which to plot
ax_d : Axes instance
the ax on which to plot the density
fill : bool
    group_labels : list of str
order in which groups should be plotted
log : bool
'''
for j, key in enumerate(group_labels):
value = outcomes[key]
value = value[outcome_to_plot]
try:
plot_envelope(ax, j, time, value, fill)
except ValueError:
_logger.exception("value error when plotting for %s" % (key))
raise
if density:
group_density(ax_d, density, outcomes, outcome_to_plot, group_labels,
log)
ax_d.get_yaxis().set_view_interval(
ax.get_yaxis().get_view_interval()[0],
ax.get_yaxis().get_view_interval()[1])
def single_envelope(outcomes,
outcome_to_plot,
time,
density,
ax,
ax_d,
fill,
log):
'''
Helper function for generating a single envelope plot.
Parameters
----------
outcomes : dict
        a dictionary containing the various outcomes to plot
outcome_to_plot : str
the specific outcome to plot
time : str
the name of the time dimension
density : {None, HIST, KDE, VIOLIN, BOXPLOT}
ax : Axes instance
the ax on which to plot
ax_d : Axes instance
the ax on which to plot the density
fill : bool
log : bool
'''
value = outcomes[outcome_to_plot]
plot_envelope(ax, 0, time, value, fill)
if density:
simple_density(density, value, ax_d, ax, log)
def lines(experiments,
outcomes,
outcomes_to_show=[],
group_by=None,
grouping_specifiers=None,
density='',
legend=True,
titles={},
ylabels={},
experiments_to_show=None,
show_envelope=False,
log=False):
'''This function takes the results from :meth:`perform_experiments` and
visualizes these as line plots. It is thus to be used in case of time
series data. The function will try to find a result labeled "TIME". If this
is present, these values will be used on the X-axis. In case of Vensim
models, TIME is present by default.
Parameters
----------
experiments : DataFrame
outcomes : dict
outcomes_to_show : list of str, optional
list of outcome of interest you want to plot. If empty,
all outcomes are plotted. **Note**: just names.
group_by : str, optional
name of the column in the cases array to group results by.
Alternatively, `index` can be used to use indexing arrays as the
basis for grouping.
grouping_specifiers : iterable or dict, optional
set of categories to be used as a basis for grouping
by. Grouping_specifiers is only meaningful if
group_by is provided as well. In case of grouping by
index, the grouping specifiers should be in a
dictionary where the key denotes the name of the
group.
density : {None, HIST, KDE, VIOLIN, BOXPLOT}, optional
legend : bool, optional
titles : dict, optional
a way for controlling whether each of the axes should have a
title. There are three possibilities. If set to None, no title
will be shown for any of the axes. If set to an empty dict,
the default, the title is identical to the name of the outcome of
interest. If you want to override these default names, provide a
dict with the outcome of interest as key and the desired title as
value. This dict need only contain the outcomes for which you
want to use a different title.
ylabels : dict, optional
way for controlling the ylabels. Works identical to titles.
experiments_to_show : ndarray, optional
indices of experiments to show lines for,
defaults to None.
show_envelope : bool, optional
                    show envelope of outcomes. This envelope is based on
                    the minimum at each column and the maximum at each column.
log : bool, optional
log scale density plot
Returns
-------
fig : Figure instance
axes : dict
dict with outcome as key, and axes as value. Density axes' are
indexed by the outcome followed by _density.
Note
----
    The current implementation is limited to seven different categories in
    case of group_by, categories, and/or discretization. This limit is due to
    the colors specified in COLOR_LIST.
'''
_logger.debug("generating line graph")
# make sure we have the data
if show_envelope:
return plot_lines_with_envelopes(
experiments,
outcomes,
outcomes_to_show=outcomes_to_show,
group_by=group_by,
legend=legend,
density=density,
grouping_specifiers=grouping_specifiers,
experiments_to_show=experiments_to_show,
titles=titles,
ylabels=ylabels,
log=log)
if experiments_to_show is not None:
experiments = experiments.loc[experiments_to_show, :]
outcomes = {k: v[experiments_to_show] for k, v in outcomes.items()}
data = prepare_data(experiments, outcomes, outcomes_to_show,
group_by, grouping_specifiers)
outcomes, outcomes_to_show, time, grouping_labels = data
figure, grid = make_grid(outcomes_to_show, density)
axes_dict = {}
# do the plotting
for i, outcome_to_plot in enumerate(outcomes_to_show):
ax = figure.add_subplot(grid[i, 0])
axes_dict[outcome_to_plot] = ax
ax_d = None
if density:
ax_d = figure.add_subplot(grid[i, 1], sharey=ax)
axes_dict[outcome_to_plot + "_density"] = ax_d
for tl in ax_d.get_yticklabels():
tl.set_visible(False)
if group_by:
group_by_lines(outcomes, outcome_to_plot, time, density,
ax, ax_d, grouping_labels, log)
else:
simple_lines(outcomes, outcome_to_plot, time, density,
ax, ax_d, log)
ax.set_xlabel(TIME_LABEL)
do_ylabels(ax, ylabels, outcome_to_plot)
do_titles(ax, titles, outcome_to_plot)
if legend and group_by:
gs1 = grid[0, 0]
for ax in figure.axes:
gs2 = ax._subplotspec
if all((gs1._gridspec == gs2._gridspec,
gs1.num1 == gs2.num1,
gs1.num2 == gs2.num2)):
break
make_legend(grouping_labels, ax)
return figure, axes_dict
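# Hedged usage sketch for lines() (the file name and the load_results import
# path are assumptions; load_results is the workbench utility for loading
# previously saved experiment results):
#
#     from ema_workbench import load_results
#     experiments, outcomes = load_results('1000 flu cases.tar.gz')
#     fig, axes = lines(experiments, outcomes, group_by='policy',
#                       density=Density.KDE, show_envelope=True)
#     plt.show()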
def plot_lines_with_envelopes(experiments,
outcomes,
outcomes_to_show=[],
group_by=None,
grouping_specifiers=None,
density='',
legend=True,
titles={},
ylabels={},
experiments_to_show=None,
log=False):
'''
Helper function for generating a plot which contains both an envelope and
lines.
Parameters
----------
experiments : DataFrame
outcomes : dict
outcomes_to_show : list of str, optional
list of outcome of interest you want to plot. If empty,
all outcomes are plotted. **Note**: just names.
group_by : str, optional
name of the column in the cases array to group results by.
Alternatively, `index` can be used to use indexing arrays as the
basis for grouping.
grouping_specifiers : iterable or dict, optional
set of categories to be used as a basis for grouping
by. Grouping_specifiers is only meaningful if
group_by is provided as well. In case of grouping by
index, the grouping specifiers should be in a
dictionary where the key denotes the name of the
group.
density : {None, HIST, KDE, VIOLIN, BOXPLOT}, optional
legend : bool, optional
titles : dict, optional
a way for controlling whether each of the axes should have a
title. There are three possibilities. If set to None, no title
will be shown for any of the axes. If set to an empty dict,
the default, the title is identical to the name of the outcome of
interest. If you want to override these default names, provide a
dict with the outcome of interest as key and the desired title as
value. This dict need only contain the outcomes for which you
want to use a different title.
ylabels : dict, optional
way for controlling the ylabels. Works identical to titles.
experiments_to_show : ndarray, optional
indices of experiments to show lines for,
defaults to None.
log : bool, optional
Returns
-------
Figure
a figure instance
dict
dict with outcome as key, and axes as value. Density axes' are
indexed by the outcome followed by _density
'''
full_outcomes = prepare_data(experiments, outcomes,
outcomes_to_show, group_by,
grouping_specifiers)[0]
experiments = experiments.loc[experiments_to_show, :]
temp = {}
for key, value in outcomes.items():
temp[key] = value[experiments_to_show]
data = prepare_data(experiments, temp, outcomes_to_show,
group_by, grouping_specifiers)
outcomes, outcomes_to_show, time, grouping_labels = data
figure, grid = make_grid(outcomes_to_show, density)
axes_dict = {}
# do the plotting
for i, outcome_to_plot in enumerate(outcomes_to_show):
ax = figure.add_subplot(grid[i, 0])
axes_dict[outcome_to_plot] = ax
ax_d = None
if density:
ax_d = figure.add_subplot(grid[i, 1], sharey=ax)
axes_dict[outcome_to_plot + "_density"] = ax_d
for tl in ax_d.get_yticklabels():
tl.set_visible(False)
if group_by:
for j, key in enumerate(grouping_labels):
full_value = full_outcomes[key][outcome_to_plot]
plot_envelope(ax, j, time, full_value, fill=True)
for j, key in enumerate(grouping_labels):
value = outcomes[key][outcome_to_plot]
full_value = full_outcomes[key][outcome_to_plot]
ax.plot(time.T[:, np.newaxis], value.T,
c=get_color(j))
if density:
group_density(ax_d, density, full_outcomes,
outcome_to_plot, grouping_labels, log)
ax_d.get_yaxis().set_view_interval(
ax.get_yaxis().get_view_interval()[0],
ax.get_yaxis().get_view_interval()[1])
else:
value = full_outcomes[outcome_to_plot]
plot_envelope(ax, 0, time, value, fill=True)
if density:
simple_density(density, value, ax_d, ax, log)
value = outcomes[outcome_to_plot]
ax.plot(time.T, value.T)
ax.set_xlim(left=time[0], right=time[-1])
ax.set_xlabel(TIME_LABEL)
do_ylabels(ax, ylabels, outcome_to_plot)
do_titles(ax, titles, outcome_to_plot)
if legend and group_by:
gs1 = grid[0, 0]
for ax in figure.axes:
gs2 = ax._subplotspec
if all((gs1._gridspec == gs2._gridspec,
gs1.num1 == gs2.num1,
gs1.num2 == gs2.num2)):
break
make_legend(grouping_labels, ax)
return figure, axes_dict
def group_by_lines(outcomes, outcome_to_plot, time, density,
ax, ax_d, group_by_labels, log):
'''
Helper function responsible for generating a grouped lines plot.
Parameters
----------
    outcomes : dict
outcome_to_plot : str
time : str
density : {None, HIST, KDE, VIOLIN, BOXPLOT}
ax : Axes instance
ax_d : Axes instance
group_by_labels : list of str
log : bool
'''
for j, key in enumerate(group_by_labels):
value = outcomes[key]
value = value[outcome_to_plot]
color = get_color(j)
ax.plot(time.T[:, np.newaxis], value.T, c=color, ms=1, markevery=5)
if density:
group_density(ax_d, density, outcomes, outcome_to_plot,
group_by_labels, log)
ax_d.get_yaxis().set_view_interval(
ax.get_yaxis().get_view_interval()[0],
ax.get_yaxis().get_view_interval()[1])
def simple_lines(outcomes, outcome_to_plot, time, density,
ax, ax_d, log):
'''
Helper function responsible for generating a simple lines plot.
Parameters
----------
outcomes : dict
    outcome_to_plot : str
time : str
density : {None, HIST, KDE, VIOLIN, BOXPLOT}
ax : Axes instance
ax_d : Axes instance
log : bool
'''
value = outcomes[outcome_to_plot]
ax.plot(time.T, value.T)
if density:
simple_density(density, value, ax_d, ax, log)
def kde_over_time(experiments,
outcomes,
outcomes_to_show=[],
group_by=None,
grouping_specifiers=None,
colormap='viridis',
log=True):
'''
    Plot a KDE over time. The KDE is visualized through a heatmap.
Parameters
----------
experiments : DataFrame
outcomes : dict
outcomes_to_show : list of str, optional
list of outcome of interest you want to plot. If
empty, all outcomes are plotted.
**Note**: just names.
group_by : str, optional
name of the column in the cases array to group results
by. Alternatively, `index` can be used to use indexing
arrays as the basis for grouping.
grouping_specifiers : iterable or dict, optional
set of categories to be used as a basis for
grouping by. Grouping_specifiers is only
meaningful if group_by is provided as well.
In case of grouping by index, the grouping
specifiers should be in a dictionary where
the key denotes the name of the group.
colormap : str, optional
valid matplotlib color map name
log : bool, optional
Returns
-------
list of Figure instances
a figure instance for each group for each outcome
dict
dict with outcome as key, and axes as value. Density axes' are
indexed by the outcome followed by _density
'''
# determine the minima and maxima over all runs
minima = {}
maxima = {}
for key, value in outcomes.items():
minima[key] = np.min(value)
maxima[key] = np.max(value)
prepared_data = prepare_data(experiments, outcomes,
outcomes_to_show, group_by,
grouping_specifiers,
filter_scalar=True)
outcomes, outcomes_to_show, time, grouping_specifiers = prepared_data
del time
if group_by:
figures = []
axes_dicts = {}
for key, value in outcomes.items():
fig, axes_dict = simple_kde(value, outcomes_to_show,
colormap, log, minima, maxima)
fig.suptitle(key)
figures.append(fig)
axes_dicts[key] = axes_dict
return figures, axes_dicts
else:
return simple_kde(outcomes, outcomes_to_show, colormap, log,
minima, maxima)
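# Hedged usage sketch for kde_over_time() (the file name and import path are
# assumptions):
#
#     from ema_workbench import load_results
#     experiments, outcomes = load_results('1000 flu cases.tar.gz')
#     figs, axes = kde_over_time(experiments, outcomes, group_by='policy')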
def multiple_densities(experiments,
outcomes,
points_in_time=[],
outcomes_to_show=[],
group_by=None,
grouping_specifiers=None,
density=Density.KDE,
legend=True,
titles={},
ylabels={},
experiments_to_show=None,
plot_type=PlotType.ENVELOPE,
log=False,
**kwargs):
''' Make an envelope plot with multiple density plots over the run time
Parameters
----------
experiments : DataFrame
outcomes : dict
points_in_time : list
a list of points in time for which you want to see the
density. At the moment up to 6 points in time are
supported
outcomes_to_show : list of str, optional
list of outcome of interest you want to plot. If empty,
all outcomes are plotted. **Note**: just names.
group_by : str, optional
name of the column in the cases array to group results by.
Alternatively, `index` can be used to use indexing arrays as the
basis for grouping.
grouping_specifiers : iterable or dict, optional
set of categories to be used as a basis for grouping
by. Grouping_specifiers is only meaningful if
group_by is provided as well. In case of grouping by
index, the grouping specifiers should be in a
dictionary where the key denotes the name of the
group.
density : {Density.KDE, Density.HIST, Density.VIOLIN, Density.BOXPLOT},
optional
legend : bool, optional
titles : dict, optional
a way for controlling whether each of the axes should have a
title. There are three possibilities. If set to None, no title
will be shown for any of the axes. If set to an empty dict,
the default, the title is identical to the name of the outcome of
interest. If you want to override these default names, provide a
dict with the outcome of interest as key and the desired title as
value. This dict need only contain the outcomes for which you
want to use a different title.
ylabels : dict, optional
way for controlling the ylabels. Works identical to titles.
experiments_to_show : ndarray, optional
indices of experiments to show lines for,
defaults to None.
plot_type : {PlotType.ENVELOPE, PlotType.ENV_LIN, PlotType.LINES}, optional
log : bool, optional
Returns
-------
fig : Figure instance
axes : dict
dict with outcome as key, and axes as value. Density axes' are
indexed by the outcome followed by _density.
Note
----
    The current implementation is limited to seven different categories
    in case of group_by, categories, and/or discretization. This limit is
    due to the colors specified in COLOR_LIST.
Note
----
the connection patches are for some reason not drawn if log scaling is
used for the density plots. This appears to be an issue in matplotlib
itself.
'''
if not outcomes_to_show:
outcomes_to_show = [k for k, v in outcomes.items() if v.ndim == 2]
outcomes_to_show.remove(TIME)
elif isinstance(outcomes_to_show, str):
outcomes_to_show = [outcomes_to_show]
data = prepare_data(experiments, outcomes,
outcomes_to_show, group_by,
grouping_specifiers)
outcomes, _, time, grouping_labels = data
axes_dicts = {}
figures = []
for outcome_to_show in outcomes_to_show:
axes_dict = {}
axes_dicts[outcome_to_show] = axes_dict
# start of plotting
fig = plt.figure()
figures.append(fig)
# making of grid
if not points_in_time:
raise EMAError("no points in time specified")
if len(points_in_time) == 1:
ax_env = plt.subplot2grid((2, 3), (0, 0), colspan=3)
ax1 = plt.subplot2grid((2, 3), (1, 1), sharey=ax_env)
kde_axes = [ax1]
elif len(points_in_time) == 2:
ax_env = plt.subplot2grid((2, 2), (0, 0), colspan=2)
ax1 = plt.subplot2grid((2, 2), (1, 0), sharey=ax_env)
ax2 = plt.subplot2grid((2, 2), (1, 1), sharex=ax1, sharey=ax_env)
kde_axes = [ax1, ax2]
elif len(points_in_time) == 3:
ax_env = plt.subplot2grid((2, 3), (0, 0), colspan=3)
ax1 = plt.subplot2grid((2, 3), (1, 0), sharey=ax_env)
ax2 = plt.subplot2grid((2, 3), (1, 1), sharex=ax1, sharey=ax_env)
ax3 = plt.subplot2grid((2, 3), (1, 2), sharex=ax1, sharey=ax_env)
kde_axes = [ax1, ax2, ax3]
elif len(points_in_time) == 4:
ax_env = plt.subplot2grid((2, 4), (0, 1), colspan=2)
ax1 = plt.subplot2grid((2, 4), (1, 0), sharey=ax_env)
ax2 = plt.subplot2grid((2, 4), (1, 1), sharex=ax1, sharey=ax_env)
ax3 = plt.subplot2grid((2, 4), (1, 2), sharex=ax1, sharey=ax_env)
ax4 = plt.subplot2grid((2, 4), (1, 3), sharex=ax1, sharey=ax_env)
kde_axes = [ax1, ax2, ax3, ax4]
elif len(points_in_time) == 5:
ax_env = plt.subplot2grid((2, 5), (0, 1), colspan=3)
ax1 = plt.subplot2grid((2, 5), (1, 0), sharey=ax_env)
ax2 = plt.subplot2grid((2, 5), (1, 1), sharex=ax1, sharey=ax_env)
ax3 = plt.subplot2grid((2, 5), (1, 2), sharex=ax1, sharey=ax_env)
ax4 = plt.subplot2grid((2, 5), (1, 3), sharex=ax1, sharey=ax_env)
ax5 = plt.subplot2grid((2, 5), (1, 4), sharex=ax1, sharey=ax_env)
kde_axes = [ax1, ax2, ax3, ax4, ax5]
elif len(points_in_time) == 6:
ax_env = plt.subplot2grid((2, 6), (0, 1), colspan=4)
ax1 = plt.subplot2grid((2, 6), (1, 0), sharey=ax_env)
ax2 = plt.subplot2grid((2, 6), (1, 1), sharex=ax1, sharey=ax_env)
ax3 = plt.subplot2grid((2, 6), (1, 2), sharex=ax1, sharey=ax_env)
ax4 = plt.subplot2grid((2, 6), (1, 3), sharex=ax1, sharey=ax_env)
ax5 = plt.subplot2grid((2, 6), (1, 4), sharex=ax1, sharey=ax_env)
ax6 = plt.subplot2grid((2, 6), (1, 5), sharex=ax1, sharey=ax_env)
kde_axes = [ax1, ax2, ax3, ax4, ax5, ax6, ]
else:
raise EMAError("too many points in time provided")
axes_dict["main plot"] = ax_env
for n, entry in enumerate(kde_axes):
axes_dict["density_%s" % n] = entry
            # turn off tick labels for all but the first density
if n > 0:
for tl in entry.get_yticklabels():
tl.set_visible(False)
# bit of a trick to avoid duplicating code. If no subgroups are
# specified, nest the outcomes one step deeper in the dict so the
# iteration below can proceed normally.
if not grouping_labels:
grouping_labels = [""]
outcomes[""] = outcomes
for j, key in enumerate(grouping_labels):
value = outcomes[key][outcome_to_show]
if plot_type == PlotType.ENVELOPE:
plot_envelope(ax_env, j, time, value, **kwargs)
elif plot_type == PlotType.LINES:
ax_env.plot(time.T, value.T)
elif plot_type == PlotType.ENV_LIN:
plot_envelope(ax_env, j, time, value, **kwargs)
if experiments_to_show is not None:
ax_env.plot(time.T, value[experiments_to_show].T)
else:
ax_env.plot(time.T, value.T)
ax_env.set_xlim(time[0], time[-1])
ax_env.set_xlabel(TIME_LABEL)
do_ylabels(ax_env, ylabels, outcome_to_show)
do_titles(ax_env, titles, outcome_to_show)
for ax, time_value in zip(kde_axes, points_in_time):
index = np.where(time == time_value)[0][0]
group_density(ax, density, outcomes, outcome_to_show,
grouping_labels, index=index, log=log)
min_y, max_y = ax_env.get_ylim()
ax_env.autoscale(enable=False, axis='y')
# draw line to connect each point in time in the main plot
# to the associated density plot
for i, ax in enumerate(kde_axes):
time_value = points_in_time[i]
ax_env.plot([time_value, time_value],
[min_y, max_y], c='k', ls='--')
con = ConnectionPatch(xyA=(time_value, min_y),
xyB=(ax.get_xlim()[0],
max_y), coordsA="data",
coordsB="data", axesA=ax_env, axesB=ax)
ax_env.add_artist(con)
if legend and group_by:
lt = LegendEnum.PATCH
alpha = 0.3
if plot_type == PlotType.LINES:
lt = LegendEnum.LINE
alpha = 1
make_legend(grouping_labels, ax_env, legend_type=lt, alpha=alpha)
return figures, axes_dicts
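# Hedged usage sketch for multiple_densities() (outcome name, time points and
# import path are assumptions):
#
#     from ema_workbench import load_results
#     experiments, outcomes = load_results('1000 flu cases.tar.gz')
#     multiple_densities(experiments, outcomes, points_in_time=[5, 10, 50],
#                        outcomes_to_show='infected fraction R1',
#                        group_by='policy', plot_type=PlotType.ENV_LIN)
#     plt.show()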
|
|
import os.path
import json
import copy
import itertools
import requests.exceptions
from . import transport
from . import errors
from . import utils
from . import schemata
service_error = utils.registry(errors.ServiceError, 'ERRORS')
@service_error(-1)
class InternalError(errors.ServiceError):
"""An internal service error has occured"""
pass
@service_error(-2)
class ArgumentError(errors.ServiceError):
"""Invalid arguments were passed to a request"""
pass
@service_error(-3)
class RetryRequest(errors.ServiceError):
"""The server is too busy and requests a retry"""
pass
@service_error(-5)
class UploadFailed(errors.ServiceError):
"""The upload failed"""
pass
@service_error(-6)
class ConcurrentIPsExceeded(errors.ServiceError):
"""Too many different IPs are concurrently accessing this upload URL"""
pass
@service_error(-7)
class InvalidRange(errors.ServiceError):
"""An invalid range header was specified"""
pass
@service_error(-8)
class UploadURLExpired(errors.ServiceError):
"""The upload URL has expired"""
pass
@service_error(-9)
class ObjectNotFound(errors.ServiceError):
"""Object (typically node or user) not found"""
pass
@service_error(-10)
class CircularLinkingAttempted(errors.ServiceError):
"""A circular link was denied"""
pass
@service_error(-11)
class AccessViolation(errors.ServiceError):
"""An access violation occured (e.g. writing to a read-only share)"""
@service_error(-12)
class ObjectExists(errors.ServiceError):
"""The object already exists on the server"""
pass
@service_error(-13)
class ObjectIncomplete(errors.ServiceError):
"""The accessed object is incomplete"""
pass
@service_error(-15)
class InvalidSessionId(errors.ServiceError):
"""The server indicates that the provided session id is invalid"""
pass
@service_error(-16)
class UserBlocked(errors.ServiceError):
"""The user has been blocked"""
pass
@service_error(-17)
class QuotaExceeded(errors.ServiceError):
"""The user quota has been exceeded"""
pass
@service_error(-18)
class TemporarilyUnavailable(errors.ServiceError):
"""The resource is temporarily unavailable"""
# TODO: Should this be a retry condition?
pass
TRANSACTION_SCHEMA = schemata.Schema.from_file('transaction.json')
RETRY_CONDITIONS = (requests.exceptions.Timeout, RetryRequest,
transport.HTTP500Error)
class Operation(object):
_request = None
_response = None
_request_data = {}
_response_data = None
schema = None
def __init__(self, *args, **kwargs):
self.session = kwargs.pop('session', None)
try:
self._request = schemata.SchemaBundle.from_file(
self.schema, 'request')
self.request(*args, **kwargs)
except KeyError:
pass
try:
self._response = schemata.SchemaBundle.from_file(
self.schema, 'response')
except KeyError:
pass
if not self._request and not self._response:
raise errors.SupermegaException(
'Need either request or response in schema')
def request(self, *args, **kwargs):
pass
def response(self, session = None):
session = session or self.session
if not self._response_data:
Transaction(self).send(session)
if isinstance(self._response_data, errors.ServiceError):
raise self._response_data
return copy.deepcopy(self._response_data)
def get_serializable_request(self):
data = self._request.translate(self._request_data)
data[schemata.SchemaBundle.OPCODE_KEY] = self._request.opcode
self._request.schema.validate(data)
return data
def load_response(self, data):
self._response.schema.validate(data)
self._response_data = self._response.translate(data)
def set_response_error(self, error):
self._response_data = error
    def get(self, *args, **kwargs):
        # dict-style read access to the underlying request data
return self._request_data.get(*args, **kwargs)
def __getitem__(self, key):
return self._request_data[key]
def __setitem__(self, attr, value):
self._request.validate(value, (attr,))
self._request_data[attr] = value
def __contains__(self, key):
return self._request_data.__contains__(key)
class Transaction(list):
class Encoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Operation):
return obj.get_serializable_request()
            # fall back to the stock JSONEncoder behaviour for unknown types
            return super(Transaction.Encoder, self).default(obj)
def __init__(self, *args):
super(Transaction, self).__init__(args)
def serialize(self):
return json.dumps(self, cls=self.Encoder)
def deserialize(self, request_transaction, data):
data = json.loads(data)
# The server seems to return either -errno or [-errno]
if isinstance(data, (int, long)):
raise errors.ServiceError.for_errno(data)
TRANSACTION_SCHEMA.validate(data)
for op, response_data in itertools.izip(request_transaction, data):
if isinstance(response_data, (int, long)) and response_data < 0:
op.set_response_error(
errors.ServiceError.for_errno(response_data))
else:
op.load_response(response_data)
self.append(op)
def send(self, session):
request = transport.TransactionRequest(self, session)
return self._send(request)
@utils.retry(RETRY_CONDITIONS)
def _send(self, request):
data = request.send().content
response = self.__class__()
response.deserialize(self, data)
return response
class UserSession(Operation):
schema = 'user-session.bundle.json'
def request(self, user, hash):
self['user'] = user
if hash:
self['hash'] = hash
class EphemeralUserSession(Operation):
schema = 'user-session-ephemeral.bundle.json'
def request(self, handle):
self['handle'] = handle
class UserInformation(Operation):
schema = 'user-information.bundle.json'
class UserUpdate(Operation):
schema = 'user-update.bundle.json'
def request(self, key, challenge):
self['key'] = key
self['challenge'] = challenge
class Files(Operation):
schema = 'files.bundle.json'
def request(self):
self['c'] = 1 # TODO: Find out what this means
class FileGetInfo(Operation):
schema = 'file-get-info.bundle.json'
def request(self, handle, include_url = True):
self['handle'] = handle
self['include_url'] = int(include_url)
class PublicFileGetInfo(Operation):
schema = 'public-file-get-info.bundle.json'
def request(self, handle, include_url = True):
self['handle'] = handle
self['include_url'] = int(include_url)
class FileUpload(Operation):
schema = 'file-upload.bundle.json'
def request(self, size):
self['size'] = size
class NodeAdd(Operation):
schema = 'node-add.bundle.json'
def request(self, parent_handle, node_type, node_key, node_attrs,
completion_token):
self['parent'] = parent_handle
self['nodes'] = [{
'completion_token': completion_token,
'type': node_type,
'attrs': node_attrs,
'key': node_key
}]
class NodeUpdate(Operation):
schema = 'node-update.bundle.json'
def request(self, node_handle, key, attrs):
self['handle'] = node_handle
self['key'] = key
self['attrs'] = attrs
self['request_id'] = ''
class FileMove(Operation):
schema = 'file-move.bundle.json'
def request(self, fileobj, new_parent):
self['new_parent'] = new_parent.handle
self['handle'] = fileobj.handle
self['request_id'] = ''
class FileDelete(Operation):
schema = 'file-delete.bundle.json'
def request(self, file_handle):
self['handle'] = file_handle
self['request_id'] = ''
class FileGetPublicHandle(Operation):
schema = 'file-get-public-handle.bundle.json'
def request(self, handle):
self['handle'] = handle
class PollServer(Operation):
schema = 'server.bundle.json'
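# Hedged usage sketch (not executed here): an Operation is normally driven
# through a session object supplied by the surrounding client code; the
# `session` below is assumed to exist already.
#
#     files = Files(session=session)
#     tree = files.response()   # lazily sends a Transaction and returns the
#                               # translated response data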
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume, settings
import numpy as np
import time
import os
from caffe2.python import core, dyndep
import caffe2.python.hypothesis_test_util as hu
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nnpack:nnpack_ops")
np.random.seed(1)
def benchmark(ws, net, warmups=5, iters=100):
for _ in range(warmups):
ws.run(net)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
before = time.time()
ws.run(plan)
after = time.time()
print("Timing network, time taken per-iteration: {:.6f}ms".format((
after - before) / float(iters) * 1000.0))
return after - before
def has_avx2():
import subprocess
try:
subprocess.check_output(["grep", "avx2", "/proc/cpuinfo"])
return True
except subprocess.CalledProcessError:
# grep exits with rc 1 on no matches
return False
@unittest.skipIf(not has_avx2(), "NNPACK requires AVX2")
class NNPackOpsTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 2),
kernel=st.integers(3, 5),
size=st.integers(5, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 5),
groups=st.integers(1, 2))
def test_convolution_correctness(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, groups):
assume(input_channels % groups == 0)
assume(output_channels % groups == 0)
assume(output_channels == input_channels / groups)
assume(stride <= kernel)
if stride != 1:
assume(batch_size == 1)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels, input_channels, kernel, kernel).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
order = "NCHW"
outputs = {}
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"Conv",
["X", "w", "b"],
["Y"],
stride=stride,
kernel=kernel,
pad=pad,
order=order,
kts="TUPLE",
engine=engine,
group=groups,
)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("w").feed(w)
self.ws.create_blob("b").feed(b)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@given(size=st.sampled_from([6, 8]),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5))
def test_max_pool_correctness(self, size, input_channels, batch_size):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
order = "NCHW"
outputs = {}
        # only 2x2 stride and 2x2 pooling are supported in NNPACK for now
stride = 2
kernel = 2
# The pooling strategy of NNPack is different from caffe2 pooling
pad = 0
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"MaxPool",
["X"],
["Y"],
stride=stride,
kernel=kernel,
pad=pad,
order=order,
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@given(size=st.sampled_from([6, 8]),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5))
def test_relu_correctness(self, size, input_channels, batch_size):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
outputs = {}
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"Relu",
["X"],
["Y"],
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@given(size=st.sampled_from([6, 8]),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5),
alpha=st.floats(0, 1))
def test_leaky_relu_correctness(self, size, input_channels, batch_size,
alpha):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
outputs = {}
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"LeakyRelu",
["X"],
["Y"],
alpha=alpha,
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@settings(timeout=3600)
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
@given(stride=st.integers(1, 1),
pad=st.integers(0, 2),
kernel=st.sampled_from([3, 5, 7]),
size=st.integers(30, 90),
input_channels=st.sampled_from([3, 64, 256]),
output_channels=st.sampled_from([32, 96, 256]),
batch_size=st.sampled_from([32, 64, 96, 128]))
def test_timings(self, stride, pad, kernel, size,
input_channels, output_channels, batch_size):
assume(stride <= kernel)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(output_channels, input_channels,
kernel, kernel).astype(np.float32) - 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
order = "NCHW"
times = {}
for engine in ["", "NNPACK"]:
net = core.Net(engine + "_test")
net.Conv(
["X", "W", "b"], "Y",
order=order,
kernel=kernel,
stride=stride,
pad=pad,
kts="TUPLE",
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("W").feed(w)
self.ws.create_blob("b").feed(b)
self.ws.run(net)
times[engine] = benchmark(self.ws, net)
print("Speedup for NNPACK: {:.2f}".format(
times[""] / times["NNPACK"]))
@settings(timeout=3600)
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
@given(size=st.integers(30, 90),
input_channels=st.sampled_from([3, 64, 256]),
batch_size=st.sampled_from([32, 64, 96, 128]))
def test_relu_timings(self, size, input_channels, batch_size):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
times = {}
for engine in ["", "NNPACK"]:
net = core.Net(engine + "_test")
net.Relu(
["X"],
["Y"],
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(net)
times[engine] = benchmark(self.ws, net)
print("Speedup for NNPACK: {:.2f}".format(
times[""] / times["NNPACK"]))
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-dataplex documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-dataplex"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-dataplex",
"github_user": "googleapis",
"github_repo": "python-dataplex",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-dataplex-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-dataplex.tex",
"google-cloud-dataplex Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-dataplex",
"google-cloud-dataplex Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-dataplex",
"google-cloud-dataplex Documentation",
author,
"google-cloud-dataplex",
"google-cloud-dataplex Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
|
# This is a test to see if I can push code from sherlock to github 170926
# -*- coding: utf-8 -*-
def most_stable_phase(phase_regions, pH=None, scale='RHE', pt_oxid_V=0.6470339):
    """
    Returns a list containing how close the closest phase is to the ORR
    equilibrium line and the Pourbaix entry object(s) corresponding to that
    phase. Meant to be used with phase_filter to create the input to this
    method.
    Args:
        phase_regions: Pourbaix diagram regions which will be analyzed
        pH: if given, only this pH is considered; otherwise the entire pH
            range is scanned
        scale: 'ORR_dist' returns the distance from the ORR equilibrium line,
            'Pt_ref' the difference between the system's V_crit and the Pt
            reference (pt_oxid_V), and 'RHE' the maximum achievable V on an
            RHE scale
        pt_oxid_V: Pt oxidation potential used for the 'Pt_ref' scale
    """
#start_fold - most_stable_phase
#start_fold - Imported Modules
from pd_screen_tools import ORR_line # ORR/OER V_equil, function of pH
#end_fold
# print phase_regions[0]
    if pH is None:
#start_fold - pH==None - Considers Entire pH range
point_dis_ORR_lst = [] # Smallest distance from ORR paired with
# Pourbaix entry object for each region
cnt = 0
for region in phase_regions:
dist_ORR_reg_lst = [] # Distance from ORR for all sides of region
for line in region[0]:
dist_ORR_0 = ORR_line(line[0][0])-line[1][0] # Distance from ORR
# for 1st endpoint
dist_ORR_1 = ORR_line(line[0][1])-line[1][1] # Distance from ORR
# for 2nd endpoint
# Grabs closest point from line segment
dist_ORR_reg_lst.append(min(dist_ORR_0,dist_ORR_1))
# Closest point on closest side to ORR line for phase region
min_dist_ORR = min(dist_ORR_reg_lst)
point_dis_ORR_lst.append([])
point_dis_ORR_lst[cnt].append(min_dist_ORR) # Closeness to ORR
point_dis_ORR_lst[cnt].append(region[1]) # Pourbaix entry object
cnt = cnt+1
most_stable_reg = min(point_dis_ORR_lst) # Closest point to ORR for
# inputed regions
#end_fold
    if pH is not None:
#start_fold - pH is Specified
point_dis_ORR_lmost = []
cnt = 0
V_max_of_regions = []
for region in phase_regions:
entry_name_lst = []
for i in region[1]:
entry_name_lst.append(i.name)
V_lst = []
for line in region[0]:
if round(line[0][0],4)==round(line[0][1],4):
# print 'Vertical line removed'
continue
pH_range = sorted(line[0])
if pH<pH_range[0] or pH>pH_range[1]:
# print 'pH not in range of line segment'
continue
m = (line[1][0]-line[1][1])/(line[0][0]-line[0][1])
b = line[1][0]-m*line[0][0]
# b2 = line[1][1]-m*line[0][1]
V_at_ph = m*pH+b
V_lst.append(V_at_ph)
            if V_lst:
                V_max = max(V_lst)
                V_and_entry = [V_max, region[1]]
                V_max_of_regions.append(V_and_entry)
V_max_total = max(V_max_of_regions)
V_dist_from_ORR = ORR_line(pH) - V_max_total[0]
V_max_total[0] = V_dist_from_ORR
most_stable_reg = V_max_total
#end_fold
# start_fold - Other Voltage References
## Distance from ORR line
if scale == 'ORR_dist':
        pass  # Already expressed as a distance from the ORR line
## Pt reference: V_diss - V_Pt
elif scale == 'Pt_ref':
# Pt_diss_V = 0.6470339
# Pt_diss_V = 0.83981341187500214
pt_crit_V = pt_oxid_V
most_stable_reg[0] = 1.23 - most_stable_reg[0] - pt_crit_V
## Returns the maximum achievable V on an RHE scale
elif scale == 'RHE':
most_stable_reg[0] = 1.23 - most_stable_reg[0]
# end_fold
return most_stable_reg
#end_fold
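# Illustrative sketch of the data layout this module assumes (inferred from
# the code above, not taken from the original project): each element of
# phase_regions is [sides, entries], where every side is a boundary segment
# [[pH_0, pH_1], [V_0, V_1]] and entries holds the one or two Pourbaix entry
# objects stable in that region.  A call such as
#     most_stable_phase(phase_regions, pH=0.5, scale='RHE')
# would then return [max_achievable_V_vs_RHE, entries] for the region whose
# upper boundary at pH = 0.5 lies closest to the ORR equilibrium line.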
def oxidation_dissolution_product(phase_regions_all, most_stable_phase):
"""
Returns the Pourbaix Entry of the most likely dissolved or oxidized phase
given a starting stable phase
    Graphically, this is the phase which lies above the stable phase (in terms
    of voltage), near the pH region at which the stable phase has the highest V_crit
"""
#start_fold - oxidation_dissolution_product
from pymatgen.analysis.pourbaix.maker import PREFAC
import numpy as np
slope = -0.0591
phase_regions_all_copy = phase_regions_all[:]
## Obtaining the Region Coordinates of the most_stable_phase ##
for region in phase_regions_all_copy:
if region[1] == most_stable_phase[1]:
stable_region_coord_tmp = region
stable_region_coord = stable_region_coord_tmp[0]
## Converting the coordinates to a RHE scale
for side in stable_region_coord:
side[1][0] = side[1][0] - slope*side[0][0]
side[1][1] = side[1][1] - slope*side[0][1]
## Finding the highest Voltage in the stable phase vs RHE
highest_V = -20
for side in stable_region_coord:
if side[1][0] >= highest_V:
highest_V = side[1][0]
if side[1][1] >= highest_V:
highest_V = side[1][1]
## Returns the coordinate of the most stable side/point
highest_V_points = []
for side in stable_region_coord:
if round(side[1][0], 4) == round(highest_V, 4):
highest_V_points.append([side[0][0],side[1][0]])
if round(side[1][1], 4) == round(highest_V, 4):
highest_V_points.append([side[0][1],side[1][1]])
    lst_0 = np.round(highest_V_points, 6)
    set_1 = set(map(tuple, lst_0))
    # Materialize as a list; a lazy map object would be exhausted after one pass
    most_stable_points = [list(point) for point in set_1]
# print most_stable_points
###
for point in most_stable_points:
point[1] = point[1]-PREFAC*point[0]
###
## Reversing the coordinates to a SHE scale
for side in stable_region_coord:
side[1][0] = side[1][0] + slope*side[0][0]
side[1][1] = side[1][1] + slope*side[0][1]
## Returns the Other Regions Which Contain the Most Stable point
# Not Including the Original Stable Phase
neighbor_reg = []
neighbor_reg_tmp = []
def format_to_point_pairs(line_segment):
point_0 = [line_segment[0][0],line_segment[1][0]]
point_1 = [line_segment[0][1],line_segment[1][1]]
return [point_0,point_1]
    def compare_points(point_0, point_1, rounding):
        return (round(point_0[0], rounding) == round(point_1[0], rounding) and
                round(point_0[1], rounding) == round(point_1[1], rounding))
adj_reg_lst = []
for point in most_stable_points:
for region in phase_regions_all:
# print '######################' #TEMP_PRINT
if region==stable_region_coord_tmp:
# print 'TEMP_PRINT'
continue
for segment in region[0]:
point_0 = format_to_point_pairs(segment)[0]
point_1 = format_to_point_pairs(segment)[1]
                if compare_points(point_0, point, 3) or compare_points(point_1, point, 4):
adj_reg_lst.append(region)
    def make_unique(original_list):
        unique_list = []
        for obj in original_list:
            if obj not in unique_list:
                unique_list.append(obj)
        return unique_list
    def binary_region_entries(region):
        # Return the one or two Pourbaix entries associated with a region
        if len(region[1]) == 1:
            return [region[1][0]]
        if len(region[1]) == 2:
            return [region[1][0], region[1][1]]
uniq_lst = make_unique(adj_reg_lst)
for i in uniq_lst:
i.append(adj_reg_lst.count(i))
tmp = max(uniq_lst, key=lambda x: x[2])
neighbor_reg_0 = binary_region_entries(tmp)
name_lst = []
for i in neighbor_reg_0:
name_lst.append(i.phase_type)
# print name_lst
return name_lst
#end_fold
def is_nonox_phase(phase_regions):
"""
    Checks if there exists a non-oxide solid phase
    "Position-agnostic" stability criterion on a Pourbaix diagram
Args:
phase_regions: PD regions which will be analyzed
"""
#start_fold - is_nonox_phase
from pymatgen.analysis.pourbaix.entry import PourbaixEntry, IonEntry
## CHECKING FOR NON-OXIDE SOLID PHASES ##
    non_oxide = False
    for region in phase_regions:
        if len(region[1]) == 1:  # Region has a single Pourbaix entry
            if region[1][0].phase_type != 'Solid':
                continue  # Only solid phases are of interest
            elem_lst = [elem.symbol for elem in
                        region[1][0].composition.elements]
            if 'O' not in elem_lst:
                non_oxide = True
                break
        elif len(region[1]) == 2:  # Region has two Pourbaix entries
            reg_phase_type_1 = region[1][0].phase_type
            reg_phase_type_2 = region[1][1].phase_type
            if not (reg_phase_type_1 == 'Solid' and
                    reg_phase_type_2 == 'Solid'):
                continue  # Both entries must be solid
            elem_comb = region[1][0].composition.elements + \
                region[1][1].composition.elements
            elem_lst = [elem.symbol for elem in elem_comb]
            if 'O' not in elem_lst:
                non_oxide = True
                break
    return non_oxide
#end_fold
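# Example (hypothetical entries): a region whose single entry is solid Pt
# (no oxygen in its composition) makes is_nonox_phase return True, whereas a
# region whose only solid entry is PtO2 does not, since 'O' appears in its
# element list.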
|
|
"""
Support for Wink locks.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/lock.wink/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.lock import LockDevice
from homeassistant.components.wink import DOMAIN, WinkDevice
from homeassistant.const import (
ATTR_CODE, ATTR_ENTITY_ID, ATTR_NAME, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['wink']
_LOGGER = logging.getLogger(__name__)
SERVICE_SET_VACATION_MODE = 'wink_set_lock_vacation_mode'
SERVICE_SET_ALARM_MODE = 'wink_set_lock_alarm_mode'
SERVICE_SET_ALARM_SENSITIVITY = 'wink_set_lock_alarm_sensitivity'
SERVICE_SET_ALARM_STATE = 'wink_set_lock_alarm_state'
SERVICE_SET_BEEPER_STATE = 'wink_set_lock_beeper_state'
SERVICE_ADD_KEY = 'wink_add_new_lock_key_code'
ATTR_ENABLED = 'enabled'
ATTR_SENSITIVITY = 'sensitivity'
ATTR_MODE = 'mode'
ALARM_SENSITIVITY_MAP = {
'low': 0.2,
'medium_low': 0.4,
'medium': 0.6,
'medium_high': 0.8,
'high': 1.0,
}
ALARM_MODES_MAP = {
'activity': 'alert',
'forced_entry': 'forced_entry',
'tamper': 'tamper',
}
SET_ENABLED_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_ENABLED): cv.string,
})
SET_SENSITIVITY_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_SENSITIVITY): vol.In(ALARM_SENSITIVITY_MAP)
})
SET_ALARM_MODES_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_MODE): vol.In(ALARM_MODES_MAP)
})
ADD_KEY_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_CODE): cv.positive_int,
})
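# Example service call (illustrative values) validated by ADD_KEY_SCHEMA above
# and handled by the service registered below:
#   service: wink.wink_add_new_lock_key_code
#   data:
#     entity_id: lock.front_door
#     name: "Guest"
#     code: 1234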
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink platform."""
import pywink
for lock in pywink.get_locks():
_id = lock.object_id() + lock.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_entities([WinkLockDevice(lock, hass)])
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get('entity_id')
all_locks = hass.data[DOMAIN]['entities']['lock']
locks_to_set = []
if entity_ids is None:
locks_to_set = all_locks
else:
for lock in all_locks:
if lock.entity_id in entity_ids:
locks_to_set.append(lock)
for lock in locks_to_set:
if service.service == SERVICE_SET_VACATION_MODE:
lock.set_vacation_mode(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_ALARM_STATE:
lock.set_alarm_state(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_BEEPER_STATE:
lock.set_beeper_state(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_ALARM_MODE:
lock.set_alarm_mode(service.data.get(ATTR_MODE))
elif service.service == SERVICE_SET_ALARM_SENSITIVITY:
lock.set_alarm_sensitivity(service.data.get(ATTR_SENSITIVITY))
elif service.service == SERVICE_ADD_KEY:
name = service.data.get(ATTR_NAME)
code = service.data.get(ATTR_CODE)
lock.add_new_key(code, name)
hass.services.register(DOMAIN, SERVICE_SET_VACATION_MODE,
service_handle,
schema=SET_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_ALARM_STATE,
service_handle,
schema=SET_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_BEEPER_STATE,
service_handle,
schema=SET_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_ALARM_MODE,
service_handle,
schema=SET_ALARM_MODES_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_ALARM_SENSITIVITY,
service_handle,
schema=SET_SENSITIVITY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_ADD_KEY,
service_handle,
schema=ADD_KEY_SCHEMA)
class WinkLockDevice(WinkDevice, LockDevice):
"""Representation of a Wink lock."""
@asyncio.coroutine
def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['lock'].append(self)
@property
def is_locked(self):
"""Return true if device is locked."""
return self.wink.state()
def lock(self, **kwargs):
"""Lock the device."""
self.wink.set_state(True)
def unlock(self, **kwargs):
"""Unlock the device."""
self.wink.set_state(False)
def set_alarm_state(self, enabled):
"""Set lock's alarm state."""
self.wink.set_alarm_state(enabled)
def set_vacation_mode(self, enabled):
"""Set lock's vacation mode."""
self.wink.set_vacation_mode(enabled)
def set_beeper_state(self, enabled):
"""Set lock's beeper mode."""
self.wink.set_beeper_mode(enabled)
def add_new_key(self, code, name):
"""Add a new user key code."""
self.wink.add_new_key(code, name)
def set_alarm_sensitivity(self, sensitivity):
"""
Set lock's alarm sensitivity.
Valid sensitivities:
0.2, 0.4, 0.6, 0.8, 1.0
"""
self.wink.set_alarm_sensitivity(sensitivity)
def set_alarm_mode(self, mode):
"""
Set lock's alarm mode.
Valid modes:
alert - Beep when lock is locked or unlocked
tamper - 15 sec alarm when lock is disturbed when locked
forced_entry - 3 min alarm when significant force applied
to door when locked.
"""
self.wink.set_alarm_mode(mode)
@property
def device_state_attributes(self):
"""Return the state attributes."""
super_attrs = super().device_state_attributes
sensitivity = dict_value_to_key(ALARM_SENSITIVITY_MAP,
self.wink.alarm_sensitivity())
super_attrs['alarm_sensitivity'] = sensitivity
super_attrs['vacation_mode'] = self.wink.vacation_mode_enabled()
super_attrs['beeper_mode'] = self.wink.beeper_enabled()
super_attrs['auto_lock'] = self.wink.auto_lock_enabled()
alarm_mode = dict_value_to_key(ALARM_MODES_MAP,
self.wink.alarm_mode())
super_attrs['alarm_mode'] = alarm_mode
super_attrs['alarm_enabled'] = self.wink.alarm_enabled()
return super_attrs
def dict_value_to_key(dict_map, comp_value):
"""Return the key that has the provided value."""
for key, value in dict_map.items():
if value == comp_value:
return key
return STATE_UNKNOWN
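# Example (illustrative values): dict_value_to_key(ALARM_SENSITIVITY_MAP, 0.6)
# returns 'medium', while a value with no matching key, such as 0.7, falls
# through to STATE_UNKNOWN.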
|
|
# -*- coding: utf-8 -*-
import requests
from six import string_types
import telebot
from telebot import types
logger = telebot.logger
def _make_request(token, method_name, method='get', params=None, files=None):
"""
Makes a request to the Telegram API.
:param token: The bot's API token. (Created with @BotFather)
:param method_name: Name of the API method to be called. (E.g. 'getUpdates')
:param method: HTTP method to be used. Defaults to 'get'.
:param params: Optional parameters. Should be a dictionary with key-value pairs.
:param files: Optional files.
    :return: The parsed 'result' field of the API response.
"""
request_url = telebot.API_URL + 'bot' + token + '/' + method_name
result = requests.request(method, request_url, params=params, files=files)
logger.debug(result.text)
    if result.status_code != 200:
        raise ApiException(method_name, result)
    try:
        result_json = result.json()
    except Exception:
        raise ApiException(method_name, result)
    if not result_json.get('ok'):
        raise ApiException(method_name, result)
    return result_json['result']
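# Example (hypothetical token): with token '123:ABC' and method_name 'getMe',
# the request URL becomes telebot.API_URL + 'bot123:ABC/getMe', i.e. the usual
# https://api.telegram.org/bot<token>/<method> form when API_URL is left at
# the default Telegram endpoint.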
def get_me(token):
method_url = 'getMe'
return _make_request(token, method_url)
def send_message(token, chat_id, text, disable_web_page_preview=None, reply_to_message_id=None, reply_markup=None):
"""
Use this method to send text messages. On success, the sent Message is returned.
:param token:
:param chat_id:
:param text:
:param disable_web_page_preview:
:param reply_to_message_id:
:param reply_markup:
:return:
"""
method_url = r'sendMessage'
payload = {'chat_id': str(chat_id), 'text': text}
if disable_web_page_preview:
payload['disable_web_page_preview'] = disable_web_page_preview
if reply_to_message_id:
payload['reply_to_message_id'] = reply_to_message_id
if reply_markup:
payload['reply_markup'] = _convert_markup(reply_markup)
return _make_request(token, method_url, params=payload, method='post')
def get_updates(token, offset=None, limit=None, timeout=None):
method_url = r'getUpdates'
payload = {}
if offset:
payload['offset'] = offset
if limit:
payload['limit'] = limit
if timeout:
payload['timeout'] = timeout
return _make_request(token, method_url, params=payload)
def get_user_profile_photos(token, user_id, offset=None, limit=None):
method_url = r'getUserProfilePhotos'
payload = {'user_id': user_id}
if offset:
payload['offset'] = offset
if limit:
payload['limit'] = limit
return _make_request(token, method_url, params=payload)
def forward_message(token, chat_id, from_chat_id, message_id):
method_url = r'forwardMessage'
payload = {'chat_id': chat_id, 'from_chat_id': from_chat_id, 'message_id': message_id}
return _make_request(token, method_url, params=payload)
def send_photo(token, chat_id, photo, caption=None, reply_to_message_id=None, reply_markup=None):
method_url = r'sendPhoto'
payload = {'chat_id': chat_id}
files = None
if not is_string(photo):
files = {'photo': photo}
else:
payload['photo'] = photo
if caption:
payload['caption'] = caption
if reply_to_message_id:
payload['reply_to_message_id'] = reply_to_message_id
if reply_markup:
payload['reply_markup'] = _convert_markup(reply_markup)
return _make_request(token, method_url, params=payload, files=files, method='post')
def send_location(token, chat_id, latitude, longitude, reply_to_message_id=None, reply_markup=None):
method_url = r'sendLocation'
payload = {'chat_id': chat_id, 'latitude': latitude, 'longitude': longitude}
if reply_to_message_id:
payload['reply_to_message_id'] = reply_to_message_id
if reply_markup:
payload['reply_markup'] = _convert_markup(reply_markup)
return _make_request(token, method_url, params=payload)
def send_chat_action(token, chat_id, action):
method_url = r'sendChatAction'
payload = {'chat_id': chat_id, 'action': action}
return _make_request(token, method_url, params=payload)
def send_video(token, chat_id, data, duration=None, caption=None, reply_to_message_id=None, reply_markup=None):
method_url = r'sendVideo'
payload = {'chat_id': chat_id}
files = None
if not is_string(data):
files = {'video': data}
else:
payload['video'] = data
if duration:
payload['duration'] = duration
if caption:
payload['caption'] = caption
if reply_to_message_id:
payload['reply_to_message_id'] = reply_to_message_id
if reply_markup:
payload['reply_markup'] = _convert_markup(reply_markup)
return _make_request(token, method_url, params=payload, files=files, method='post')
def send_data(token, chat_id, data, data_type, reply_to_message_id=None, reply_markup=None):
method_url = get_method_by_type(data_type)
payload = {'chat_id': chat_id}
files = None
if not is_string(data):
files = {data_type: data}
else:
payload[data_type] = data
if reply_to_message_id:
payload['reply_to_message_id'] = reply_to_message_id
if reply_markup:
payload['reply_markup'] = _convert_markup(reply_markup)
return _make_request(token, method_url, params=payload, files=files, method='post')
def get_method_by_type(data_type):
if data_type == 'audio':
return 'sendAudio'
if data_type == 'document':
return 'sendDocument'
if data_type == 'sticker':
return 'sendSticker'
def _convert_markup(markup):
if isinstance(markup, types.JsonSerializable):
return markup.to_json()
return markup
def is_string(var):
return isinstance(var, string_types)
def is_command(text):
"""
Checks if `text` is a command. Telegram chat commands start with the '/' character.
:param text: Text to check.
:return: True if `text` is a command, else False.
"""
return text.startswith('/')
def extract_command(text):
"""
Extracts the command from `text` (minus the '/') if `text` is a command (see is_command).
If `text` is not a command, this function returns None.
Examples:
extract_command('/help'): 'help'
extract_command('/help@BotName'): 'help'
extract_command('/search black eyed peas'): 'search'
extract_command('Good day to you'): None
:param text: String to extract the command from
:return: the command if `text` is a command (according to is_command), else None.
"""
return text.split()[0].split('@')[0][1:] if is_command(text) else None
def split_string(text, chars_per_string):
"""
    Splits one string into multiple strings, each containing at most `chars_per_string` characters.
    This is useful for splitting one giant message into several smaller ones.
    :param text: The text to split
    :param chars_per_string: The maximum number of characters per resulting string.
    :return: The split text as a list of strings.
"""
return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]
class ApiException(Exception):
"""
This class represents an Exception thrown when a call to the Telegram API fails.
"""
def __init__(self, function_name, result):
super(ApiException, self).__init__('{0} failed. Returned result: {1}'.format(function_name, result))
self.function_name = function_name
self.result = result
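# Usage sketch (not part of the original module) exercising the pure helper
# functions above; the strings are illustrative only.
if __name__ == '__main__':
    assert is_command('/help') and not is_command('hello there')
    assert extract_command('/help@BotName') == 'help'
    assert extract_command('Good day to you') is None
    assert split_string('abcdefgh', 3) == ['abc', 'def', 'gh']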
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multi_label_head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow_estimator.python.estimator.canned import dnn
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.head import head_utils as test_lib
from tensorflow_estimator.python.estimator.head import multi_label_head as head_lib
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
def _sigmoid_cross_entropy(labels, logits):
"""Returns sigmoid cross entropy averaged over classes."""
sigmoid_logits = 1 / (1 + np.exp(-logits))
unreduced_result = (-labels * np.log(sigmoid_logits) -
(1 - labels) * np.log(1 - sigmoid_logits))
# Mean over classes
return np.mean(unreduced_result, axis=-1, keepdims=True)
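# Worked example (illustrative numbers): for logits [[0., 0.]] and labels
# [[1, 0]], sigmoid(0) = 0.5 and each class contributes -log(0.5) ~= 0.693,
# so the class-averaged loss returned above is also ~0.693.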
@test_util.run_all_in_graph_and_eager_modes
class MultiLabelHead(tf.test.TestCase):
def test_n_classes_is_none(self):
with self.assertRaisesRegexp(
ValueError,
r'n_classes must be > 1 for multi-label classification\. Given: None'):
head_lib.MultiLabelHead(n_classes=None)
def test_n_classes_is_1(self):
with self.assertRaisesRegexp(
ValueError,
r'n_classes must be > 1 for multi-label classification\. Given: 1'):
head_lib.MultiLabelHead(n_classes=1)
def test_threshold_too_small(self):
with self.assertRaisesRegexp(
ValueError, r'thresholds must be in \(0, 1\) range\. Given: 0\.0'):
head_lib.MultiLabelHead(n_classes=2, thresholds=[0., 0.5])
def test_threshold_too_large(self):
with self.assertRaisesRegexp(
ValueError, r'thresholds must be in \(0, 1\) range\. Given: 1\.0'):
head_lib.MultiLabelHead(n_classes=2, thresholds=[0.5, 1.0])
def test_label_vocabulary_dict(self):
with self.assertRaisesRegexp(
ValueError, r'label_vocabulary must be a list or tuple\. '
r'Given type: <(type|class) \'dict\'>'):
head_lib.MultiLabelHead(n_classes=2, label_vocabulary={'foo': 'bar'})
def test_label_vocabulary_wrong_size(self):
with self.assertRaisesRegexp(
ValueError,
r'Length of label_vocabulary must be n_classes \(3\). Given: 2'):
head_lib.MultiLabelHead(n_classes=3, label_vocabulary=['foo', 'bar'])
def test_invalid_loss_reduction(self):
with self.assertRaisesRegexp(
ValueError, r'Invalid loss_reduction: invalid_loss_reduction'):
head_lib.MultiLabelHead(
n_classes=3, loss_reduction='invalid_loss_reduction')
with self.assertRaisesRegexp(ValueError, r'Invalid loss_reduction: none'):
head_lib.MultiLabelHead(
n_classes=3, loss_reduction=tf.losses.Reduction.NONE)
def test_loss_fn_arg_labels_missing(self):
def _loss_fn(logits):
del logits # Unused
with self.assertRaisesRegexp(
ValueError, r'loss_fn must contain argument: labels\. '
r'Given arguments: \(\'logits\',\)'):
head_lib.MultiLabelHead(n_classes=3, loss_fn=_loss_fn)
def test_loss_fn_arg_logits_missing(self):
def _loss_fn(labels):
del labels # unused
with self.assertRaisesRegexp(
ValueError, r'loss_fn must contain argument: logits\. '
r'Given arguments: \(\'labels\',\)'):
head_lib.MultiLabelHead(n_classes=3, loss_fn=_loss_fn)
def test_loss_fn_arg_features_ok(self):
def _loss_fn(labels, logits, features):
del labels, logits, features # Unused
head_lib.MultiLabelHead(n_classes=3, loss_fn=_loss_fn)
def test_loss_fn_arg_invalid(self):
def _loss_fn(labels, logits, name=None):
del labels, logits, name # Unused
with self.assertRaisesRegexp(ValueError,
r'loss_fn has unexpected args: \[\'name\'\]'):
head_lib.MultiLabelHead(n_classes=3, loss_fn=_loss_fn)
def test_classes_for_class_based_metrics_invalid(self):
with self.assertRaisesRegexp(
ValueError,
r'All classes_for_class_based_metrics must be in range \[0, 2\]\. '
r'Given: -1'):
head_lib.MultiLabelHead(
n_classes=3, classes_for_class_based_metrics=[2, -1])
def test_classes_for_class_based_metrics_string_invalid(self):
with self.assertRaisesRegexp(ValueError, r'\'z\' is not in list'):
head_lib.MultiLabelHead(
n_classes=3,
label_vocabulary=['a', 'b', 'c'],
classes_for_class_based_metrics=['c', 'z'])
def test_predict(self):
n_classes = 4
head = head_lib.MultiLabelHead(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
logits = np.array([[0., 1., 2., -1.], [-1., -2., -3., 1.]],
dtype=np.float32)
expected_probabilities = tf.math.sigmoid(logits)
expected_export_classes = [[b'0', b'1', b'2', b'3']] * 2
keys = prediction_keys.PredictionKeys
preds = head.predictions(logits,
[keys.LOGITS, keys.PROBABILITIES, keys.CLASSES])
self.assertAllClose(logits, self.evaluate(preds[keys.LOGITS]))
self.assertAllClose(expected_probabilities,
self.evaluate(preds[keys.PROBABILITIES]))
if tf.executing_eagerly():
return
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=ModeKeys.PREDICT,
logits=logits,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
self.assertItemsEqual(
(test_lib._DEFAULT_SERVING_KEY, 'predict', 'classification'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllEqual(expected_export_classes,
predictions[prediction_keys.PredictionKeys.CLASSES])
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(
expected_probabilities,
predictions[prediction_keys.PredictionKeys.PROBABILITIES])
self.assertAllClose(
expected_probabilities,
sess.run(spec.export_outputs[test_lib._DEFAULT_SERVING_KEY].scores))
self.assertAllEqual(
expected_export_classes,
sess.run(spec.export_outputs[test_lib._DEFAULT_SERVING_KEY].classes))
def test_weight_should_not_impact_prediction(self):
n_classes = 4
head = head_lib.MultiLabelHead(n_classes, weight_column='example_weights')
self.assertEqual(n_classes, head.logits_dimension)
logits = np.array([[0., 1., 2., -1.], [-1., -2., -3., 1.]],
dtype=np.float32)
expected_probabilities = tf.math.sigmoid(logits)
expected_export_classes = [[b'0', b'1', b'2', b'3']] * 2
weights_2x1 = [[1.], [2.]]
features = {
'x': np.array(((42,),), dtype=np.int32),
'example_weights': weights_2x1
}
keys = prediction_keys.PredictionKeys
preds = head.predictions(logits,
[keys.LOGITS, keys.PROBABILITIES, keys.CLASSES])
self.assertAllClose(logits, self.evaluate(preds[keys.LOGITS]))
self.assertAllClose(expected_probabilities,
self.evaluate(preds[keys.PROBABILITIES]))
if tf.executing_eagerly():
return
spec = head.create_estimator_spec(
features=features,
mode=ModeKeys.PREDICT,
logits=logits,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
# Assert predictions and export_outputs.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllEqual(expected_export_classes,
predictions[prediction_keys.PredictionKeys.CLASSES])
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(
expected_probabilities,
predictions[prediction_keys.PredictionKeys.PROBABILITIES])
def test_eval_create_loss(self):
"""Tests head.loss for eval mode."""
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = (labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))) / 2
expected_training_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits))
actual_training_loss = head.loss(
logits=logits, labels=labels, features=features, mode=ModeKeys.EVAL)
self.assertAllClose(expected_training_loss,
self.evaluate(actual_training_loss))
def test_eval_create_loss_large_logits(self):
"""Tests head.loss for eval mode and large logits."""
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# For large logits, this is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits
expected_training_loss = 0.5 * np.sum(
np.array([[(10. + 10.) / 2.], [(15. + 0.) / 2.]], dtype=np.float32))
actual_training_loss = head.loss(
logits=logits, labels=labels, features=features, mode=ModeKeys.EVAL)
self.assertAllClose(
expected_training_loss, self.evaluate(actual_training_loss), atol=1e-4)
def test_eval_create_loss_labels_wrong_shape(self):
"""Tests head.loss for eval mode when labels has the wrong shape."""
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)
labels_2x1 = np.array([[1], [1]], dtype=np.int64)
labels_2 = np.array([1, 1], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
if tf.executing_eagerly():
with self.assertRaisesRegexp(ValueError, 'Expected labels dimension=2'):
head.loss(
logits=logits,
labels=labels_2x1,
features=features,
mode=ModeKeys.EVAL)
with self.assertRaisesRegexp(ValueError, 'Expected labels dimension=2'):
head.loss(
logits=logits,
labels=labels_2,
features=features,
mode=ModeKeys.EVAL)
else:
labels_placeholder = tf.compat.v1.placeholder(dtype=tf.dtypes.int64)
actual_training_loss = head.loss(
logits=logits,
labels=labels_placeholder,
features=features,
mode=ModeKeys.EVAL)
with self.cached_session():
test_lib._initialize_variables(self, tf.compat.v1.train.Scaffold())
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 2\] \[labels_shape: \] \[2 1\]'):
actual_training_loss.eval({labels_placeholder: labels_2x1})
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
r'labels shape must be \[D0, D1, ... DN, 2\]\..*'
r'\[Received shape: \] \[2\]'):
actual_training_loss.eval({labels_placeholder: labels_2})
def test_eval_create_loss_loss_fn(self):
"""Tests head.loss for eval mode and custom loss_fn."""
loss = np.array([[1.], [2.]], dtype=np.float32)
logits_input = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels_input = np.array([[1, 0], [1, 1]], dtype=np.int64)
def _loss_fn(labels, logits):
check_labels = tf.debugging.Assert(
tf.reduce_all(tf.math.equal(labels, labels_input)), data=[labels])
check_logits = tf.debugging.Assert(
tf.reduce_all(tf.math.equal(logits, logits_input)), data=[logits])
with tf.control_dependencies([check_labels, check_logits]):
return tf.constant(loss)
head = head_lib.MultiLabelHead(n_classes=2, loss_fn=_loss_fn)
actual_training_loss = head.loss(
logits=logits_input,
labels=labels_input,
features={'x': np.array(((42,),), dtype=np.int32)},
mode=ModeKeys.EVAL)
self.assertAllClose(np.sum(loss) / 2., self.evaluate(actual_training_loss))
def test_eval_create_loss_loss_fn_wrong_shape(self):
"""Tests custom loss_fn that returns Tensor of unexpected shape."""
loss = np.array([1., 2.], dtype=np.float32)
def _loss_fn(labels, logits):
del labels, logits # Unused
return tf.constant(loss)
head = head_lib.MultiLabelHead(n_classes=2, loss_fn=_loss_fn)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
if tf.executing_eagerly():
with self.assertRaisesRegexp(ValueError, 'loss_shape'):
head.loss(
logits=logits, labels=labels, features=features, mode=ModeKeys.EVAL)
else:
actual_training_loss = head.loss(
logits=logits, labels=labels, features=features, mode=ModeKeys.EVAL)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
r'\[loss_fn must return Tensor of shape \[D0, D1, ... DN, 1\]\. \] '
r'\[logits_shape: \] \[2 2\] \[loss_shape: \] \[2\]'):
self.evaluate(actual_training_loss)
def test_eval_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib.MultiLabelHead(n_classes=2)
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.loss(
logits=np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
labels=None,
features={'x': np.array(((42,),), dtype=np.int32)},
mode=ModeKeys.EVAL)
def _test_eval(self,
head,
logits,
labels,
expected_loss,
expected_metrics,
features=None,
regularization_losses=None):
tol = 1e-3
if tf.executing_eagerly():
loss = head.loss(
labels,
logits,
features=features or {},
mode=ModeKeys.EVAL,
regularization_losses=regularization_losses)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
eval_metrics = head.metrics(regularization_losses=regularization_losses)
updated_metrics = head.update_metrics(
eval_metrics,
features or {},
logits,
labels,
regularization_losses=regularization_losses)
self.assertItemsEqual(expected_metrics.keys(), updated_metrics.keys())
self.assertAllClose(
expected_metrics,
{k: updated_metrics[k].result() for k in updated_metrics},
rtol=tol,
atol=tol)
return
spec = head.create_estimator_spec(
features=features or {},
mode=ModeKeys.EVAL,
logits=logits,
labels=labels,
regularization_losses=regularization_losses,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
test_lib._assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, _ = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of value ops (in `metrics`).
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol,
atol=tol)
def test_eval(self):
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_sparse_labels(self):
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
# Equivalent to multi_hot = [[1, 0], [1, 1]]
labels = tf.sparse.SparseTensor(
values=[0, 0, 1], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2])
labels_multi_hot = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels_multi_hot, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_regularization_losses(self):
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
regularization_losses = [1.5, 0.5]
expected_regularization_loss = 2.
# unregularized_loss = sum(
# labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))) / batch_size
expected_unregularized_loss = np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits)) / 2.
expected_regularized_loss = (
expected_unregularized_loss + expected_regularization_loss)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_unregularized_loss,
keys.LOSS_REGULARIZATION: expected_regularization_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_regularized_loss,
expected_metrics=expected_metrics,
regularization_losses=regularization_losses)
def test_eval_with_label_vocabulary(self):
n_classes = 2
head = head_lib.MultiLabelHead(
n_classes, label_vocabulary=['class0', 'class1'])
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
# Equivalent to multi_hot = [[1, 0], [1, 1]]
labels = tf.sparse.SparseTensor(
values=['class0', 'class0', 'class1'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
labels_multi_hot = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels_multi_hot, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_label_vocabulary_with_multi_hot_input(self):
n_classes = 2
head = head_lib.MultiLabelHead(
n_classes, label_vocabulary=['class0', 'class1'])
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels_multi_hot = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels_multi_hot, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels_multi_hot,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_thresholds(self):
n_classes = 2
thresholds = [0.25, 0.5, 0.75]
head = head_lib.MultiLabelHead(n_classes, thresholds=thresholds)
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
keys.ACCURACY_AT_THRESHOLD % thresholds[0]: 2. / 4.,
keys.PRECISION_AT_THRESHOLD % thresholds[0]: 2. / 3.,
keys.RECALL_AT_THRESHOLD % thresholds[0]: 2. / 3.,
keys.ACCURACY_AT_THRESHOLD % thresholds[1]: 1. / 4.,
keys.PRECISION_AT_THRESHOLD % thresholds[1]: 1. / 2.,
keys.RECALL_AT_THRESHOLD % thresholds[1]: 1. / 3.,
keys.ACCURACY_AT_THRESHOLD % thresholds[2]: 2. / 4.,
keys.PRECISION_AT_THRESHOLD % thresholds[2]: 1. / 1.,
keys.RECALL_AT_THRESHOLD % thresholds[2]: 1. / 3.,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_classes_for_class_based_metrics(self):
head = head_lib.MultiLabelHead(
n_classes=2, classes_for_class_based_metrics=[0, 1])
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
keys.PROBABILITY_MEAN_AT_CLASS % 0:
tf.math.reduce_sum(tf.math.sigmoid(logits[:, 0])) / 2.,
keys.AUC_AT_CLASS % 0: 0.,
keys.AUC_PR_AT_CLASS % 0: 1.,
keys.PROBABILITY_MEAN_AT_CLASS % 1:
tf.math.reduce_sum(tf.math.sigmoid(logits[:, 1])) / 2.,
keys.AUC_AT_CLASS % 1: 1.,
keys.AUC_PR_AT_CLASS % 1: 1.,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_classes_for_class_based_metrics_string(self):
head = head_lib.MultiLabelHead(
n_classes=2,
label_vocabulary=['a', 'b'],
classes_for_class_based_metrics=['a', 'b'])
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = tf.sparse.SparseTensor(
values=['a', 'a', 'b'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
labels_onehot = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels_onehot, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
keys.PROBABILITY_MEAN_AT_NAME % 'a':
tf.math.reduce_sum(tf.math.sigmoid(logits[:, 0])) / 2.,
keys.AUC_AT_NAME % 'a': 0.,
keys.AUC_PR_AT_NAME % 'a': 1.,
keys.PROBABILITY_MEAN_AT_NAME % 'b':
tf.math.reduce_sum(tf.math.sigmoid(logits[:, 1])) / 2.,
keys.AUC_AT_NAME % 'b': 1.,
keys.AUC_PR_AT_NAME % 'b': 1.,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_weights(self):
n_classes = 2
head = head_lib.MultiLabelHead(n_classes, weight_column='example_weights')
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {
'x': np.array([[41], [42]], dtype=np.int32),
'example_weights': np.array([[1.], [2.]], dtype=np.float32),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, weighted sum over examples, divide by batch_size.
# loss = (1 * (10 + 10) / 2 + 2 * (15 + 0) / 2) / 2
expected_loss = 12.5
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over weighted examples (denominator is sum(weights)).
keys.LOSS_MEAN: expected_loss * (2. / 3.),
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.2000,
keys.AUC_PR: 0.7280,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics,
features=features)
def test_train_create_loss_large_logits(self):
"""Tests head.create_loss for train mode and large logits."""
n_classes = 2
head = head_lib.MultiLabelHead(n_classes, weight_column='example_weights')
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
weights = np.array([[1.], [2.]], dtype=np.float32)
features = {
'x': np.array(((42,),), dtype=np.int32),
'example_weights': weights
}
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# For large logits, this is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits
# expected_unreduced_loss = [[(10. + 10.) / 2.], [(15. + 0.) / 2.]]
# expected_weights = [[1.], [2.]]
expected_training_loss = (1. * (10. + 10.) / 2. + 2. * (15. + 0.) / 2.) / 2.
training_loss = head.loss(
logits=logits, labels=labels, features=features, mode=ModeKeys.TRAIN)
self.assertAllClose(
expected_training_loss, self.evaluate(training_loss), atol=1e-4)
def test_train_create_loss_loss_reduction(self):
"""Tests head.create_loss with loss_reduction."""
n_classes = 2
head = head_lib.MultiLabelHead(
n_classes,
weight_column='example_weights',
loss_reduction=tf.losses.Reduction.SUM)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
weights = np.array([[1.], [2.]], dtype=np.float32)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# For large logits, this is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits
# expected_unreduced_loss = [[(10. + 10.) / 2.], [(15. + 0.) / 2.]]
# expected_weights = [[1.], [2.]]
expected_training_loss = (1. * (10. + 10.) + 2. * (15. + 0.)) / 2.
training_loss = head.loss(
logits=logits,
labels=labels,
features={
'x': np.array(((42,),), dtype=np.int32),
'example_weights': weights
},
mode=ModeKeys.TRAIN)
self.assertAllClose(
expected_training_loss, self.evaluate(training_loss), atol=1e-4)
def test_train_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib.MultiLabelHead(n_classes=2)
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.loss(
logits=np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
labels=None,
features={'x': np.array(((42,),), dtype=np.int32)},
mode=ModeKeys.TRAIN)
def test_train_invalid_indicator_labels(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
# The value 2 is outside the allowed range.
labels = np.array([[2, 0], [1, 1]], dtype=np.int64)
if tf.executing_eagerly():
with self.assertRaisesRegexp(
ValueError,
r'labels must be an integer indicator Tensor with values in '
r'\[0, 1\]'):
head.loss(
logits=logits, labels=labels, features={}, mode=ModeKeys.TRAIN)
return
def _train_op_fn(loss):
del loss
return tf.no_op()
with self.cached_session() as sess:
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
r'labels must be an integer indicator Tensor with values in '
r'\[0, 1\]'):
spec = head.create_estimator_spec(
features={},
mode=ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
trainable_variables=[
tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)
])
test_lib._initialize_variables(self, spec.scaffold)
sess.run(spec.loss)
def test_train_invalid_sparse_labels(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
# The value 2 is outside the allowed range.
labels = tf.sparse.SparseTensor(
values=[2, 0, 1], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2])
if tf.executing_eagerly():
with self.assertRaisesRegexp(
ValueError,
r'labels must be an integer SparseTensor with values in \[0, 2\)'):
head.loss(
logits=logits, labels=labels, features={}, mode=ModeKeys.TRAIN)
return
def _train_op_fn(loss):
del loss
return tf.no_op()
with self.cached_session() as sess:
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
r'labels must be an integer SparseTensor with values in \[0, 2\)'):
spec = head.create_estimator_spec(
features={},
mode=ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
trainable_variables=[
tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)
])
test_lib._initialize_variables(self, spec.scaffold)
sess.run(spec.loss)
def _test_train(self, head, logits, labels, expected_loss):
tol = 1e-3
features = {'x': np.array(((42,),), dtype=np.int32)}
if tf.executing_eagerly():
loss = head.loss(
logits=logits, labels=labels, features=features, mode=ModeKeys.TRAIN)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
return
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return tf.strings.join([
tf.constant(expected_train_result),
tf.strings.as_string(loss, precision=3)
])
spec = head.create_estimator_spec(
features=features,
mode=ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
test_lib._assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run(
(spec.loss, spec.train_op, spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
test_lib._assert_simple_summaries(
self, {metric_keys.MetricKeys.LOSS: expected_loss}, summary_str, tol)
def test_train(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over examples, divide by batch_size.
# loss = ((10 + 10) / 2 + (15 + 0) / 2 ) / 2
expected_loss = 8.75
self._test_train(
head=head, logits=logits, labels=labels, expected_loss=expected_loss)
def test_train_sparse_labels(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
# Equivalent to multi_hot = [[1, 0], [1, 1]]
labels = tf.sparse.SparseTensor(
values=[0, 0, 1], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2])
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over examples, divide by batch_size.
# loss = ((10 + 10) / 2 + (15 + 0) / 2 ) / 2
expected_loss = 8.75
self._test_train(
head=head, logits=logits, labels=labels, expected_loss=expected_loss)
def test_train_with_label_vocabulary(self):
head = head_lib.MultiLabelHead(
n_classes=2, label_vocabulary=['class0', 'class1'])
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
# Equivalent to multi_hot = [[1, 0], [1, 1]]
labels = tf.sparse.SparseTensor(
values=['class0', 'class0', 'class1'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over examples, divide by batch_size.
# loss = ((10 + 10) / 2 + (15 + 0) / 2 ) / 2
expected_loss = 8.75
self._test_train(
head=head, logits=logits, labels=labels, expected_loss=expected_loss)
def test_train_with_regularization_losses(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
regularization_losses = [1.5, 0.5]
features = {'x': np.array(((42,),), dtype=np.int32)}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes and over batch and add regularization loss.
expected_loss = 35. / 4. + 2.
expected_summaries = {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_REGULARIZATION: 2.,
}
tol = 1e-3
loss = head.loss(
logits=logits,
labels=labels,
features=features,
mode=ModeKeys.TRAIN,
regularization_losses=regularization_losses)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, self.evaluate(loss), rtol=tol, atol=tol)
if tf.executing_eagerly():
return
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return tf.strings.join([
tf.constant(expected_train_result),
tf.strings.as_string(loss, precision=3)
])
spec = head.create_estimator_spec(
features=features,
mode=ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
regularization_losses=regularization_losses,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
# Assert predictions, loss, train_op, and summaries.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run(
(spec.loss, spec.train_op, spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
test_lib._assert_simple_summaries(self, expected_summaries, summary_str,
tol)
def test_train_with_weights(self):
n_classes = 2
head = head_lib.MultiLabelHead(n_classes, weight_column='example_weights')
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {
'x': np.array([[41], [42]], dtype=np.int32),
'example_weights': np.array([[1.], [2.]], dtype=np.float32),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, weighted sum over examples, divide by batch_size.
# loss = (1 * (10 + 10) / 2 + 2 * (15 + 0) / 2) / 2
expected_loss = 12.5
tol = 1e-3
loss = head.loss(
logits=logits, labels=labels, features=features, mode=ModeKeys.TRAIN)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, self.evaluate(loss), rtol=tol, atol=tol)
if tf.executing_eagerly():
return
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return tf.strings.join([
tf.constant(expected_train_result),
tf.strings.as_string(loss, precision=3)
])
spec = head.create_estimator_spec(
features=features,
mode=ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
test_lib._assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run(
(spec.loss, spec.train_op, spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
test_lib._assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
}, summary_str, tol)
def test_multi_dim_weighted_train_create_loss(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 2]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]],
dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]], [[0, 1, 1], [0, 1, 1]]],
dtype=np.int64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# unreduced_loss =
# [[10 + 10 + 0, 0 + 0 + 10], [0 + 0 + 12, 12 + 12 + 0]] / 3
# = [[20/3, 10/3], [4, 8]]
# expected_unreduced_loss = [[[20./3.], [10./3.]], [[4.], [8.]]]
# weights are reshaped to [2, 2, 1] to match logits.
# expected_weights = [[[1.], [1.5]], [[2.], [2.5]]]
# loss = (1*20/3 + 1.5*10/3 + 2*4 + 2.5*8) / 4 = 9.9167
expected_training_loss = 9.9167
training_loss = head.loss(
logits=logits,
labels=labels,
features={'weights': weights},
mode=ModeKeys.TRAIN)
atol = 1.e-3
self.assertAllClose(
expected_training_loss, self.evaluate(training_loss), atol=atol)
def test_multi_dim_weighted_train(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 2]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]],
dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]], [[0, 1, 1], [0, 1, 1]]],
dtype=np.int64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# loss = [[10 + 10 + 0, 0 + 0 + 10], [0 + 0 + 12, 12 + 12 + 0]] / 3
# = [[20/3, 10/3], [4, 8]]
# loss = (1*20/3 + 1.5*10/3 + 2*4 + 2.5*8) / 4 = 9.9167
expected_loss = 9.9167
atol = 1.e-3
loss = head.loss(
logits=logits,
labels=labels,
features={'weights': weights},
mode=ModeKeys.TRAIN)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, self.evaluate(loss), atol=atol)
if tf.executing_eagerly():
return
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return tf.strings.join([
tf.constant(expected_train_result),
tf.strings.as_string(loss, precision=3)
])
spec = head.create_estimator_spec(
features={'weights': weights},
mode=ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
with self.cached_session() as sess:
test_lib._initialize_variables(self, tf.compat.v1.train.Scaffold())
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, atol=atol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
def test_multi_dim_weights_wrong_inner_dim(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 1]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]],
dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]], [[0, 1, 1], [0, 1, 1]]],
dtype=np.int64)
weights = np.array([[1.], [2.]], dtype=np.float32)
if tf.executing_eagerly():
with self.assertRaisesRegexp(ValueError, 'weights shape'):
head.loss(
logits=logits,
labels=labels,
features={'weights': weights},
mode=ModeKeys.TRAIN)
return
def _train_op_fn(loss):
del loss
return tf.no_op()
spec = head.create_estimator_spec(
features={'weights': weights},
mode=ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
with self.cached_session():
test_lib._initialize_variables(self, tf.compat.v1.train.Scaffold())
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
r'\[logits_shape: \] \[2 2 3\] \[weights_shape: \] \[2 1\]'):
spec.loss.eval()
def test_multi_dim_weights_wrong_outer_dim(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 2, 3]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]],
dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]], [[0, 1, 1], [0, 1, 1]]],
dtype=np.int64)
weights = np.array(
[[[1., 1., 1.], [1.5, 1.5, 1.5]], [[2., 2., 2.], [2.5, 2.5, 2.5]]],
dtype=np.float32)
if tf.executing_eagerly():
with self.assertRaisesRegexp(ValueError, 'weights shape'):
head.loss(
logits=logits,
labels=labels,
features={'weights': weights},
mode=ModeKeys.TRAIN)
return
weights_placeholder = tf.compat.v1.placeholder(dtype=tf.dtypes.float32)
def _train_op_fn(loss):
del loss
return tf.no_op()
spec = head.create_estimator_spec(
features={'weights': weights_placeholder},
mode=ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
with self.cached_session():
test_lib._initialize_variables(self, tf.compat.v1.train.Scaffold())
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
r'\[logits_shape: \] \[2 2 3\] \[weights_shape: \] \[2 2 3\]'):
spec.loss.eval({weights_placeholder: weights})
def test_multi_dim_weighted_eval(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 2]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]],
dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]], [[0, 1, 1], [0, 1, 1]]],
dtype=np.int64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# loss = [[10 + 10 + 0, 0 + 0 + 10], [0 + 0 + 12, 12 + 12 + 0]] / 3
# = [[20/3, 10/3], [4, 8]]
# loss = (1*20/3 + 1.5*10/3 + 2*4 + 2.5*8) / 4 = 9.9167
expected_loss = 9.9167
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss * (4. / np.sum(weights)),
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.4977,
keys.AUC_PR: 0.5461,
}
self._test_eval(
head=head,
features={'weights': weights},
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
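# A hedged, standalone sketch (not part of the original test suite): reproduce
# the 9.9167 weighted-loss figure quoted in the comments above with plain
# numpy, starting from the per-example unreduced losses [[20/3, 10/3], [4, 8]].
def _weighted_loss_sketch():
  unreduced_loss = np.array([[20. / 3., 10. / 3.], [4., 8.]])
  weights = np.array([[1., 1.5], [2., 2.5]])
  # Weight each example's loss, sum over the [2, 2] batch, and divide by the
  # number of examples (4), matching expected_loss in the tests above.
  return np.sum(unreduced_loss * weights) / 4.  # ~9.9167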
@test_util.deprecated_graph_mode_only
class MultiLabelHeadForEstimator(tf.test.TestCase):
"""Tests for create_estimator_spec running in Graph mode only."""
def test_invalid_trainable_variables(self):
head = head_lib.MultiLabelHead(n_classes=2)
class _Optimizer(tf.keras.optimizers.Optimizer):
def get_updates(self, loss, params):
del params
return [
tf.strings.join([
tf.constant('my_train_op'),
tf.strings.as_string(loss, precision=2)
])
]
def get_config(self):
config = super(_Optimizer, self).get_config()
return config
with self.assertRaisesRegexp(ValueError,
r'trainable_variables cannot be None'):
head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=ModeKeys.TRAIN,
logits=np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
labels=np.array([[1, 0], [1, 1]], dtype=np.int64),
optimizer=_Optimizer('my_optimizer'),
trainable_variables=None)
with self.assertRaisesRegexp(
ValueError, r'trainable_variables should be a list or a tuple'):
head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=ModeKeys.TRAIN,
logits=np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
labels=np.array([[1, 0], [1, 1]], dtype=np.int64),
optimizer=_Optimizer('my_optimizer'),
trainable_variables={
'var_list': [tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)]
})
def test_train_with_optimizer(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over examples, divide by batch_size.
# loss = ((10 + 10) / 2 + (15 + 0) / 2 ) / 2
expected_loss = 8.75
expected_train_result = 'my_train_op'
class _Optimizer(tf.keras.optimizers.Optimizer):
def get_updates(self, loss, params):
del params
return [
tf.strings.join([
tf.constant(expected_train_result),
tf.strings.as_string(loss, precision=3)
])
]
def get_config(self):
config = super(_Optimizer, self).get_config()
return config
spec = head.create_estimator_spec(
features=features,
mode=ModeKeys.TRAIN,
logits=logits,
labels=labels,
optimizer=_Optimizer('my_optimizer'),
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
tol = 1e-3
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
def test_predict_with_label_vocabulary(self):
n_classes = 4
head = head_lib.MultiLabelHead(
n_classes, label_vocabulary=['foo', 'bar', 'foobar', 'barfoo'])
logits = np.array([[0., 1., 2., -1.], [-1., -2., -3., 1.]],
dtype=np.float32)
expected_export_classes = [[b'foo', b'bar', b'foobar', b'barfoo']] * 2
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=ModeKeys.PREDICT,
logits=logits,
trainable_variables=[tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)])
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
predictions = sess.run(spec.predictions)
self.assertAllEqual(expected_export_classes,
predictions[prediction_keys.PredictionKeys.CLASSES])
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllEqual(
expected_export_classes,
sess.run(spec.export_outputs[test_lib._DEFAULT_SERVING_KEY].classes))
def test_train_with_update_ops(self):
with tf.Graph().as_default():
w = tf.Variable(1)
update_op = w.assign_add(1)
t = tf.Variable('')
expected_train_result = b'my_train_op'
def _train_op_fn(loss):
del loss
return t.assign(expected_train_result)
head = head_lib.MultiLabelHead(n_classes=2)
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=ModeKeys.TRAIN,
logits=np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
labels=np.array([[1, 0], [1, 1]], dtype=np.int64),
train_op_fn=_train_op_fn,
update_ops=[update_op],
trainable_variables=[
tf.Variable([1.0, 2.0], dtype=tf.dtypes.float32)
])
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
sess.run(spec.train_op)
w_value, t_value = sess.run([w, t])
self.assertEqual(2, w_value)
self.assertEqual(expected_train_result, t_value)
def test_lookup_tables_in_graph(self):
n_classes = 2
head = head_lib.MultiLabelHead(
n_classes=n_classes, label_vocabulary=['class0', 'class1'])
feature_columns = [tf.feature_column.numeric_column('x')]
# Create dnn estimator.
est = dnn.DNNEstimatorV2(
head=head, hidden_units=(2, 2), feature_columns=feature_columns)
def input_fn():
return ({
'x': np.array(((42,), (43,),), dtype=np.int32)
}, np.array([[1, 0], [1, 1]], dtype=np.int64))
# Train.
num_steps = 1
est.train(input_fn, steps=num_steps)
# Eval.
eval_results = est.evaluate(input_fn, steps=num_steps)
self.assertEqual(num_steps,
eval_results[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(eval_results))
# Predict.
est.predict(input_fn)
if __name__ == '__main__':
tf.test.main()
|
|
#########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA #
# 02111-1307 USA #
# #
# #
#########################################################################
import sys,os,rtf2xml.copy
class Preamble:
"""
    Fix the remaining parts of the preamble. This module does very little. It
    makes sure that no text gets put in the revision or list table. In the
    future, when I understand how to interpret the revision table and list
    table, I will make these methods more functional.
"""
def __init__(self, file, bug_handler, platform, default_font, code_page,
language, copy=None, temp_dir=None):
"""
Required:
file--file to parse
platform --Windows or Macintosh
default_font -- the default font
code_page --the code page (ansi1252, for example)
language --the document language
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing
"""
self.__file=file
self.__bug_handler = bug_handler
self.__copy = copy
self.__default_font = default_font
self.__code_page = code_page
self.__platform = platform
self.__language = language
if temp_dir:
self.__write_to = os.path.join(temp_dir,"info_table_info.data")
else:
self.__write_to = "info_table_info.data"
def __initiate_values(self):
"""
Initiate all values.
"""
self.__state = 'default'
self.__text_string = ''
self.__state_dict = {
'default' : self.__default_func,
'revision' : self.__revision_table_func,
'list_table' : self.__list_table_func,
'body' : self.__body_func,
}
self.__default_dict = {
'mi<mk<rtfhed-beg' : self.__found_rtf_head_func,
'mi<mk<listabbeg_' : self.__found_list_table_func,
'mi<mk<revtbl-beg' : self.__found_revision_table_func,
'mi<mk<body-open_' : self.__found_body_func,
}
def __default_func(self, line):
action = self.__default_dict.get(self.__token_info)
if action:
action(line)
else:
self.__write_obj.write(line)
def __found_rtf_head_func(self, line):
"""
Requires:
line -- the line to parse
Returns:
nothing.
Logic:
Write to the output file the default font info, the code page
info, and the platform info.
"""
self.__write_obj.write(
'mi<tg<empty-att_<rtf-definition'
'<default-font>%s<code-page>%s'
'<platform>%s<language>%s'
'\n' % (self.__default_font, self.__code_page,
self.__platform, self.__language)
)
def __found_list_table_func(self, line):
self.__state = 'list_table'
def __list_table_func(self, line):
if self.__token_info == 'mi<mk<listabend_':
self.__state = 'default'
elif line[0:2] == 'tx':
pass
else:
self.__write_obj.write(line)
def __found_revision_table_func(self, line):
self.__state = 'revision'
def __revision_table_func(self, line):
if self.__token_info == 'mi<mk<revtbl-end':
self.__state = 'default'
elif line[0:2] == 'tx':
pass
else:
self.__write_obj.write(line)
def __found_body_func(self, line):
self.__state = 'body'
self.__write_obj.write(line)
def __body_func(self, line):
self.__write_obj.write(line)
def fix_preamble(self):
"""
Requires:
nothing
Returns:
nothing (changes the original file)
Logic:
Read one line in at a time. Determine what action to take based on
            the state. The state can either be default, the revision table, or
the list table.
"""
self.__initiate_values()
read_obj = open(self.__file, 'r')
self.__write_obj = open(self.__write_to, 'w')
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
action = self.__state_dict.get(self.__state)
            if action is None:
                sys.stderr.write('no matching state in module preamble_rest.py\n')
sys.stderr.write(self.__state + '\n')
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = rtf2xml.copy.Copy(bug_handler = self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "preamble_div.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
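# Hedged usage sketch (added for illustration; not part of the original
# module). The command-line file argument and the bug handler below are
# hypothetical stand-ins for objects normally supplied by the rtf2xml
# pipeline.
if __name__ == '__main__':
    class _PrintingBugHandler:
        """Minimal stand-in for the pipeline's bug handler."""
        def __call__(self, msg):
            sys.stderr.write('%s\n' % msg)
    fixer = Preamble(
        file=sys.argv[1],               # hypothetical tokenized intermediate file
        bug_handler=_PrintingBugHandler(),
        platform='Windows',
        default_font='Times',
        code_page='ansi1252',
        language='1033',
        copy=False,                     # set True to keep a debugging copy
    )
    fixer.fix_preamble()                # rewrites the input file in place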
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import timedelta
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.jobs.backfill_job import BackfillJob
from airflow.models import DagBag
from airflow.utils import timezone
from tests.test_utils.config import conf_vars
try:
from distributed import LocalCluster
# utility functions imported from the dask testing suite to instantiate a test
# cluster for tls tests
from distributed.utils_test import cluster as dask_testing_cluster, get_cert, tls_security
from airflow.executors.dask_executor import DaskExecutor
skip_tls_tests = False
except ImportError:
skip_tls_tests = True
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
SUCCESS_COMMAND = ['airflow', 'tasks', 'run', '--help']
FAIL_COMMAND = ['airflow', 'tasks', 'run', 'false']
class TestBaseDask(unittest.TestCase):
def assert_tasks_on_executor(self, executor, timeout_executor=120):
# start the executor
executor.start()
executor.execute_async(key='success', command=SUCCESS_COMMAND)
executor.execute_async(key='fail', command=FAIL_COMMAND)
success_future = next(k for k, v in executor.futures.items() if v == 'success')
fail_future = next(k for k, v in executor.futures.items() if v == 'fail')
# wait for the futures to execute, with a timeout
timeout = timezone.utcnow() + timedelta(seconds=timeout_executor)
while not (success_future.done() and fail_future.done()):
if timezone.utcnow() > timeout:
raise ValueError(
'The futures should have finished; there is probably '
'an error communicating with the Dask cluster.'
)
# both tasks should have finished
assert success_future.done()
assert fail_future.done()
# check task exceptions
assert success_future.exception() is None
assert fail_future.exception() is not None
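# Hedged helper sketch (illustration only; the tests above do not use it): the
# poll-until-done-or-deadline pattern from assert_tasks_on_executor, factored
# into a standalone function.
def wait_for_futures(futures, timeout_seconds=120):
    """Block until every future in `futures` is done, or raise on timeout."""
    deadline = timezone.utcnow() + timedelta(seconds=timeout_seconds)
    while not all(future.done() for future in futures):
        if timezone.utcnow() > deadline:
            raise ValueError(
                'The futures should have finished; there is probably '
                'an error communicating with the Dask cluster.'
            )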
class TestDaskExecutor(TestBaseDask):
def setUp(self):
self.dagbag = DagBag(include_examples=True)
self.cluster = LocalCluster()
def test_dask_executor_functions(self):
executor = DaskExecutor(cluster_address=self.cluster.scheduler_address)
self.assert_tasks_on_executor(executor, timeout_executor=120)
def test_backfill_integration(self):
"""
Test that DaskExecutor can be used to backfill example dags
"""
dag = self.dagbag.get_dag('example_bash_operator')
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True,
executor=DaskExecutor(cluster_address=self.cluster.scheduler_address),
)
job.run()
def tearDown(self):
self.cluster.close(timeout=5)
@pytest.mark.skipif(
skip_tls_tests, reason="The tests are skipped because distributed framework could not be imported"
)
class TestDaskExecutorTLS(TestBaseDask):
def setUp(self):
self.dagbag = DagBag(include_examples=True)
@conf_vars(
{
('dask', 'tls_ca'): get_cert('tls-ca-cert.pem'),
('dask', 'tls_cert'): get_cert('tls-key-cert.pem'),
('dask', 'tls_key'): get_cert('tls-key.pem'),
}
)
def test_tls(self):
# These use test certs that ship with dask/distributed and should not be
# used in production
with dask_testing_cluster(
worker_kwargs={'security': tls_security(), "protocol": "tls"},
scheduler_kwargs={'security': tls_security(), "protocol": "tls"},
) as (cluster, _):
executor = DaskExecutor(cluster_address=cluster['address'])
self.assert_tasks_on_executor(executor, timeout_executor=120)
executor.end()
# close the executor, the cluster context manager expects all listeners
# and tasks to have completed.
executor.client.close()
@mock.patch('airflow.executors.dask_executor.DaskExecutor.sync')
@mock.patch('airflow.executors.base_executor.BaseExecutor.trigger_tasks')
@mock.patch('airflow.executors.base_executor.Stats.gauge')
def test_gauge_executor_metrics(self, mock_stats_gauge, mock_trigger_tasks, mock_sync):
executor = DaskExecutor()
executor.heartbeat()
calls = [
mock.call('executor.open_slots', mock.ANY),
mock.call('executor.queued_tasks', mock.ANY),
mock.call('executor.running_tasks', mock.ANY),
]
mock_stats_gauge.assert_has_calls(calls)
class TestDaskExecutorQueue(unittest.TestCase):
def test_dask_queues_no_resources(self):
self.cluster = LocalCluster()
executor = DaskExecutor(cluster_address=self.cluster.scheduler_address)
executor.start()
with self.assertRaises(AirflowException):
executor.execute_async(key='success', command=SUCCESS_COMMAND, queue='queue1')
def test_dask_queues_not_available(self):
self.cluster = LocalCluster(resources={'queue1': 1})
executor = DaskExecutor(cluster_address=self.cluster.scheduler_address)
executor.start()
with self.assertRaises(AirflowException):
# resource 'queue2' doesn't exist on cluster
executor.execute_async(key='success', command=SUCCESS_COMMAND, queue='queue2')
def test_dask_queues(self):
self.cluster = LocalCluster(resources={'queue1': 1})
executor = DaskExecutor(cluster_address=self.cluster.scheduler_address)
executor.start()
executor.execute_async(key='success', command=SUCCESS_COMMAND, queue='queue1')
success_future = next(k for k, v in executor.futures.items() if v == 'success')
# wait for the futures to execute, with a timeout
timeout = timezone.utcnow() + timedelta(seconds=30)
while not success_future.done():
if timezone.utcnow() > timeout:
raise ValueError(
'The futures should have finished; there is probably '
'an error communicating with the Dask cluster.'
)
assert success_future.done()
assert success_future.exception() is None
def test_dask_queues_no_queue_specified(self):
self.cluster = LocalCluster(resources={'queue1': 1})
executor = DaskExecutor(cluster_address=self.cluster.scheduler_address)
executor.start()
# no queue specified for executing task
executor.execute_async(key='success', command=SUCCESS_COMMAND)
success_future = next(k for k, v in executor.futures.items() if v == 'success')
# wait for the futures to execute, with a timeout
timeout = timezone.utcnow() + timedelta(seconds=30)
while not success_future.done():
if timezone.utcnow() > timeout:
raise ValueError(
'The futures should have finished; there is probably '
'an error communicating with the Dask cluster.'
)
assert success_future.done()
assert success_future.exception() is None
def tearDown(self):
self.cluster.close(timeout=5)
|
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009, Jean-Michel Sizun
# Copyright 2009 Frank Scholz <[email protected]>
import os.path
from twisted.internet import reactor, threads
from twisted.web import server, static
from twisted.web.error import PageRedirect
from coherence.upnp.core import utils
from coherence.upnp.core.utils import ReverseProxyUriResource, ReverseProxyResource
from coherence.upnp.core import DIDLLite
from coherence.backend import BackendStore,BackendItem
from coherence import log
from gdata.youtube.service import YouTubeService
from coherence.extern.youtubedl import FileDownloader,YoutubeIE,MetacafeIE,YoutubePlaylistIE
from coherence.backends.picasa_storage import Container, LazyContainer, AbstractBackendStore
MPEG4_MIMETYPE = 'video/mp4'
MPEG4_EXTENSION = 'mp4'
class TestVideoProxy(ReverseProxyUriResource, log.Loggable):
logCategory = 'internetVideoProxy'
def __init__(self, uri, id,
proxy_mode,
cache_directory,
cache_maxsize=100000000,
buffer_size=2000000,
fct=None, **kwargs):
ReverseProxyUriResource.__init__(self, uri)
self.id = id
if isinstance(self.id,int):
self.id = '%d' % self.id
self.proxy_mode = proxy_mode
self.cache_directory = cache_directory
self.cache_maxsize = int(cache_maxsize)
self.buffer_size = int(buffer_size)
self.downloader = None
self.video_url = None # the url we get from the youtube page
self.stream_url = None # the real video stream, cached somewhere
self.mimetype = None
self.filesize = 0
self.file_in_cache = False
self.url_extractor_fct = fct
self.url_extractor_params = kwargs
def requestFinished(self, result):
""" self.connection is set in utils.ReverseProxyResource.render """
self.info("ProxyStream requestFinished:",result)
if hasattr(self,'connection'):
self.connection.transport.loseConnection()
def render(self, request):
self.info("VideoProxy render", request, self.stream_url, self.video_url)
self.info("VideoProxy headers:", request.getAllHeaders())
self.info("VideoProxy id:", self.id)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
if self.stream_url is None:
web_url = "http://%s%s" % (self.host,self.path)
self.info("Web_url: %s" % web_url)
def got_real_urls(real_urls):
if len(real_urls) == 0:
self.warning('Unable to retrieve any URL for video stream')
return self.requestFinished(None)
else:
got_real_url(real_urls[0])
def got_real_url(real_url):
self.info("Real URL is %s" % real_url)
self.stream_url = real_url
if self.stream_url is None:
self.warning('Unable to retrieve URL - inconsistent web page')
return self.requestFinished(None) #FIXME
self.stream_url = self.stream_url.encode('ascii', 'strict')
self.resetUri(self.stream_url)
self.info("Video URL: %s" % self.stream_url)
self.video_url = self.stream_url[:]
d = self.followRedirects(request)
d.addCallback(self.proxyURL)
d.addErrback(self.requestFinished)
if self.url_extractor_fct is not None:
d = self.url_extractor_fct(web_url, **self.url_extractor_params)
d.addCallback(got_real_urls)
else:
got_real_url(web_url)
return server.NOT_DONE_YET
reactor.callLater(0.05,self.proxyURL,request)
return server.NOT_DONE_YET
def followRedirects(self, request):
self.info("HTTP redirect ", request, self.stream_url)
d = utils.getPage(self.stream_url, method="HEAD", followRedirect=0)
def gotHeader(result,request):
data,header = result
self.info("finally got something %r", header)
#FIXME what do we do here if the headers aren't there?
self.filesize = int(header['content-length'][0])
self.mimetype = header['content-type'][0]
return request
def gotError(error,request):
# error should be a "Failure" instance at this point
self.info("gotError" % error)
error_value = error.value
if (isinstance(error_value,PageRedirect)):
self.info("got PageRedirect %r" % error_value.location)
self.stream_url = error_value.location
self.resetUri(self.stream_url)
return self.followRedirects(request)
else:
self.warning("Error while retrieving page header for URI ", self.stream_url)
self.requestFinished(None)
return error
d.addCallback(gotHeader, request)
d.addErrback(gotError,request)
return d
def proxyURL(self, request):
self.info("proxy_mode: %s, request %s" % (self.proxy_mode,request.method))
if self.proxy_mode == 'redirect':
# send stream url to client for redirection
request.redirect(self.stream_url)
request.finish()
elif self.proxy_mode in ('proxy',):
res = ReverseProxyResource.render(self,request)
if isinstance(res,int):
return res
request.write(res)
return
elif self.proxy_mode in ('buffer','buffered'):
# download stream to cache,
# and send it to the client in // after X bytes
filepath = os.path.join(self.cache_directory, self.id)
file_is_already_available = False
if (os.path.exists(filepath)
and os.path.getsize(filepath) == self.filesize):
res = self.renderFile(request, filepath)
if isinstance(res,int):
return res
request.write(res)
request.finish()
else:
if request.method != 'HEAD':
self.downloadFile(request, filepath, None)
range = request.getHeader('range')
if range is not None:
bytesrange = range.split('=')
assert bytesrange[0] == 'bytes',\
"Syntactically invalid http range header!"
start, end = bytesrange[1].split('-', 1)
#print "%r %r" %(start,end)
if start:
start = int(start)
if end:
end = int(end)
else:
end = self.filesize -1
# Are we requesting something beyond the current size of the file?
try:
size = os.path.getsize(filepath)
except OSError:
size = 0
if (start >= size and
end+10 > self.filesize and
end-start < 200000):
#print "let's hand that through, it is probably a mp4 index request"
res = ReverseProxyResource.render(self,request)
if isinstance(res,int):
return res
request.write(res)
return
res = self.renderBufferFile (request, filepath, self.buffer_size)
if res == '' and request.method != 'HEAD':
return server.NOT_DONE_YET
if not isinstance(res,int):
request.write(res)
if request.method == 'HEAD':
request.finish()
else:
self.warning("Unsupported Proxy Mode: %s" % self.proxy_mode)
return self.requestFinished(None)
def getMimetype(self):
type = MPEG4_MIMETYPE
if self.mimetype is not None:
type = self.mimetype
return type
def renderFile(self,request,filepath):
self.info('Cache file available %r %r ' %(request, filepath))
downloadedFile = utils.StaticFile(filepath, self.mimetype)
downloadedFile.type = self.getMimetype()
downloadedFile.encoding = None
return downloadedFile.render(request)
def renderBufferFile (self, request, filepath, buffer_size):
# Try to render file(if we have enough data)
self.info("renderBufferFile %s" % filepath)
rendering = False
        if os.path.exists(filepath):
filesize = os.path.getsize(filepath)
if ((filesize >= buffer_size) or (filesize == self.filesize)):
rendering = True
self.info("Render file", filepath, self.filesize, filesize, buffer_size)
bufferFile = utils.BufferFile(filepath, self.filesize, MPEG4_MIMETYPE)
bufferFile.type = self.getMimetype()
bufferFile.encoding = None
try:
return bufferFile.render(request)
except Exception,error:
self.info(error)
if request.method != 'HEAD':
self.info('Will retry later to render buffer file')
reactor.callLater(0.5, self.renderBufferFile, request,filepath,buffer_size)
return ''
def downloadFinished(self, result):
self.info('Download finished!')
self.downloader = None
def gotDownloadError(self, error, request):
self.info("Unable to download stream to file: %s" % self.stream_url)
self.info(request)
self.info(error)
def downloadFile(self, request, filepath, callback, *args):
if (self.downloader is None):
self.info("Proxy: download data to cache file %s" % filepath)
self.checkCacheSize()
self.downloader = utils.downloadPage(self.stream_url, filepath, supportPartial=1)
self.downloader.addCallback(self.downloadFinished)
self.downloader.addErrback(self.gotDownloadError, request)
if(callback is not None):
self.downloader.addCallback(callback, request, filepath, *args)
return self.downloader
def checkCacheSize(self):
cache_listdir = os.listdir(self.cache_directory)
cache_size = 0
for filename in cache_listdir:
path = "%s%s%s" % (self.cache_directory, os.sep, filename)
statinfo = os.stat(path)
cache_size += statinfo.st_size
self.info("Cache size: %d (max is %s)" % (cache_size, self.cache_maxsize))
if (cache_size > self.cache_maxsize):
cache_targetsize = self.cache_maxsize * 2/3
self.info("Cache above max size: Reducing to %d" % cache_targetsize)
def compare_atime(filename1, filename2):
path1 = "%s%s%s" % (self.cache_directory, os.sep, filename1)
path2 = "%s%s%s" % (self.cache_directory, os.sep, filename2)
cmp = int(os.stat(path1).st_atime - os.stat(path2).st_atime)
return cmp
cache_listdir = sorted(cache_listdir,compare_atime)
while (cache_size > cache_targetsize):
filename = cache_listdir.pop(0)
path = "%s%s%s" % (self.cache_directory, os.sep, filename)
cache_size -= os.stat(path).st_size
os.remove(path)
self.info("removed %s" % filename)
self.info("new cache size is %d" % cache_size)
class YoutubeVideoItem(BackendItem):
def __init__(self, external_id, title, url, mimetype, entry, store):
self.external_id = external_id
self.name = title
self.duration = None
self.size = None
self.mimetype = mimetype
self.description = None
self.date = None
self.item = None
self.youtube_entry = entry
self.store = store
def extractDataURL(url, quality):
if (quality == 'hd'):
format = '22'
else:
format = '18'
kwargs = {
'usenetrc': False,
'quiet': True,
'forceurl': True,
'forcetitle': False,
'simulate': True,
'format': format,
'outtmpl': u'%(id)s.%(ext)s',
'ignoreerrors': True,
'ratelimit': None,
}
if len(self.store.login) > 0:
kwargs['username'] = self.store.login
kwargs['password'] = self.store.password
fd = FileDownloader(kwargs)
youtube_ie = YoutubeIE()
fd.add_info_extractor(YoutubePlaylistIE(youtube_ie))
fd.add_info_extractor(MetacafeIE(youtube_ie))
fd.add_info_extractor(youtube_ie)
deferred = fd.get_real_urls([url])
return deferred
#self.location = VideoProxy(url, self.external_id,
# store.proxy_mode,
# store.cache_directory, store.cache_maxsize, store.buffer_size,
# extractDataURL, quality=self.store.quality)
self.location = TestVideoProxy(url, self.external_id,
store.proxy_mode,
store.cache_directory, store.cache_maxsize,store.buffer_size,
extractDataURL, quality=self.store.quality)
def get_item(self):
        if self.item is None:
upnp_id = self.get_id()
upnp_parent_id = self.parent.get_id()
self.item = DIDLLite.VideoItem(upnp_id, upnp_parent_id, self.name)
self.item.description = self.description
self.item.date = self.date
# extract thumbnail from youtube entry
# we take the last one, hoping this is the bigger one
thumbnail_url = None
for image in self.youtube_entry.media.thumbnail:
thumbnail_url = image.url
if thumbnail_url is not None:
self.item.albumArtURI = thumbnail_url
res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
res.duration = self.duration
res.size = self.size
self.item.res.append(res)
return self.item
def get_path(self):
self.url = self.store.urlbase + str(self.storage_id) + "." + MPEG4_EXTENSION
return self.url
def get_id(self):
return self.storage_id
class YouTubeStore(AbstractBackendStore):
logCategory = 'youtube_store'
implements = ['MediaServer']
description = ('Youtube', 'connects to the YouTube service and exposes the standard feeds (public) and the uploads/favorites/playlists/subscriptions of a given user.', None)
    options = [{'option':'name', 'text':'Server Name:', 'type':'string','default':'my media','help': 'the name under which this MediaServer shall show up on other UPnP clients'},
{'option':'version','text':'UPnP Version:','type':'int','default':2,'enum': (2,1),'help': 'the highest UPnP version this MediaServer shall support','level':'advance'},
{'option':'uuid','text':'UUID Identifier:','type':'string','help':'the unique (UPnP) identifier for this MediaServer, usually automatically set','level':'advance'},
{'option':'refresh','text':'Refresh period','type':'string'},
{'option':'login','text':'User ID:','type':'string','group':'User Account'},
{'option':'password','text':'Password:','type':'string','group':'User Account'},
{'option':'location','text':'Locale:','type':'string'},
{'option':'quality','text':'Video quality:','type':'string', 'default':'sd','enum': ('sd','hd')},
{'option':'standard_feeds','text':'Include standard feeds:','type':'bool', 'default': True},
{'option':'proxy_mode','text':'Proxy mode:','type':'string', 'enum': ('redirect','proxy','cache','buffered')},
{'option':'buffer_size','text':'Buffering size:','type':'int'},
{'option':'cache_directory','text':'Cache directory:','type':'dir', 'group':'Cache'},
{'option':'cache_maxsize','text':'Cache max size:','type':'int', 'group':'Cache'},
]
def __init__(self, server, **kwargs):
AbstractBackendStore.__init__(self, server, **kwargs)
self.name = kwargs.get('name','YouTube')
self.login = kwargs.get('userid',kwargs.get('login',''))
self.password = kwargs.get('password','')
self.locale = kwargs.get('location',None)
self.quality = kwargs.get('quality','sd')
self.showStandardFeeds = (kwargs.get('standard_feeds','True') in ['Yes','yes','true','True','1'])
self.refresh = int(kwargs.get('refresh',60))*60
self.proxy_mode = kwargs.get('proxy_mode', 'redirect')
self.cache_directory = kwargs.get('cache_directory', '/tmp/coherence-cache')
try:
if self.proxy_mode != 'redirect':
os.mkdir(self.cache_directory)
except:
pass
self.cache_maxsize = kwargs.get('cache_maxsize', 100000000)
self.buffer_size = kwargs.get('buffer_size', 750000)
rootItem = Container(None, self.name)
self.set_root_item(rootItem)
if (self.showStandardFeeds):
standardfeeds_uri = 'http://gdata.youtube.com/feeds/api/standardfeeds'
if self.locale is not None:
standardfeeds_uri += "/%s" % self.locale
standardfeeds_uri += "/%s"
self.appendFeed('Most Viewed', standardfeeds_uri % 'most_viewed', rootItem)
self.appendFeed('Top Rated', standardfeeds_uri % 'top_rated', rootItem)
self.appendFeed('Recently Featured', standardfeeds_uri % 'recently_featured', rootItem)
self.appendFeed('Watch On Mobile', standardfeeds_uri % 'watch_on_mobile', rootItem)
self.appendFeed('Most Discussed', standardfeeds_uri % 'most_discussed', rootItem)
self.appendFeed('Top Favorites', standardfeeds_uri % 'top_favorites', rootItem)
self.appendFeed('Most Linked', standardfeeds_uri % 'most_linked', rootItem)
self.appendFeed('Most Responded', standardfeeds_uri % 'most_responded', rootItem)
self.appendFeed('Most Recent', standardfeeds_uri % 'most_recent', rootItem)
if len(self.login) > 0:
userfeeds_uri = 'http://gdata.youtube.com/feeds/api/users/%s/%s'
self.appendFeed('My Uploads', userfeeds_uri % (self.login,'uploads'), rootItem)
self.appendFeed('My Favorites', userfeeds_uri % (self.login,'favorites'), rootItem)
playlistsItem = LazyContainer(rootItem, 'My Playlists', None, self.refresh, self.retrievePlaylistFeeds)
rootItem.add_child(playlistsItem)
subscriptionsItem = LazyContainer(rootItem, 'My Subscriptions', None, self.refresh, self.retrieveSubscriptionFeeds)
rootItem.add_child(subscriptionsItem)
self.init_completed()
def __repr__(self):
return self.__class__.__name__
def appendFeed( self, name, feed_uri, parent):
item = LazyContainer(parent, name, None, self.refresh, self.retrieveFeedItems, feed_uri=feed_uri)
parent.add_child(item, external_id=feed_uri)
def appendVideoEntry(self, entry, parent):
external_id = entry.id.text.split('/')[-1]
title = entry.media.title.text
url = entry.media.player.url
mimetype = MPEG4_MIMETYPE
#mimetype = 'video/mpeg'
item = YoutubeVideoItem (external_id, title, url, mimetype, entry, self)
item.parent = parent
parent.add_child(item, external_id=external_id)
def upnp_init(self):
self.current_connection_id = None
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
['http-get:*:%s:*' % MPEG4_MIMETYPE],
default=True)
self.wmc_mapping = {'15': self.get_root_id()}
self.yt_service = YouTubeService()
self.yt_service.client_id = 'ytapi-JeanMichelSizun-youtubebackendpl-ruabstu7-0'
self.yt_service.developer_key = 'AI39si7dv2WWffH-s3pfvmw8fTND-cPWeqF1DOcZ8rwTgTPi4fheX7jjQXpn7SG61Ido0Zm_9gYR52TcGog9Pt3iG9Sa88-1yg'
self.yt_service.email = self.login
self.yt_service.password = self.password
self.yt_service.source = 'Coherence UPnP backend'
if len(self.login) > 0:
d = threads.deferToThread(self.yt_service.ProgrammaticLogin)
def retrieveFeedItems (self, parent=None, feed_uri=''):
feed = threads.deferToThread(self.yt_service.GetYouTubeVideoFeed, feed_uri)
def gotFeed(feed):
if feed is None:
self.warning("Unable to retrieve feed %s" % feed_uri)
return
for entry in feed.entry:
self.appendVideoEntry(entry, parent)
def gotError(error):
self.warning("ERROR: %s" % error)
feed.addCallbacks(gotFeed, gotError)
return feed
def retrievePlaylistFeedItems (self, parent, playlist_id):
feed = threads.deferToThread(self.yt_service.GetYouTubePlaylistVideoFeed,playlist_id=playlist_id)
def gotFeed(feed):
if feed is None:
self.warning("Unable to retrieve playlist items %s" % feed_uri)
return
for entry in feed.entry:
self.appendVideoEntry(entry, parent)
def gotError(error):
self.warning("ERROR: %s" % error)
feed.addCallbacks(gotFeed, gotError)
return feed
def retrieveSubscriptionFeedItems (self, parent, uri):
entry = threads.deferToThread(self.yt_service.GetYouTubeSubscriptionEntry,uri)
def gotEntry(entry):
if entry is None:
self.warning("Unable to retrieve subscription items %s" % uri)
return
feed_uri = entry.feed_link[0].href
return self.retrieveFeedItems(parent, feed_uri)
def gotError(error):
self.warning("ERROR: %s" % error)
entry.addCallbacks(gotEntry, gotError)
return entry
def retrievePlaylistFeeds(self, parent):
playlists_feed = threads.deferToThread(self.yt_service.GetYouTubePlaylistFeed, username=self.login)
def gotPlaylists(playlist_video_feed):
if playlist_video_feed is None:
self.warning("Unable to retrieve playlists feed")
return
for playlist_video_entry in playlist_video_feed.entry:
title = playlist_video_entry.title.text
playlist_id = playlist_video_entry.id.text.split("/")[-1] # FIXME find better way to retrieve the playlist ID
item = LazyContainer(parent, title, playlist_id, self.refresh, self.retrievePlaylistFeedItems, playlist_id=playlist_id)
parent.add_child(item, external_id=playlist_id)
def gotError(error):
self.warning("ERROR: %s" % error)
playlists_feed.addCallbacks(gotPlaylists, gotError)
return playlists_feed
def retrieveSubscriptionFeeds(self, parent):
playlists_feed = threads.deferToThread(self.yt_service.GetYouTubeSubscriptionFeed, username=self.login)
def gotPlaylists(playlist_video_feed):
if playlist_video_feed is None:
self.warning("Unable to retrieve subscriptions feed")
return
for entry in playlist_video_feed.entry:
type = entry.GetSubscriptionType()
title = entry.title.text
uri = entry.id.text
name = "[%s] %s" % (type,title)
item = LazyContainer(parent, name, uri, self.refresh, self.retrieveSubscriptionFeedItems, uri=uri)
item.parent = parent
parent.add_child(item, external_id=uri)
def gotError(error):
self.warning("ERROR: %s" % error)
playlists_feed.addCallbacks(gotPlaylists, gotError)
return playlists_feed
|
|
import copy
import logging
import math
import re
from kubernetes import client
from kubernetes.client.rest import ApiException
from ray.autoscaler._private._kubernetes import auth_api, core_api, log_prefix
import ray.ray_constants as ray_constants
logger = logging.getLogger(__name__)
MEMORY_SIZE_UNITS = {
"K": 2**10,
"M": 2**20,
"G": 2**30,
"T": 2**40,
"P": 2**50
}
class InvalidNamespaceError(ValueError):
def __init__(self, field_name, namespace):
self.message = ("Namespace of {} config doesn't match provided "
"namespace '{}'. Either set it to {} or remove the "
"field".format(field_name, namespace, namespace))
def __str__(self):
return self.message
def using_existing_msg(resource_type, name):
return "using existing {} '{}'".format(resource_type, name)
def updating_existing_msg(resource_type, name):
return "updating existing {} '{}'".format(resource_type, name)
def not_found_msg(resource_type, name):
return "{} '{}' not found, attempting to create it".format(
resource_type, name)
def not_checking_msg(resource_type, name):
return "not checking if {} '{}' exists".format(resource_type, name)
def created_msg(resource_type, name):
return "successfully created {} '{}'".format(resource_type, name)
def not_provided_msg(resource_type):
return "no {} config provided, must already exist".format(resource_type)
def bootstrap_kubernetes(config):
if not config["provider"]["use_internal_ips"]:
        raise ValueError(
            "Exposing external IP addresses for ray containers isn't "
            "currently supported. Please set "
            "'use_internal_ips' to true.")
if config["provider"].get("_operator"):
namespace = config["provider"]["namespace"]
else:
namespace = _configure_namespace(config["provider"])
_configure_services(namespace, config["provider"])
if not config["provider"].get("_operator"):
        # These steps are unnecessary when using the Operator.
_configure_autoscaler_service_account(namespace, config["provider"])
_configure_autoscaler_role(namespace, config["provider"])
_configure_autoscaler_role_binding(namespace, config["provider"])
return config
def fillout_resources_kubernetes(config):
"""Fills CPU and GPU resources by reading pod spec of each available node
type.
For each node type and each of CPU/GPU, looks at container's resources
and limits, takes min of the two. The result is rounded up, as Ray does
not currently support fractional CPU.
"""
if "available_node_types" not in config:
return config
node_types = copy.deepcopy(config["available_node_types"])
head_node_type = config["head_node_type"]
for node_type in node_types:
node_config = node_types[node_type]["node_config"]
# The next line is for compatibility with configs like
# kubernetes/example-ingress.yaml,
# cf. KubernetesNodeProvider.create_node().
pod = node_config.get("pod", node_config)
container_data = pod["spec"]["containers"][0]
autodetected_resources = get_autodetected_resources(container_data)
        if node_type == head_node_type:
            # we only autodetect memory resources for worker-type nodes
autodetected_resources.pop("memory")
if "resources" not in config["available_node_types"][node_type]:
config["available_node_types"][node_type]["resources"] = {}
autodetected_resources.update(
config["available_node_types"][node_type]["resources"])
config["available_node_types"][node_type][
"resources"] = autodetected_resources
logger.debug(
"Updating the resources of node type {} to include {}.".format(
node_type, autodetected_resources))
return config
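# Hedged illustration (not part of the original module; the container spec
# below is made up): what get_autodetected_resources reports for a container
# whose requests and limits disagree.
def _autodetect_example():
    container_data = {
        "resources": {
            "requests": {"cpu": "1500m", "memory": "1Gi"},
            "limits": {"cpu": "2", "memory": "2Gi", "nvidia.com/gpu": "1"},
        }
    }
    # CPU: min(ceil(1.5), 2) = 2; GPU: min(inf, 1) = 1; memory: the smaller of
    # the two quantities, scaled down by the object-store memory proportion.
    return get_autodetected_resources(container_data)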
def get_autodetected_resources(container_data):
container_resources = container_data.get("resources", None)
if container_resources is None:
return {"CPU": 0, "GPU": 0}
node_type_resources = {
resource_name.upper(): get_resource(container_resources, resource_name)
for resource_name in ["cpu", "gpu"]
}
memory_limits = get_resource(container_resources, "memory")
node_type_resources["memory"] = int(
memory_limits *
(1 - ray_constants.DEFAULT_OBJECT_STORE_MEMORY_PROPORTION))
return node_type_resources
def get_resource(container_resources, resource_name):
request = _get_resource(
container_resources, resource_name, field_name="requests")
limit = _get_resource(
container_resources, resource_name, field_name="limits")
resource = min(request, limit)
# float("inf") value means the resource wasn't detected in either
# requests or limits
return 0 if resource == float("inf") else int(resource)
def _get_resource(container_resources, resource_name, field_name):
"""Returns the resource quantity.
The amount of resource is rounded up to nearest integer.
Returns float("inf") if the resource is not present.
Args:
container_resources (dict): Container's resource field.
resource_name (str): One of 'cpu', 'gpu' or memory.
field_name (str): One of 'requests' or 'limits'.
Returns:
Union[int, float]: Detected resource quantity.
"""
if field_name not in container_resources:
# No limit/resource field.
return float("inf")
resources = container_resources[field_name]
# Look for keys containing the resource_name. For example,
# the key 'nvidia.com/gpu' contains the key 'gpu'.
matching_keys = [key for key in resources if resource_name in key.lower()]
if len(matching_keys) == 0:
return float("inf")
if len(matching_keys) > 1:
# Should have only one match -- mostly relevant for gpu.
raise ValueError(f"Multiple {resource_name} types not supported.")
# E.g. 'nvidia.com/gpu' or 'cpu'.
resource_key = matching_keys.pop()
resource_quantity = resources[resource_key]
if resource_name == "memory":
return _parse_memory_resource(resource_quantity)
else:
return _parse_cpu_or_gpu_resource(resource_quantity)
def _parse_cpu_or_gpu_resource(resource):
resource_str = str(resource)
if resource_str[-1] == "m":
# For example, '500m' rounds up to 1.
return math.ceil(int(resource_str[:-1]) / 1000)
else:
return int(resource_str)
def _parse_memory_resource(resource):
resource_str = str(resource)
try:
return int(resource_str)
except ValueError:
pass
memory_size = re.sub(r"([KMGTP]+)", r" \1", resource_str)
number, unit_index = [item.strip() for item in memory_size.split()]
unit_index = unit_index[0]
return float(number) * MEMORY_SIZE_UNITS[unit_index]
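# Hedged illustration (not part of the original module): how the parsers above
# interpret typical Kubernetes quantities.
def _parsing_examples():
    # '500m' means 500 millicores and rounds up to one full CPU.
    assert _parse_cpu_or_gpu_resource("500m") == 1
    # '4Gi': the unit is split off and only its first letter ('G') selects the
    # multiplier, so this parses to 4 * 2**30 bytes.
    assert _parse_memory_resource("4Gi") == 4 * 2**30
    # Plain byte counts pass through unchanged.
    assert _parse_memory_resource("1048576") == 1048576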
def _configure_namespace(provider_config):
namespace_field = "namespace"
if namespace_field not in provider_config:
raise ValueError("Must specify namespace in Kubernetes config.")
namespace = provider_config[namespace_field]
field_selector = "metadata.name={}".format(namespace)
try:
namespaces = core_api().list_namespace(
field_selector=field_selector).items
except ApiException:
logger.warning(log_prefix +
not_checking_msg(namespace_field, namespace))
return namespace
if len(namespaces) > 0:
assert len(namespaces) == 1
logger.info(log_prefix +
using_existing_msg(namespace_field, namespace))
return namespace
logger.info(log_prefix + not_found_msg(namespace_field, namespace))
namespace_config = client.V1Namespace(
metadata=client.V1ObjectMeta(name=namespace))
core_api().create_namespace(namespace_config)
logger.info(log_prefix + created_msg(namespace_field, namespace))
return namespace
def _configure_autoscaler_service_account(namespace, provider_config):
account_field = "autoscaler_service_account"
if account_field not in provider_config:
logger.info(log_prefix + not_provided_msg(account_field))
return
account = provider_config[account_field]
if "namespace" not in account["metadata"]:
account["metadata"]["namespace"] = namespace
elif account["metadata"]["namespace"] != namespace:
raise InvalidNamespaceError(account_field, namespace)
name = account["metadata"]["name"]
field_selector = "metadata.name={}".format(name)
accounts = core_api().list_namespaced_service_account(
namespace, field_selector=field_selector).items
if len(accounts) > 0:
assert len(accounts) == 1
logger.info(log_prefix + using_existing_msg(account_field, name))
return
logger.info(log_prefix + not_found_msg(account_field, name))
core_api().create_namespaced_service_account(namespace, account)
logger.info(log_prefix + created_msg(account_field, name))
def _configure_autoscaler_role(namespace, provider_config):
role_field = "autoscaler_role"
if role_field not in provider_config:
logger.info(log_prefix + not_provided_msg(role_field))
return
role = provider_config[role_field]
if "namespace" not in role["metadata"]:
role["metadata"]["namespace"] = namespace
elif role["metadata"]["namespace"] != namespace:
raise InvalidNamespaceError(role_field, namespace)
name = role["metadata"]["name"]
field_selector = "metadata.name={}".format(name)
accounts = auth_api().list_namespaced_role(
namespace, field_selector=field_selector).items
if len(accounts) > 0:
assert len(accounts) == 1
logger.info(log_prefix + using_existing_msg(role_field, name))
return
logger.info(log_prefix + not_found_msg(role_field, name))
auth_api().create_namespaced_role(namespace, role)
logger.info(log_prefix + created_msg(role_field, name))
def _configure_autoscaler_role_binding(namespace, provider_config):
binding_field = "autoscaler_role_binding"
if binding_field not in provider_config:
logger.info(log_prefix + not_provided_msg(binding_field))
return
binding = provider_config[binding_field]
if "namespace" not in binding["metadata"]:
binding["metadata"]["namespace"] = namespace
elif binding["metadata"]["namespace"] != namespace:
raise InvalidNamespaceError(binding_field, namespace)
for subject in binding["subjects"]:
if "namespace" not in subject:
subject["namespace"] = namespace
elif subject["namespace"] != namespace:
raise InvalidNamespaceError(
binding_field + " subject '{}'".format(subject["name"]),
namespace)
name = binding["metadata"]["name"]
field_selector = "metadata.name={}".format(name)
accounts = auth_api().list_namespaced_role_binding(
namespace, field_selector=field_selector).items
if len(accounts) > 0:
assert len(accounts) == 1
logger.info(log_prefix + using_existing_msg(binding_field, name))
return
logger.info(log_prefix + not_found_msg(binding_field, name))
auth_api().create_namespaced_role_binding(namespace, binding)
logger.info(log_prefix + created_msg(binding_field, name))
def _configure_services(namespace, provider_config):
service_field = "services"
if service_field not in provider_config:
logger.info(log_prefix + not_provided_msg(service_field))
return
services = provider_config[service_field]
for service in services:
if "namespace" not in service["metadata"]:
service["metadata"]["namespace"] = namespace
elif service["metadata"]["namespace"] != namespace:
raise InvalidNamespaceError(service_field, namespace)
name = service["metadata"]["name"]
field_selector = "metadata.name={}".format(name)
services = core_api().list_namespaced_service(
namespace, field_selector=field_selector).items
if len(services) > 0:
assert len(services) == 1
existing_service = services[0]
if service == existing_service:
logger.info(log_prefix + using_existing_msg("service", name))
return
else:
logger.info(log_prefix +
updating_existing_msg("service", name))
core_api().patch_namespaced_service(name, namespace, service)
else:
logger.info(log_prefix + not_found_msg("service", name))
core_api().create_namespaced_service(namespace, service)
logger.info(log_prefix + created_msg("service", name))
|
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor box and labeler definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
import tensorflow.compat.v1 as tf
from utils.object_detection import argmax_matcher
from utils.object_detection import balanced_positive_negative_sampler
from utils.object_detection import box_list
from utils.object_detection import faster_rcnn_box_coder
from utils.object_detection import region_similarity_calculator
from utils.object_detection import target_assigner
class Anchor(object):
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
        on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
        For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
        [height, width] of the input image size. The image_size should be
        divisible by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self):
"""Generates multiscale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2 ** level
          intermediate_scale = 2 ** (scale / float(self.num_scales))
          base_anchor_size = self.anchor_size * stride * intermediate_scale
aspect_x = aspect_ratio ** 0.5
aspect_y = aspect_ratio ** -0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
yv + half_anchor_size_y, xv + half_anchor_size_x],
axis=1)
boxes_l.append(boxes)
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l)
return tf.concat(boxes_all, axis=0)
def unpack_labels(self, labels, is_box=False):
"""Unpacks an array of labels into multiscales labels.
Args:
labels: labels to unpack.
      is_box: whether to unpack anchor boxes. If True, unpacks to 2D;
        otherwise, unpacks to 3D.
Returns:
      unpacked_labels: a dictionary containing the unpacked labels at each level.
"""
unpacked_labels = collections.OrderedDict()
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
if is_box:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[-1, 4])
else:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
@property
def multilevel_boxes(self):
return self.unpack_labels(self.boxes, is_box=True)
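# Hedged usage sketch (illustration only; the parameter values below are
# assumptions, not defaults of this module): build multiscale anchors for a
# 256x256 input with a three-level feature pyramid.
def _anchor_example():
  anchor = Anchor(
      min_level=3,
      max_level=5,
      num_scales=2,
      aspect_ratios=[1.0, 2.0, 0.5],
      anchor_size=4.0,
      image_size=[256, 256])
  # anchors_per_location = num_scales * len(aspect_ratios) = 6; anchor.boxes
  # concatenates every level into one [N, 4] tensor, and multilevel_boxes
  # splits it back into a per-level ordered dictionary.
  return anchor.boxes, anchor.multilevel_boxes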
class AnchorLabeler(object):
"""Labeler for dense object detector."""
def __init__(self,
anchor,
match_threshold=0.5,
unmatched_threshold=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchor: an instance of class Anchors.
match_threshold: a float number between 0 and 1 representing the
lower-bound threshold to assign positive labels for anchors. An anchor
with a score over the threshold is labeled positive.
unmatched_threshold: a float number between 0 and 1 representing the
upper-bound threshold to assign negative labels for anchors. An anchor
with a score below the threshold is labeled negative.
"""
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
match_threshold,
unmatched_threshold=unmatched_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder)
self._anchor = anchor
self._match_threshold = match_threshold
self._unmatched_threshold = unmatched_threshold
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: a float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
gt_labels: an integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
cls_targets_dict: ordered dictionary with keys
        [min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors_per_location]. The height_l and
width_l represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
        [min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors_per_location * 4]. The height_l
and width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: scalar tensor storing number of positives in an image.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchor.boxes)
# The cls_weights, box_weights are not used.
cls_targets, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels)
# Labels definition in matches.match_results:
# (1) match_results[i]>=0, meaning that column i is matched with row
# match_results[i].
# (2) match_results[i]=-1, meaning that column i is not matched.
# (3) match_results[i]=-2, meaning that column i is ignored.
match_results = tf.expand_dims(matches.match_results, axis=1)
cls_targets = tf.cast(cls_targets, tf.int32)
cls_targets = tf.where(tf.equal(match_results, -1),
-tf.ones_like(cls_targets), cls_targets)
cls_targets = tf.where(tf.equal(match_results, -2),
-2*tf.ones_like(cls_targets), cls_targets)
# Unpacks labels into multi-level representations.
cls_targets_dict = self._anchor.unpack_labels(cls_targets)
box_targets_dict = self._anchor.unpack_labels(box_targets)
num_positives = tf.reduce_sum(
tf.cast(tf.greater(matches.match_results, -1), tf.float32))
return cls_targets_dict, box_targets_dict, num_positives
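# --- Illustrative sketch (not part of the original module) -----------------
# NumPy illustration of how label_anchors() folds matcher results into class
# targets: matched anchors keep their assigned class, unmatched anchors (-1)
# become -1 and ignored anchors (-2) become -2, while num_positives counts the
# entries with match_results > -1. Inputs here are plain 1-D arrays for
# readability.
def _fold_match_results_sketch(cls_targets, match_results):
  import numpy as np
  cls_targets = np.asarray(cls_targets, dtype=np.int32)
  match_results = np.asarray(match_results, dtype=np.int32)
  out = np.where(match_results == -1, -np.ones_like(cls_targets), cls_targets)
  out = np.where(match_results == -2, -2 * np.ones_like(cls_targets), out)
  num_positives = float(np.sum(match_results > -1))
  return out, num_positives
# Example: cls_targets=[3, 0, 0, 7], match_results=[0, -1, -2, 1]
# -> targets [3, -1, -2, 7] and num_positives == 2.0.
# ----------------------------------------------------------------------------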
class RpnAnchorLabeler(AnchorLabeler):
"""Labeler for Region Proposal Network."""
def __init__(self,
anchor,
match_threshold=0.7,
unmatched_threshold=0.3,
rpn_batch_size_per_im=256,
rpn_fg_fraction=0.5):
AnchorLabeler.__init__(self,
anchor,
match_threshold=match_threshold,
unmatched_threshold=unmatched_threshold)
self._rpn_batch_size_per_im = rpn_batch_size_per_im
self._rpn_fg_fraction = rpn_fg_fraction
def _get_rpn_samples(self, match_results):
"""Computes anchor labels.
This function performs subsampling for foreground (fg) and background (bg)
anchors.
Args:
match_results: an integer tensor with shape [N] representing the
matching results of anchors. (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Returns:
score_targets: an integer tensor with shape [N].
(1) score_targets[i]=1, the anchor is a positive sample.
(2) score_targets[i]=0, the anchor is a negative sample.
(3) score_targets[i]=-1, the anchor is ignored (don't care).
"""
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
positive_fraction=self._rpn_fg_fraction, is_static=False))
# indicator includes both positive and negative labels.
# labels includes only positive labels.
# positives = indicator & labels.
# negatives = indicator & !labels.
# ignore = !indicator.
indicator = tf.greater(match_results, -2)
labels = tf.greater(match_results, -1)
samples = sampler.subsample(
indicator, self._rpn_batch_size_per_im, labels)
positive_labels = tf.where(
tf.logical_and(samples, labels),
tf.constant(2, dtype=tf.int32, shape=match_results.shape),
tf.constant(0, dtype=tf.int32, shape=match_results.shape))
negative_labels = tf.where(
tf.logical_and(samples, tf.logical_not(labels)),
tf.constant(1, dtype=tf.int32, shape=match_results.shape),
tf.constant(0, dtype=tf.int32, shape=match_results.shape))
ignore_labels = tf.fill(match_results.shape, -1)
return (ignore_labels + positive_labels + negative_labels,
positive_labels, negative_labels)
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: a float tensor with shape [N, 4] representing groundtruth boxes.
Each row stores the corners of a box as [y0, x0, y1, x1].
gt_labels: an integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
score_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors]. height_l and width_l are the
dimensions of the class logits at the l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors * 4]. height_l and width_l are
the dimensions of the bounding box regression output at the l-th level.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchor.boxes)
# cls_targets, cls_weights, box_weights are not used.
_, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels)
# score_targets contains the subsampled positive and negative anchors.
score_targets, _, _ = self._get_rpn_samples(matches.match_results)
# Unpacks labels.
score_targets_dict = self._anchor.unpack_labels(score_targets)
box_targets_dict = self._anchor.unpack_labels(box_targets)
return score_targets_dict, box_targets_dict
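# --- Illustrative sketch (not part of the original module) -----------------
# The RPN score encoding above is plain integer arithmetic: every anchor
# starts at -1 ("don't care"), a sampled positive adds 2 and a sampled
# negative adds 1, yielding 1 / 0 / -1 for foreground / background / ignored
# anchors. A NumPy rendering of that bookkeeping, where `samples` marks the
# anchors kept by the balanced sampler:
def _rpn_score_targets_sketch(match_results, samples):
  import numpy as np
  match_results = np.asarray(match_results)
  samples = np.asarray(samples, dtype=bool)
  labels = match_results > -1                     # matched (positive) anchors
  positive = np.where(samples & labels, 2, 0)
  negative = np.where(samples & ~labels, 1, 0)
  return -1 + positive + negative                 # 1=fg, 0=bg, -1=ignore
# Example: match_results=[5, -1, -2, 0], samples=[True, True, False, False]
# -> score targets [1, 0, -1, -1].
# ----------------------------------------------------------------------------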
|
|
"""Test to verify that Home Assistant core works."""
# pylint: disable=protected-access
import asyncio
import unittest
from unittest.mock import patch, MagicMock, sentinel
from datetime import datetime, timedelta
import pytz
import pytest
import homeassistant.core as ha
from homeassistant.exceptions import InvalidEntityFormatError
from homeassistant.util.async import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import (METRIC_SYSTEM)
from homeassistant.const import (
__version__, EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, CONF_UNIT_SYSTEM,
ATTR_NOW, EVENT_TIME_CHANGED, EVENT_HOMEASSISTANT_STOP,
EVENT_HOMEASSISTANT_CLOSE, EVENT_HOMEASSISTANT_START,
EVENT_SERVICE_REGISTERED, EVENT_SERVICE_REMOVED)
from tests.common import get_test_home_assistant
PST = pytz.timezone('America/Los_Angeles')
def test_split_entity_id():
"""Test split_entity_id."""
assert ha.split_entity_id('domain.object_id') == ['domain', 'object_id']
def test_async_add_job_schedule_callback():
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, ha.callback(job))
assert len(hass.loop.call_soon.mock_calls) == 1
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=True)
def test_async_add_job_schedule_coroutinefunction(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 1
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=False)
def test_async_add_job_add_threaded_job_to_pool(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.loop.run_in_executor.mock_calls) == 1
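# --- Illustrative sketch (not part of the original tests) ------------------
# The three tests above pin down the dispatch rule exercised here: callbacks
# are scheduled with loop.call_soon, coroutine functions with
# loop.create_task, and plain functions with loop.run_in_executor. A minimal
# stand-alone rendering of that decision; the `_hass_callback` marker is an
# assumption about how @ha.callback tags a function.
def _dispatch_job_sketch(loop, target, *args):
    """Schedule `target` on `loop` the way async_add_job is tested to."""
    import asyncio
    if getattr(target, '_hass_callback', False):  # assumed @ha.callback marker
        return loop.call_soon(target, *args)
    if asyncio.iscoroutinefunction(target):
        return loop.create_task(target(*args))
    return loop.run_in_executor(None, target, *args)
# ----------------------------------------------------------------------------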
def test_async_run_job_calls_callback():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, ha.callback(job))
assert len(calls) == 1
assert len(hass.async_add_job.mock_calls) == 0
def test_async_run_job_delegates_non_async():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, job)
assert len(calls) == 0
assert len(hass.async_add_job.mock_calls) == 1
def test_stage_shutdown():
"""Simulate a shutdown, test calling stuff."""
hass = get_test_home_assistant()
test_stop = []
test_close = []
test_all = []
hass.bus.listen(
EVENT_HOMEASSISTANT_STOP, lambda event: test_stop.append(event))
hass.bus.listen(
EVENT_HOMEASSISTANT_CLOSE, lambda event: test_close.append(event))
hass.bus.listen('*', lambda event: test_all.append(event))
hass.stop()
assert len(test_stop) == 1
assert len(test_close) == 1
assert len(test_all) == 1
class TestHomeAssistant(unittest.TestCase):
"""Test the Home Assistant core classes."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_pending_scheduler(self):
"""Add a coro to pending tasks."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(3):
self.hass.add_job(test_coro())
run_coroutine_threadsafe(
asyncio.wait(self.hass._pending_tasks, loop=self.hass.loop),
loop=self.hass.loop
).result()
assert len(self.hass._pending_tasks) == 3
assert len(call_count) == 3
def test_async_add_job_pending_tasks_coro(self):
"""Add a coro to pending tasks."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(2):
self.hass.add_job(test_coro())
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
assert len(self.hass._pending_tasks) == 2
self.hass.block_till_done()
assert len(call_count) == 2
def test_async_add_job_pending_tasks_executor(self):
"""Run a executor in pending tasks."""
call_count = []
def test_executor():
"""Test executor."""
call_count.append('call')
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
for i in range(2):
self.hass.add_job(test_executor)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
assert len(self.hass._pending_tasks) == 2
self.hass.block_till_done()
assert len(call_count) == 2
def test_async_add_job_pending_tasks_callback(self):
"""Run a callback in pending tasks."""
call_count = []
@ha.callback
def test_callback():
"""Test callback."""
call_count.append('call')
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
for i in range(2):
self.hass.add_job(test_callback)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
self.hass.block_till_done()
assert len(self.hass._pending_tasks) == 0
assert len(call_count) == 2
def test_add_job_with_none(self):
"""Try to add a job with None as function."""
with pytest.raises(ValueError):
self.hass.add_job(None, 'test_arg')
class TestEvent(unittest.TestCase):
"""A Test Event class."""
def test_eq(self):
"""Test events."""
now = dt_util.utcnow()
data = {'some': 'attr'}
event1, event2 = [
ha.Event('some_type', data, time_fired=now)
for _ in range(2)
]
self.assertEqual(event1, event2)
def test_repr(self):
"""Test that repr method works."""
self.assertEqual(
"<Event TestEvent[L]>",
str(ha.Event("TestEvent")))
self.assertEqual(
"<Event TestEvent[R]: beer=nice>",
str(ha.Event("TestEvent",
{"beer": "nice"},
ha.EventOrigin.remote)))
def test_as_dict(self):
"""Test as dictionary."""
event_type = 'some_type'
now = dt_util.utcnow()
data = {'some': 'attr'}
event = ha.Event(event_type, data, ha.EventOrigin.local, now)
expected = {
'event_type': event_type,
'data': data,
'origin': 'LOCAL',
'time_fired': now,
}
self.assertEqual(expected, event.as_dict())
class TestEventBus(unittest.TestCase):
"""Test EventBus methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.bus = self.hass.bus
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_add_remove_listener(self):
"""Test remove_listener method."""
self.hass.allow_pool = False
old_count = len(self.bus.listeners)
def listener(_): pass
unsub = self.bus.listen('test', listener)
self.assertEqual(old_count + 1, len(self.bus.listeners))
# Remove listener
unsub()
self.assertEqual(old_count, len(self.bus.listeners))
# Should do nothing now
unsub()
def test_unsubscribe_listener(self):
"""Test unsubscribe listener from returned function."""
calls = []
@ha.callback
def listener(event):
"""Mock listener."""
calls.append(event)
unsub = self.bus.listen('test', listener)
self.bus.fire('test')
self.hass.block_till_done()
assert len(calls) == 1
unsub()
self.bus.fire('event')
self.hass.block_till_done()
assert len(calls) == 1
def test_listen_once_event_with_callback(self):
"""Test listen_once_event method."""
runs = []
@ha.callback
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_coroutine(self):
"""Test listen_once_event method."""
runs = []
@asyncio.coroutine
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_thread(self):
"""Test listen_once_event method."""
runs = []
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_thread_event_listener(self):
"""Test a event listener listeners."""
thread_calls = []
def thread_listener(event):
thread_calls.append(event)
self.bus.listen('test_thread', thread_listener)
self.bus.fire('test_thread')
self.hass.block_till_done()
assert len(thread_calls) == 1
def test_callback_event_listener(self):
"""Test a event listener listeners."""
callback_calls = []
@ha.callback
def callback_listener(event):
callback_calls.append(event)
self.bus.listen('test_callback', callback_listener)
self.bus.fire('test_callback')
self.hass.block_till_done()
assert len(callback_calls) == 1
def test_coroutine_event_listener(self):
"""Test a event listener listeners."""
coroutine_calls = []
@asyncio.coroutine
def coroutine_listener(event):
coroutine_calls.append(event)
self.bus.listen('test_coroutine', coroutine_listener)
self.bus.fire('test_coroutine')
self.hass.block_till_done()
assert len(coroutine_calls) == 1
class TestState(unittest.TestCase):
"""Test State methods."""
def test_init(self):
"""Test state.init."""
self.assertRaises(
InvalidEntityFormatError, ha.State,
'invalid_entity_format', 'test_state')
def test_domain(self):
"""Test domain."""
state = ha.State('some_domain.hello', 'world')
self.assertEqual('some_domain', state.domain)
def test_object_id(self):
"""Test object ID."""
state = ha.State('domain.hello', 'world')
self.assertEqual('hello', state.object_id)
def test_name_if_no_friendly_name_attr(self):
"""Test if there is no friendly name."""
state = ha.State('domain.hello_world', 'world')
self.assertEqual('hello world', state.name)
def test_name_if_friendly_name_attr(self):
"""Test if there is a friendly name."""
name = 'Some Unique Name'
state = ha.State('domain.hello_world', 'world',
{ATTR_FRIENDLY_NAME: name})
self.assertEqual(name, state.name)
def test_dict_conversion(self):
"""Test conversion of dict."""
state = ha.State('domain.hello', 'world', {'some': 'attr'})
self.assertEqual(state, ha.State.from_dict(state.as_dict()))
def test_dict_conversion_with_wrong_data(self):
"""Test conversion with wrong data."""
self.assertIsNone(ha.State.from_dict(None))
self.assertIsNone(ha.State.from_dict({'state': 'yes'}))
self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'}))
def test_repr(self):
"""Test state.repr."""
self.assertEqual("<state happy.happy=on @ 1984-12-08T12:00:00+00:00>",
str(ha.State(
"happy.happy", "on",
last_changed=datetime(1984, 12, 8, 12, 0, 0))))
self.assertEqual(
"<state happy.happy=on; brightness=144 @ "
"1984-12-08T12:00:00+00:00>",
str(ha.State("happy.happy", "on", {"brightness": 144},
datetime(1984, 12, 8, 12, 0, 0))))
class TestStateMachine(unittest.TestCase):
"""Test State machine methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.states = self.hass.states
self.states.set("light.Bowl", "on")
self.states.set("switch.AC", "off")
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_is_state(self):
"""Test is_state method."""
self.assertTrue(self.states.is_state('light.Bowl', 'on'))
self.assertFalse(self.states.is_state('light.Bowl', 'off'))
self.assertFalse(self.states.is_state('light.Non_existing', 'on'))
def test_is_state_attr(self):
"""Test is_state_attr method."""
self.states.set("light.Bowl", "on", {"brightness": 100})
self.assertTrue(
self.states.is_state_attr('light.Bowl', 'brightness', 100))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 200))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 'Bowl'))
self.assertFalse(
self.states.is_state_attr('light.Non_existing', 'brightness', 100))
def test_entity_ids(self):
"""Test get_entity_ids method."""
ent_ids = self.states.entity_ids()
self.assertEqual(2, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
self.assertTrue('switch.ac' in ent_ids)
ent_ids = self.states.entity_ids('light')
self.assertEqual(1, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
def test_all(self):
"""Test everything."""
states = sorted(state.entity_id for state in self.states.all())
self.assertEqual(['light.bowl', 'switch.ac'], states)
def test_remove(self):
"""Test remove method."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.assertIn('light.bowl', self.states.entity_ids())
self.assertTrue(self.states.remove('light.bowl'))
self.hass.block_till_done()
self.assertNotIn('light.bowl', self.states.entity_ids())
self.assertEqual(1, len(events))
self.assertEqual('light.bowl', events[0].data.get('entity_id'))
self.assertIsNotNone(events[0].data.get('old_state'))
self.assertEqual('light.bowl', events[0].data['old_state'].entity_id)
self.assertIsNone(events[0].data.get('new_state'))
# If it does not exist, we should get False
self.assertFalse(self.states.remove('light.Bowl'))
self.hass.block_till_done()
self.assertEqual(1, len(events))
def test_case_insensitivity(self):
"""Test case insensitivity of entity IDs."""
runs = []
@ha.callback
def callback(event):
runs.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.BOWL', 'off')
self.hass.block_till_done()
self.assertTrue(self.states.is_state('light.bowl', 'off'))
self.assertEqual(1, len(runs))
def test_last_changed_not_updated_on_same_state(self):
"""Test to not update the existing, same state."""
state = self.states.get('light.Bowl')
future = dt_util.utcnow() + timedelta(hours=10)
with patch('homeassistant.util.dt.utcnow', return_value=future):
self.states.set("light.Bowl", "on", {'attr': 'triggers_change'})
self.hass.block_till_done()
state2 = self.states.get('light.Bowl')
assert state2 is not None
assert state.last_changed == state2.last_changed
def test_force_update(self):
"""Test force update option."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.bowl', 'on')
self.hass.block_till_done()
self.assertEqual(0, len(events))
self.states.set('light.bowl', 'on', None, True)
self.hass.block_till_done()
self.assertEqual(1, len(events))
class TestServiceCall(unittest.TestCase):
"""Test ServiceCall class."""
def test_repr(self):
"""Test repr method."""
self.assertEqual(
"<ServiceCall homeassistant.start>",
str(ha.ServiceCall('homeassistant', 'start')))
self.assertEqual(
"<ServiceCall homeassistant.start: fast=yes>",
str(ha.ServiceCall('homeassistant', 'start', {"fast": "yes"})))
class TestServiceRegistry(unittest.TestCase):
"""Test ServicerRegistry methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.services = self.hass.services
@ha.callback
def mock_service(call):
pass
self.services.register("Test_Domain", "TEST_SERVICE", mock_service)
self.calls_register = []
@ha.callback
def mock_event_register(event):
"""Mock register event."""
self.calls_register.append(event)
self.hass.bus.listen(EVENT_SERVICE_REGISTERED, mock_event_register)
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_has_service(self):
"""Test has_service method."""
self.assertTrue(
self.services.has_service("tesT_domaiN", "tesT_servicE"))
self.assertFalse(
self.services.has_service("test_domain", "non_existing"))
self.assertFalse(
self.services.has_service("non_existing", "test_service"))
def test_services(self):
"""Test services."""
expected = {
'test_domain': {'test_service': {'description': '', 'fields': {}}}
}
self.assertEqual(expected, self.services.services)
def test_call_with_blocking_done_in_time(self):
"""Test call with blocking."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler."""
calls.append(call)
self.services.register(
"test_domain", "register_calls", service_handler)
self.hass.block_till_done()
assert len(self.calls_register) == 1
assert self.calls_register[-1].data['domain'] == 'test_domain'
assert self.calls_register[-1].data['service'] == 'register_calls'
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.assertEqual(1, len(calls))
def test_call_non_existing_with_blocking(self):
"""Test non-existing with blocking."""
prior = ha.SERVICE_CALL_LIMIT
try:
ha.SERVICE_CALL_LIMIT = 0.01
assert not self.services.call('test_domain', 'i_do_not_exist',
blocking=True)
finally:
ha.SERVICE_CALL_LIMIT = prior
def test_async_service(self):
"""Test registering and calling an async service."""
calls = []
@asyncio.coroutine
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register(
'test_domain', 'register_calls', service_handler)
self.hass.block_till_done()
assert len(self.calls_register) == 1
assert self.calls_register[-1].data['domain'] == 'test_domain'
assert self.calls_register[-1].data['service'] == 'register_calls'
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
def test_callback_service(self):
"""Test registering and calling an async service."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register(
'test_domain', 'register_calls', service_handler)
self.hass.block_till_done()
assert len(self.calls_register) == 1
assert self.calls_register[-1].data['domain'] == 'test_domain'
assert self.calls_register[-1].data['service'] == 'register_calls'
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
def test_remove_service(self):
"""Test remove service."""
calls_remove = []
@ha.callback
def mock_event_remove(event):
"""Mock register event."""
calls_remove.append(event)
self.hass.bus.listen(EVENT_SERVICE_REMOVED, mock_event_remove)
assert self.services.has_service('test_Domain', 'test_Service')
self.services.remove('test_Domain', 'test_Service')
self.hass.block_till_done()
assert not self.services.has_service('test_Domain', 'test_Service')
assert len(calls_remove) == 1
assert calls_remove[-1].data['domain'] == 'test_domain'
assert calls_remove[-1].data['service'] == 'test_service'
def test_remove_service_that_not_exists(self):
"""Test remove service that not exists."""
calls_remove = []
@ha.callback
def mock_event_remove(event):
"""Mock register event."""
calls_remove.append(event)
self.hass.bus.listen(EVENT_SERVICE_REMOVED, mock_event_remove)
assert not self.services.has_service('test_xxx', 'test_yyy')
self.services.remove('test_xxx', 'test_yyy')
self.hass.block_till_done()
assert len(calls_remove) == 0
class TestConfig(unittest.TestCase):
"""Test configuration methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.config = ha.Config()
self.assertIsNone(self.config.config_dir)
def test_path_with_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/test.conf",
self.config.path("test.conf"))
def test_path_with_dir_and_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/dir/test.conf",
self.config.path("dir", "test.conf"))
def test_as_dict(self):
"""Test as dict."""
self.config.config_dir = '/tmp/ha-config'
expected = {
'latitude': None,
'longitude': None,
'elevation': None,
CONF_UNIT_SYSTEM: METRIC_SYSTEM.as_dict(),
'location_name': None,
'time_zone': 'UTC',
'components': set(),
'config_dir': '/tmp/ha-config',
'version': __version__,
}
self.assertEqual(expected, self.config.as_dict())
@patch('homeassistant.core.monotonic')
def test_create_timer(mock_monotonic, loop):
"""Test create timer."""
hass = MagicMock()
funcs = []
orig_callback = ha.callback
def mock_callback(func):
funcs.append(func)
return orig_callback(func)
with patch.object(ha, 'callback', mock_callback):
ha._async_create_timer(hass)
assert len(funcs) == 3
fire_time_event, start_timer, stop_timer = funcs
assert len(hass.bus.async_listen_once.mock_calls) == 1
event_type, callback = hass.bus.async_listen_once.mock_calls[0][1]
assert event_type == EVENT_HOMEASSISTANT_START
assert callback is start_timer
mock_monotonic.side_effect = 10.2, 10.3
with patch('homeassistant.core.dt_util.utcnow',
return_value=sentinel.mock_date):
start_timer(None)
assert len(hass.bus.async_listen_once.mock_calls) == 2
assert len(hass.bus.async_fire.mock_calls) == 1
assert len(hass.loop.call_later.mock_calls) == 1
event_type, callback = hass.bus.async_listen_once.mock_calls[1][1]
assert event_type == EVENT_HOMEASSISTANT_STOP
assert callback is stop_timer
slp_seconds, callback, nxt = hass.loop.call_later.mock_calls[0][1]
assert abs(slp_seconds - 0.9) < 0.001
assert callback is fire_time_event
assert abs(nxt - 11.2) < 0.001
event_type, event_data = hass.bus.async_fire.mock_calls[0][1]
assert event_type == EVENT_TIME_CHANGED
assert event_data[ATTR_NOW] is sentinel.mock_date
@patch('homeassistant.core.monotonic')
def test_timer_out_of_sync(mock_monotonic, loop):
"""Test create timer."""
hass = MagicMock()
funcs = []
orig_callback = ha.callback
def mock_callback(func):
funcs.append(func)
return orig_callback(func)
with patch.object(ha, 'callback', mock_callback):
ha._async_create_timer(hass)
assert len(funcs) == 3
fire_time_event, start_timer, stop_timer = funcs
mock_monotonic.side_effect = 10.2, 11.3, 11.3
with patch('homeassistant.core.dt_util.utcnow',
return_value=sentinel.mock_date):
start_timer(None)
assert len(hass.loop.call_later.mock_calls) == 1
slp_seconds, callback, nxt = hass.loop.call_later.mock_calls[0][1]
assert slp_seconds == 1
assert callback is fire_time_event
assert abs(nxt - 12.3) < 0.001
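# --- Illustrative sketch (not part of the original tests) ------------------
# The two timer tests above pin down the scheduling arithmetic: each tick is
# aimed at `previous target + 1` second on the monotonic clock, and when that
# target has already passed the timer resets to `monotonic() + 1`. A small
# stand-alone rendering of that rule, where `monotonic_now` is the clock value
# read when the next tick is scheduled:
def _next_tick_sketch(prev_target, monotonic_now):
    """Return (sleep_seconds, next_target) for the time-changed timer."""
    next_target = prev_target + 1
    sleep_seconds = next_target - monotonic_now
    if sleep_seconds < 0:            # timer got out of sync, reset
        next_target = monotonic_now + 1
        sleep_seconds = 1
    return sleep_seconds, next_target
# _next_tick_sketch(10.2, 10.3) -> approx. (0.9, 11.2)  (test_create_timer)
# _next_tick_sketch(10.2, 11.3) -> approx. (1, 12.3)    (test_timer_out_of_sync)
# ----------------------------------------------------------------------------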
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import datetime
import iso8601
from oslo_utils import timeutils
import six
import six.moves.urllib.parse as urlparse
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import simple_tenant_usage as schema
from nova.api.openstack.compute.views import usages as usages_view
from nova.api.openstack import wsgi
from nova.api import validation
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.policies import simple_tenant_usage as stu_policies
CONF = nova.conf.CONF
def parse_strtime(dstr, fmt):
try:
return timeutils.parse_strtime(dstr, fmt)
except (TypeError, ValueError) as e:
raise exception.InvalidStrTime(reason=six.text_type(e))
class SimpleTenantUsageController(wsgi.Controller):
_view_builder_class = usages_view.ViewBuilder
def _hours_for(self, instance, period_start, period_stop):
launched_at = instance.launched_at
terminated_at = instance.terminated_at
if terminated_at is not None:
if not isinstance(terminated_at, datetime.datetime):
# NOTE(mriedem): Instance object DateTime fields are
# timezone-aware so convert using isotime.
terminated_at = timeutils.parse_isotime(terminated_at)
if launched_at is not None:
if not isinstance(launched_at, datetime.datetime):
launched_at = timeutils.parse_isotime(launched_at)
if terminated_at and terminated_at < period_start:
return 0
# nothing if it started after the usage report ended
if launched_at and launched_at > period_stop:
return 0
if launched_at:
# if the instance launched after period_start, only charge from the launch time
start = max(launched_at, period_start)
if terminated_at:
# if the instance stopped before period_stop, don't charge past the termination time
stop = min(period_stop, terminated_at)
else:
# instance is still running, so charge up to the end of the period
stop = period_stop
dt = stop - start
return dt.total_seconds() / 3600.0
else:
# instance hasn't launched, so no charge
return 0
def _get_flavor(self, context, instance, flavors_cache):
"""Get flavor information from the instance object,
allowing a fallback to lookup by-id for deleted instances only.
"""
try:
return instance.get_flavor()
except exception.NotFound:
if not instance.deleted:
# Only support the fallback mechanism for deleted instances
# that would have been skipped by migration #153
raise
flavor_type = instance.instance_type_id
if flavor_type in flavors_cache:
return flavors_cache[flavor_type]
try:
flavor_ref = objects.Flavor.get_by_id(context, flavor_type)
flavors_cache[flavor_type] = flavor_ref
except exception.FlavorNotFound:
# can't bill if there is no flavor
flavor_ref = None
return flavor_ref
def _get_instances_all_cells(self, context, period_start, period_stop,
tenant_id, limit, marker):
all_instances = []
cells = objects.CellMappingList.get_all(context)
for cell in cells:
with nova_context.target_cell(context, cell) as cctxt:
try:
instances = (
objects.InstanceList.get_active_by_window_joined(
cctxt, period_start, period_stop, tenant_id,
expected_attrs=['flavor'], limit=limit,
marker=marker))
except exception.MarkerNotFound:
# NOTE(danms): We need to keep looking through the later
# cells to find the marker
continue
all_instances.extend(instances)
# NOTE(danms): We must have found a marker if we had one,
# so make sure we don't require a marker in the next cell
marker = None
if limit:
limit -= len(instances)
if limit <= 0:
break
if marker is not None and len(all_instances) == 0:
# NOTE(danms): If we did not find the marker in any cell,
# mimic the db_api behavior here
raise exception.MarkerNotFound(marker=marker)
return all_instances
def _tenant_usages_for_period(self, context, period_start, period_stop,
tenant_id=None, detailed=True, limit=None,
marker=None):
instances = self._get_instances_all_cells(context, period_start,
period_stop, tenant_id,
limit, marker)
rval = collections.OrderedDict()
flavors = {}
all_server_usages = []
for instance in instances:
info = {}
info['hours'] = self._hours_for(instance,
period_start,
period_stop)
flavor = self._get_flavor(context, instance, flavors)
if not flavor:
info['flavor'] = ''
else:
info['flavor'] = flavor.name
info['instance_id'] = instance.uuid
info['name'] = instance.display_name
info['tenant_id'] = instance.project_id
try:
info['memory_mb'] = instance.flavor.memory_mb
info['local_gb'] = (instance.flavor.root_gb +
instance.flavor.ephemeral_gb)
info['vcpus'] = instance.flavor.vcpus
except exception.InstanceNotFound:
# This is a rare case: the instance disappeared during analysis.
# Since this is just info collection, skip it and move on to the next one.
continue
# NOTE(mriedem): We need to normalize the start/end times back
# to timezone-naive so the response doesn't change after the
# conversion to objects.
info['started_at'] = timeutils.normalize_time(instance.launched_at)
info['ended_at'] = (
timeutils.normalize_time(instance.terminated_at) if
instance.terminated_at else None)
if info['ended_at']:
info['state'] = 'terminated'
else:
info['state'] = instance.vm_state
now = timeutils.utcnow()
if info['state'] == 'terminated':
delta = info['ended_at'] - info['started_at']
else:
delta = now - info['started_at']
info['uptime'] = int(delta.total_seconds())
if info['tenant_id'] not in rval:
summary = {}
summary['tenant_id'] = info['tenant_id']
if detailed:
summary['server_usages'] = []
summary['total_local_gb_usage'] = 0
summary['total_vcpus_usage'] = 0
summary['total_memory_mb_usage'] = 0
summary['total_hours'] = 0
summary['start'] = timeutils.normalize_time(period_start)
summary['stop'] = timeutils.normalize_time(period_stop)
rval[info['tenant_id']] = summary
summary = rval[info['tenant_id']]
summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
summary['total_memory_mb_usage'] += (info['memory_mb'] *
info['hours'])
summary['total_hours'] += info['hours']
all_server_usages.append(info)
if detailed:
summary['server_usages'].append(info)
return list(rval.values()), all_server_usages
def _parse_datetime(self, dtstr):
if not dtstr:
value = timeutils.utcnow()
elif isinstance(dtstr, datetime.datetime):
value = dtstr
else:
for fmt in ["%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M:%S.%f",
"%Y-%m-%d %H:%M:%S.%f"]:
try:
value = parse_strtime(dtstr, fmt)
break
except exception.InvalidStrTime:
pass
else:
msg = _("Datetime is in invalid format")
raise exception.InvalidStrTime(reason=msg)
# NOTE(mriedem): Instance object DateTime fields are timezone-aware
# so we have to force UTC timezone for comparing this datetime against
# instance object fields and still maintain backwards compatibility
# in the API.
if value.utcoffset() is None:
value = value.replace(tzinfo=iso8601.UTC)
return value
def _get_datetime_range(self, req):
qs = req.environ.get('QUERY_STRING', '')
env = urlparse.parse_qs(qs)
# NOTE(lzyeval): env.get() always returns a list
period_start = self._parse_datetime(env.get('start', [None])[0])
period_stop = self._parse_datetime(env.get('end', [None])[0])
if not period_start < period_stop:
msg = _("Invalid start time. The start time cannot occur after "
"the end time.")
raise exc.HTTPBadRequest(explanation=msg)
detailed = env.get('detailed', ['0'])[0] == '1'
return (period_start, period_stop, detailed)
@wsgi.Controller.api_version("2.40")
@validation.query_schema(schema.index_query_v240)
@wsgi.expected_errors(400)
def index(self, req):
"""Retrieve tenant_usage for all tenants."""
return self._index(req, links=True)
@wsgi.Controller.api_version("2.1", "2.39") # noqa
@validation.query_schema(schema.index_query)
@wsgi.expected_errors(400)
def index(self, req):
"""Retrieve tenant_usage for all tenants."""
return self._index(req)
@wsgi.Controller.api_version("2.40")
@validation.query_schema(schema.show_query_v240)
@wsgi.expected_errors(400)
def show(self, req, id):
"""Retrieve tenant_usage for a specified tenant."""
return self._show(req, id, links=True)
@wsgi.Controller.api_version("2.1", "2.39") # noqa
@validation.query_schema(schema.show_query)
@wsgi.expected_errors(400)
def show(self, req, id):
"""Retrieve tenant_usage for a specified tenant."""
return self._show(req, id)
def _index(self, req, links=False):
context = req.environ['nova.context']
context.can(stu_policies.POLICY_ROOT % 'list')
try:
(period_start, period_stop, detailed) = self._get_datetime_range(
req)
except exception.InvalidStrTime as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
now = timeutils.parse_isotime(timeutils.utcnow().isoformat())
if period_stop > now:
period_stop = now
marker = None
limit = CONF.api.max_limit
if links:
limit, marker = common.get_limit_and_marker(req)
try:
usages, server_usages = self._tenant_usages_for_period(
context, period_start, period_stop, detailed=detailed,
limit=limit, marker=marker)
except exception.MarkerNotFound as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
tenant_usages = {'tenant_usages': usages}
if links:
usages_links = self._view_builder.get_links(req, server_usages)
if usages_links:
tenant_usages['tenant_usages_links'] = usages_links
return tenant_usages
def _show(self, req, id, links=False):
tenant_id = id
context = req.environ['nova.context']
context.can(stu_policies.POLICY_ROOT % 'show',
{'project_id': tenant_id})
try:
(period_start, period_stop, ignore) = self._get_datetime_range(
req)
except exception.InvalidStrTime as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
now = timeutils.parse_isotime(timeutils.utcnow().isoformat())
if period_stop > now:
period_stop = now
marker = None
limit = CONF.api.max_limit
if links:
limit, marker = common.get_limit_and_marker(req)
try:
usage, server_usages = self._tenant_usages_for_period(
context, period_start, period_stop, tenant_id=tenant_id,
detailed=True, limit=limit, marker=marker)
except exception.MarkerNotFound as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if len(usage):
usage = list(usage)[0]
else:
usage = {}
tenant_usage = {'tenant_usage': usage}
if links:
usages_links = self._view_builder.get_links(
req, server_usages, tenant_id=tenant_id)
if usages_links:
tenant_usage['tenant_usage_links'] = usages_links
return tenant_usage
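# --- Illustrative sketch (not part of this module) -------------------------
# A pure-datetime rendering of the clamping rule in _hours_for(): usage is
# billed only for the overlap between [launched_at, terminated_at or the end
# of the period] and the requested [period_start, period_stop] window,
# expressed in hours. All arguments are datetime objects (or None).
def _billed_hours_sketch(launched_at, terminated_at, period_start, period_stop):
    if launched_at is None:
        return 0                    # never launched, nothing to charge
    if terminated_at is not None and terminated_at < period_start:
        return 0                    # terminated before the report window
    if launched_at > period_stop:
        return 0                    # launched after the report window
    start = max(launched_at, period_start)
    stop = min(period_stop, terminated_at) if terminated_at else period_stop
    return (stop - start).total_seconds() / 3600.0
# Example: an instance launched 30 minutes into a two-hour window and still
# running at its end is billed 1.5 hours.
# ----------------------------------------------------------------------------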
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
ds = distributions
class MultivariateNormalTriLTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_chol(self, *shape):
mat = self._rng.rand(*shape)
chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)
chol = array_ops.matrix_band_part(chol, -1, 0)
sigma = math_ops.matmul(chol, chol, adjoint_b=True)
return chol.eval(), sigma.eval()
def testLogPDFScalarBatch(self):
with self.test_session():
mu = self._rng.rand(2)
chol, sigma = self._random_chol(2, 2)
chol[1, 1] = -chol[1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
x = self._rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testLogPDFXIsHigherRank(self):
with self.test_session():
mu = self._rng.rand(2)
chol, sigma = self._random_chol(2, 2)
chol[0, 0] = -chol[0, 0]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
x = self._rng.rand(3, 2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((3,), log_pdf.get_shape())
self.assertEqual((3,), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval(), atol=0., rtol=0.02)
self.assertAllClose(expected_pdf, pdf.eval(), atol=0., rtol=0.03)
def testLogPDFXLowerDimension(self):
with self.test_session():
mu = self._rng.rand(3, 2)
chol, sigma = self._random_chol(3, 2, 2)
chol[0, 0, 0] = -chol[0, 0, 0]
chol[2, 1, 1] = -chol[2, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
x = self._rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
self.assertEqual((3,), log_pdf.get_shape())
self.assertEqual((3,), pdf.get_shape())
# scipy can't do batches, so just test one of them.
scipy_mvn = stats.multivariate_normal(mean=mu[1, :], cov=sigma[1, :, :])
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval()[1])
self.assertAllClose(expected_pdf, pdf.eval()[1])
def testEntropy(self):
with self.test_session():
mu = self._rng.rand(2)
chol, sigma = self._random_chol(2, 2)
chol[0, 0] = -chol[0, 0]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
entropy = mvn.entropy()
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_entropy = scipy_mvn.entropy()
self.assertEqual(entropy.get_shape(), ())
self.assertAllClose(expected_entropy, entropy.eval())
def testEntropyMultidimensional(self):
with self.test_session():
mu = self._rng.rand(3, 5, 2)
chol, sigma = self._random_chol(3, 5, 2, 2)
chol[1, 0, 0, 0] = -chol[1, 0, 0, 0]
chol[2, 3, 1, 1] = -chol[2, 3, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
entropy = mvn.entropy()
# Scipy doesn't do batches, so test one of them.
expected_entropy = stats.multivariate_normal(
mean=mu[1, 1, :], cov=sigma[1, 1, :, :]).entropy()
self.assertEqual(entropy.get_shape(), (3, 5))
self.assertAllClose(expected_entropy, entropy.eval()[1, 1])
def testSample(self):
with self.test_session():
mu = self._rng.rand(2)
chol, sigma = self._random_chol(2, 2)
chol[0, 0] = -chol[0, 0]
sigma[0, 1] = -sigma[0, 1]
sigma[1, 0] = -sigma[1, 0]
n = constant_op.constant(100000)
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
samples = mvn.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), [int(100e3), 2])
self.assertAllClose(sample_values.mean(axis=0), mu, atol=1e-2)
self.assertAllClose(np.cov(sample_values, rowvar=0), sigma, atol=0.06)
def testSingularScaleRaises(self):
with self.test_session():
mu = None
chol = [[1., 0.], [0., 0.]]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
with self.assertRaisesOpError("Singular operator"):
mvn.sample().eval()
def testSampleWithSampleShape(self):
with self.test_session():
mu = self._rng.rand(3, 5, 2)
chol, sigma = self._random_chol(3, 5, 2, 2)
chol[1, 0, 0, 0] = -chol[1, 0, 0, 0]
chol[2, 3, 1, 1] = -chol[2, 3, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
samples_val = mvn.sample((10, 11, 12), seed=137).eval()
# Check sample shape
self.assertEqual((10, 11, 12, 3, 5, 2), samples_val.shape)
# Check sample means
x = samples_val[:, :, :, 1, 1, :]
self.assertAllClose(
x.reshape(10 * 11 * 12, 2).mean(axis=0), mu[1, 1], atol=0.05)
# Check that log_prob(samples) works
log_prob_val = mvn.log_prob(samples_val).eval()
x_log_pdf = log_prob_val[:, :, :, 1, 1]
expected_log_pdf = stats.multivariate_normal(
mean=mu[1, 1, :], cov=sigma[1, 1, :, :]).logpdf(x)
self.assertAllClose(expected_log_pdf, x_log_pdf)
def testSampleMultiDimensional(self):
with self.test_session():
mu = self._rng.rand(3, 5, 2)
chol, sigma = self._random_chol(3, 5, 2, 2)
chol[1, 0, 0, 0] = -chol[1, 0, 0, 0]
chol[2, 3, 1, 1] = -chol[2, 3, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
n = constant_op.constant(100000)
samples = mvn.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (100000, 3, 5, 2))
self.assertAllClose(
sample_values[:, 1, 1, :].mean(axis=0), mu[1, 1, :], atol=0.05)
self.assertAllClose(
np.cov(sample_values[:, 1, 1, :], rowvar=0),
sigma[1, 1, :, :],
atol=1e-1)
def testShapes(self):
with self.test_session():
mu = self._rng.rand(3, 5, 2)
chol, _ = self._random_chol(3, 5, 2, 2)
chol[1, 0, 0, 0] = -chol[1, 0, 0, 0]
chol[2, 3, 1, 1] = -chol[2, 3, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
# Shapes known at graph construction time.
self.assertEqual((2,), tuple(mvn.event_shape.as_list()))
self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
# Shapes known at runtime.
self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))
self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))
def _random_mu_and_sigma(self, batch_shape, event_shape):
# This ensures sigma is positive definite.
mat_shape = batch_shape + event_shape + event_shape
mat = self._rng.randn(*mat_shape)
perm = np.arange(mat.ndim)
perm[-2:] = [perm[-1], perm[-2]]
sigma = np.matmul(mat, np.transpose(mat, perm))
mu_shape = batch_shape + event_shape
mu = self._rng.randn(*mu_shape)
return mu, sigma
def testKLNonBatch(self):
batch_shape = ()
event_shape = (2,)
with self.test_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalTriL(
loc=mu_a,
scale_tril=np.linalg.cholesky(sigma_a),
validate_args=True)
mvn_b = ds.MultivariateNormalTriL(
loc=mu_b,
scale_tril=np.linalg.cholesky(sigma_b),
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl = _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b)
self.assertAllClose(expected_kl, kl_v)
def testKLBatch(self):
batch_shape = (2,)
event_shape = (3,)
with self.test_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalTriL(
loc=mu_a,
scale_tril=np.linalg.cholesky(sigma_a),
validate_args=True)
mvn_b = ds.MultivariateNormalTriL(
loc=mu_b,
scale_tril=np.linalg.cholesky(sigma_b),
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b[0, :], sigma_b[0, :])
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b[1, :], sigma_b[1, :])
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def testKLTwoIdenticalDistributionsIsZero(self):
batch_shape = (2,)
event_shape = (3,)
with self.test_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalTriL(
loc=mu_a,
scale_tril=np.linalg.cholesky(sigma_a),
validate_args=True)
# Should be zero since KL(p || p) = 0.
kl = ds.kl_divergence(mvn_a, mvn_a)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
self.assertAllClose(np.zeros(*batch_shape), kl_v)
def testSampleLarge(self):
mu = np.array([-1., 1], dtype=np.float32)
scale_tril = np.array([[3., 0], [1, -2]], dtype=np.float32) / 3.
true_mean = mu
true_scale = scale_tril
true_covariance = np.matmul(true_scale, true_scale.T)
true_variance = np.diag(true_covariance)
true_stddev = np.sqrt(true_variance)
with self.test_session() as sess:
dist = ds.MultivariateNormalTriL(
loc=mu,
scale_tril=scale_tril,
validate_args=True)
# The following distributions will test the KL divergence calculation.
mvn_chol = ds.MultivariateNormalTriL(
loc=np.array([0.5, 1.2], dtype=np.float32),
scale_tril=np.array([[3., 0], [1, 2]], dtype=np.float32),
validate_args=True)
n = int(10e3)
samps = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(samps, 0)
x = samps - sample_mean
sample_covariance = math_ops.matmul(x, x, transpose_a=True) / n
sample_kl_chol = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol = ds.kl_divergence(dist, mvn_chol)
scale = dist.scale.to_dense()
[
sample_mean_,
analytical_mean_,
sample_covariance_,
analytical_covariance_,
analytical_variance_,
analytical_stddev_,
sample_kl_chol_, analytical_kl_chol_,
scale_,
] = sess.run([
sample_mean,
dist.mean(),
sample_covariance,
dist.covariance(),
dist.variance(),
dist.stddev(),
sample_kl_chol, analytical_kl_chol,
scale,
])
sample_variance_ = np.diag(sample_covariance_)
sample_stddev_ = np.sqrt(sample_variance_)
logging.vlog(2, "true_mean:\n{} ".format(true_mean))
logging.vlog(2, "sample_mean:\n{}".format(sample_mean_))
logging.vlog(2, "analytical_mean:\n{}".format(analytical_mean_))
logging.vlog(2, "true_covariance:\n{}".format(true_covariance))
logging.vlog(2, "sample_covariance:\n{}".format(sample_covariance_))
logging.vlog(
2, "analytical_covariance:\n{}".format(analytical_covariance_))
logging.vlog(2, "true_variance:\n{}".format(true_variance))
logging.vlog(2, "sample_variance:\n{}".format(sample_variance_))
logging.vlog(2, "analytical_variance:\n{}".format(analytical_variance_))
logging.vlog(2, "true_stddev:\n{}".format(true_stddev))
logging.vlog(2, "sample_stddev:\n{}".format(sample_stddev_))
logging.vlog(2, "analytical_stddev:\n{}".format(analytical_stddev_))
logging.vlog(2, "true_scale:\n{}".format(true_scale))
logging.vlog(2, "scale:\n{}".format(scale_))
logging.vlog(2, "kl_chol: analytical:{} sample:{}".format(
analytical_kl_chol_, sample_kl_chol_))
self.assertAllClose(true_mean, sample_mean_,
atol=0., rtol=0.03)
self.assertAllClose(true_mean, analytical_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(true_covariance, sample_covariance_,
atol=0., rtol=0.03)
self.assertAllClose(true_covariance, analytical_covariance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_variance, sample_variance_,
atol=0., rtol=0.02)
self.assertAllClose(true_variance, analytical_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_stddev, sample_stddev_,
atol=0., rtol=0.01)
self.assertAllClose(true_stddev, analytical_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(true_scale, scale_,
atol=0., rtol=1e-6)
self.assertAllClose(sample_kl_chol_, analytical_kl_chol_,
atol=0., rtol=0.02)
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l)
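# For reference, the closed-form divergence computed above is
#   KL(N(mu_a, sigma_a) || N(mu_b, sigma_b))
#     = 0.5 * ( tr(sigma_b^-1 sigma_a)
#               + (mu_b - mu_a)^T sigma_b^-1 (mu_b - mu_a)
#               - k
#               + ln(det(sigma_b) / det(sigma_a)) )
# where k is the event dimension; the terms map onto t, q, k and l in
# _compute_non_batch_kl above.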
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Marcos Cardoso
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import six
from elasticsearch import Elasticsearch
from elasticsearch.client.utils import query_params
from elasticsearch.exceptions import NotFoundError
from .utilities import get_random_id
# noinspection PyShadowingBuiltins
class FakeElasticsearch(Elasticsearch):
__documents_dict = None
def __init__(self):
self.__documents_dict = {}
@query_params()
def ping(self, params=None):
return True
@query_params()
def info(self, params=None):
return {
'status': 200,
'cluster_name': 'elasticmock',
'version':
{
'lucene_version': '4.10.4',
'build_hash': '00f95f4ffca6de89d68b7ccaf80d148f1f70e4d4',
'number': '1.7.5',
'build_timestamp': '2016-02-02T09:55:30Z',
'build_snapshot': False
},
'name': 'Nightwatch',
'tagline': 'You Know, for Search'
}
@query_params('consistency', 'op_type', 'parent', 'refresh', 'replication',
'routing', 'timeout', 'timestamp', 'ttl', 'version', 'version_type')
def index(self, index, doc_type, body, id=None, params=None):
if index not in self.__documents_dict:
self.__documents_dict[index] = list()
if id is None:
id = get_random_id()
version = 1
self.__documents_dict[index].append({
'_type': doc_type,
'_id': id,
'_source': body,
'_index': index,
'_version': version
})
return {
'_type': doc_type,
'_id': id,
'created': True,
'_version': version,
'_index': index
}
@query_params('parent', 'preference', 'realtime', 'refresh', 'routing')
def exists(self, index, doc_type, id, params=None):
result = False
if index in self.__documents_dict:
for document in self.__documents_dict[index]:
if document.get('_id') == id and document.get('_type') == doc_type:
result = True
break
return result
@query_params('_source', '_source_exclude', '_source_include', 'fields',
'parent', 'preference', 'realtime', 'refresh', 'routing', 'version',
'version_type')
def get(self, index, id, doc_type='_all', params=None):
result = None
if index in self.__documents_dict:
result = self.find_document(doc_type, id, index, result)
if result:
result['found'] = True
else:
error_data = {
'_index': index,
'_type': doc_type,
'_id': id,
'found': False
}
raise NotFoundError(404, json.dumps(error_data))
return result
def find_document(self, doc_type, id, index, result):
for document in self.__documents_dict[index]:
if document.get('_id') == id:
if doc_type == '_all':
result = document
break
elif document.get('_type') == doc_type:
result = document
break
return result
@query_params('_source', '_source_exclude', '_source_include', 'parent',
'preference', 'realtime', 'refresh', 'routing', 'version',
'version_type')
def get_source(self, index, doc_type, id, params=None):
document = self.get(index=index, doc_type=doc_type, id=id, params=params)
return document.get('_source')
@query_params('_source', '_source_exclude', '_source_include',
'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
'track_scores', 'version')
def count(self, index=None, doc_type=None, body=None, params=None):
searchable_indexes = self._normalize_index_to_list(index)
searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
i = 0
for searchable_index in searchable_indexes:
for document in self.__documents_dict[searchable_index]:
if searchable_doc_types\
and document.get('_type') not in searchable_doc_types:
continue
i += 1
result = {
'count': i,
'_shards': {
'successful': 1,
'failed': 0,
'total': 1
}
}
return result
@query_params('_source', '_source_exclude', '_source_include',
'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
'track_scores', 'version')
def search(self, index=None, doc_type=None, body=None, params=None):
searchable_indexes = self._normalize_index_to_list(index)
matches = self._find_match(index, doc_type, body)
result = {
'hits': {
'total': len(matches),
'max_score': 1.0
},
'_shards': {
# Simulate indexes with 1 shard each
'successful': len(searchable_indexes),
'failed': 0,
'total': len(searchable_indexes)
},
'took': 1,
'timed_out': False
}
hits = []
for match in matches:
match['_score'] = 1.0
hits.append(match)
result['hits']['hits'] = hits
return result
@query_params('consistency', 'parent', 'refresh', 'replication', 'routing',
'timeout', 'version', 'version_type')
def delete(self, index, doc_type, id, params=None):
found = False
if index in self.__documents_dict:
for document in self.__documents_dict[index]:
if document.get('_type') == doc_type and document.get('_id') == id:
found = True
self.__documents_dict[index].remove(document)
break
result_dict = {
'found': found,
'_index': index,
'_type': doc_type,
'_id': id,
'_version': 1,
}
if found:
return result_dict
else:
raise NotFoundError(404, json.dumps(result_dict))
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'preference', 'routing')
def suggest(self, body, index=None, params=None):
if index is not None and index not in self.__documents_dict:
raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(index))
result_dict = {}
for key, value in body.items():
text = value.get('text')
suggestion = int(text) + 1 if isinstance(text, int) \
else '{0}_suggestion'.format(text)
result_dict[key] = [
{
'text': text,
'length': 1,
'options': [
{
'text': suggestion,
'freq': 1,
'score': 1.0
}
],
'offset': 0
}
]
return result_dict
def _find_match(self, index, doc_type, body):
searchable_indexes = self._normalize_index_to_list(index)
searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
must = body['query']['bool']['must'][0] # only support one must
matches = []
for searchable_index in searchable_indexes:
self.find_document_in_searchable_index(matches, must, searchable_doc_types, searchable_index)
return matches
def find_document_in_searchable_index(self, matches, must, searchable_doc_types, searchable_index):
for document in self.__documents_dict[searchable_index]:
if searchable_doc_types and document.get('_type') not in searchable_doc_types:
continue
if 'match_phrase' in must:
self.match_must_phrase(document, matches, must)
else:
matches.append(document)
@staticmethod
def match_must_phrase(document, matches, must):
for query_id in must['match_phrase']:
query_val = must['match_phrase'][query_id]
if query_id in document['_source']:
if query_val in document['_source'][query_id]:
# use in as a proxy for match_phrase
matches.append(document)
def _normalize_index_to_list(self, index):
        # Ensure we have a list of indexes
if index is None:
searchable_indexes = self.__documents_dict.keys()
elif isinstance(index, six.string_types):
searchable_indexes = [index]
elif isinstance(index, list):
searchable_indexes = index
else:
# Is it the correct exception to use ?
raise ValueError("Invalid param 'index'")
# Check index(es) exists
for searchable_index in searchable_indexes:
if searchable_index not in self.__documents_dict:
raise NotFoundError(404,
'IndexMissingException[[{0}] missing]'
.format(searchable_index))
return searchable_indexes
@staticmethod
def _normalize_doc_type_to_list(doc_type):
        # Ensure we have a list of doc types
if doc_type is None:
searchable_doc_types = []
elif isinstance(doc_type, six.string_types):
searchable_doc_types = [doc_type]
elif isinstance(doc_type, list):
searchable_doc_types = doc_type
else:
# Is it the correct exception to use ?
            raise ValueError("Invalid param 'doc_type'")
return searchable_doc_types
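# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal sketch of how the mock client above can be exercised end to end.
# The enclosing class name is not visible in this excerpt; FakeElasticsearch
# (the name used by the elasticmock package) is assumed here and should be
# treated as a placeholder.
if __name__ == '__main__':
    client = FakeElasticsearch()
    created = client.index(index='users', doc_type='user', body={'name': 'Ada'})
    assert client.exists(index='users', doc_type='user', id=created['_id'])
    query = {'query': {'bool': {'must': [{'match_phrase': {'name': 'Ada'}}]}}}
    print(client.search(index='users', doc_type='user', body=query)['hits']['total'])  # -> 1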
|
|
from __future__ import annotations
import numbers
from typing import (
TYPE_CHECKING,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._typing import (
ArrayLike,
AstypeArg,
Dtype,
DtypeObj,
npt,
type_t,
)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
ExtensionDtype,
register_extension_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
if TYPE_CHECKING:
import pyarrow
@register_extension_dtype
class BooleanDtype(BaseMaskedDtype):
"""
Extension dtype for boolean data.
.. versionadded:: 1.0.0
.. warning::
BooleanDtype is considered experimental. The implementation and
parts of the API may change without warning.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> pd.BooleanDtype()
BooleanDtype
"""
name = "boolean"
# https://github.com/python/mypy/issues/4125
# error: Signature of "type" incompatible with supertype "BaseMaskedDtype"
@property
def type(self) -> type: # type: ignore[override]
return np.bool_
@property
def kind(self) -> str:
return "b"
@property
def numpy_dtype(self) -> np.dtype:
return np.dtype("bool")
@classmethod
def construct_array_type(cls) -> type_t[BooleanArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return BooleanArray
def __repr__(self) -> str:
return "BooleanDtype"
@property
def _is_boolean(self) -> bool:
return True
@property
def _is_numeric(self) -> bool:
return True
def __from_arrow__(
self, array: pyarrow.Array | pyarrow.ChunkedArray
) -> BooleanArray:
"""
Construct BooleanArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
if array.type != pyarrow.bool_():
raise TypeError(f"Expected array of boolean type, got {array.type} instead")
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
buflist = arr.buffers()
data = pyarrow.BooleanArray.from_buffers(
arr.type, len(arr), [None, buflist[1]], offset=arr.offset
).to_numpy(zero_copy_only=False)
if arr.null_count != 0:
mask = pyarrow.BooleanArray.from_buffers(
arr.type, len(arr), [None, buflist[0]], offset=arr.offset
).to_numpy(zero_copy_only=False)
mask = ~mask
else:
mask = np.zeros(len(arr), dtype=bool)
bool_arr = BooleanArray(data, mask)
results.append(bool_arr)
if not results:
return BooleanArray(
np.array([], dtype=np.bool_), np.array([], dtype=np.bool_)
)
else:
return BooleanArray._concat_same_type(results)
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
# Handle only boolean + np.bool_ -> boolean, since other cases like
# Int64 + boolean -> Int64 will be handled by the other type
if all(
isinstance(t, BooleanDtype)
or (isinstance(t, np.dtype) and (np.issubdtype(t, np.bool_)))
for t in dtypes
):
return BooleanDtype()
else:
return None
def coerce_to_array(
values, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask.
Parameters
----------
values : 1D list-like
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
if isinstance(values, BooleanArray):
if mask is not None:
raise ValueError("cannot pass mask for BooleanArray input")
values, mask = values._data, values._mask
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
mask_values = None
if isinstance(values, np.ndarray) and values.dtype == np.bool_:
if copy:
values = values.copy()
elif isinstance(values, np.ndarray) and is_numeric_dtype(values.dtype):
mask_values = isna(values)
values_bool = np.zeros(len(values), dtype=bool)
values_bool[~mask_values] = values[~mask_values].astype(bool)
if not np.all(
values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
):
raise TypeError("Need to pass bool-like values")
values = values_bool
else:
values_object = np.asarray(values, dtype=object)
inferred_dtype = lib.infer_dtype(values_object, skipna=True)
integer_like = ("floating", "integer", "mixed-integer-float")
if inferred_dtype not in ("boolean", "empty") + integer_like:
raise TypeError("Need to pass bool-like values")
mask_values = isna(values_object)
values = np.zeros(len(values), dtype=bool)
values[~mask_values] = values_object[~mask_values].astype(bool)
        # if the values were integer-like, validate they were actually 0/1's
if (inferred_dtype in integer_like) and not (
np.all(
values[~mask_values].astype(float)
== values_object[~mask_values].astype(float)
)
):
raise TypeError("Need to pass bool-like values")
if mask is None and mask_values is None:
mask = np.zeros(len(values), dtype=bool)
elif mask is None:
mask = mask_values
else:
if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
if mask_values is not None:
mask = mask | mask_values
else:
if copy:
mask = mask.copy()
else:
mask = np.array(mask, dtype=bool)
if mask_values is not None:
mask = mask | mask_values
if values.shape != mask.shape:
raise ValueError("values.shape and mask.shape must match")
return values, mask
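# Worked example (illustrative): coercing a list with a missing value yields a
# plain bool ndarray for the data plus a bool ndarray marking the NA position:
#   coerce_to_array([True, False, None])
#   -> (array([ True, False, False]), array([False, False,  True]))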
class BooleanArray(BaseMaskedArray):
"""
Array of boolean (True/False) data with missing values.
This is a pandas Extension array for boolean data, under the hood
represented by 2 numpy arrays: a boolean array with the data and
a boolean array with the mask (True indicating missing).
BooleanArray implements Kleene logic (sometimes called three-value
logic) for logical operations. See :ref:`boolean.kleene` for more.
    To construct a BooleanArray from generic array-like input, use
:func:`pandas.array` specifying ``dtype="boolean"`` (see examples
below).
.. versionadded:: 1.0.0
.. warning::
BooleanArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : numpy.ndarray
A 1-d boolean-dtype array with the data.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values (True
indicates missing).
copy : bool, default False
Whether to copy the `values` and `mask` arrays.
Attributes
----------
None
Methods
-------
None
Returns
-------
BooleanArray
Examples
--------
    Create a BooleanArray with :func:`pandas.array`:
>>> pd.array([True, False, None], dtype="boolean")
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
# Fill values used for any/all
_truthy_value = True
_falsey_value = False
_TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
_FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
raise TypeError(
"values should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
self._dtype = BooleanDtype()
super().__init__(values, mask, copy=copy)
@property
def dtype(self) -> BooleanDtype:
return self._dtype
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> BooleanArray:
if dtype:
assert dtype == "boolean"
values, mask = coerce_to_array(scalars, copy=copy)
return BooleanArray(values, mask)
@classmethod
def _from_sequence_of_strings(
cls,
strings: list[str],
*,
dtype: Dtype | None = None,
copy: bool = False,
true_values: list[str] | None = None,
false_values: list[str] | None = None,
) -> BooleanArray:
true_values_union = cls._TRUE_VALUES.union(true_values or [])
false_values_union = cls._FALSE_VALUES.union(false_values or [])
def map_string(s):
if isna(s):
return s
elif s in true_values_union:
return True
elif s in false_values_union:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
scalars = [map_string(x) for x in strings]
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value)
@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
...
@overload
def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
...
@overload
def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
...
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
ndarray or ExtensionArray
NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
        if incompatible type with a BooleanDtype, equivalent of same_kind
casting
"""
dtype = pandas_dtype(dtype)
if isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy)
if is_bool_dtype(dtype):
# astype_nansafe converts np.nan to True
if self._hasna:
raise ValueError("cannot convert float NaN to bool")
else:
return self._data.astype(dtype, copy=copy)
# for integer, error if there are missing values
if is_integer_dtype(dtype) and self._hasna:
raise ValueError("cannot convert NA to integer")
# for float dtype, ensure we use np.nan before casting (numpy cannot
# deal with pd.NA)
na_value = self._na_value
if is_float_dtype(dtype):
na_value = np.nan
# coerce
return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort : Return the indices that would sort this array.
"""
data = self._data.copy()
data[self._mask] = -1
return data
def any(self, *, skipna: bool = True, axis: int | None = 0, **kwargs):
"""
Return whether any element is True.
Returns False unless there is at least one element that is True.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be False, as for an empty array.
If `skipna` is False, the result will still be True if there is
at least one element that is True, otherwise NA will be returned
if there are NA's present.
axis : int or None, default 0
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.any : Numpy version of this method.
BooleanArray.all : Return whether all elements are True.
Examples
--------
The result indicates whether any element is True (and by default
skips NAs):
>>> pd.array([True, False, True]).any()
True
>>> pd.array([True, False, pd.NA]).any()
True
>>> pd.array([False, False, pd.NA]).any()
False
>>> pd.array([], dtype="boolean").any()
False
>>> pd.array([pd.NA], dtype="boolean").any()
False
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, False, pd.NA]).any(skipna=False)
True
>>> pd.array([False, False, pd.NA]).any(skipna=False)
<NA>
"""
kwargs.pop("axis", None)
nv.validate_any((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, False)
result = values.any(axis=axis)
if skipna:
return result
else:
if result or self.size == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def all(self, *, skipna: bool = True, axis: int | None = 0, **kwargs):
"""
Return whether all elements are True.
Returns True unless there is at least one element that is False.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be True, as for an empty array.
If `skipna` is False, the result will still be False if there is
at least one element that is False, otherwise NA will be returned
if there are NA's present.
axis : int or None, default 0
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.all : Numpy version of this method.
BooleanArray.any : Return whether any element is True.
Examples
--------
        The result indicates whether all elements are True (and by default
skips NAs):
>>> pd.array([True, True, pd.NA]).all()
True
>>> pd.array([True, False, pd.NA]).all()
False
>>> pd.array([], dtype="boolean").all()
True
>>> pd.array([pd.NA], dtype="boolean").all()
True
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, True, pd.NA]).all(skipna=False)
<NA>
>>> pd.array([True, False, pd.NA]).all(skipna=False)
False
"""
kwargs.pop("axis", None)
nv.validate_all((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, True)
result = values.all(axis=axis)
if skipna:
return result
else:
if not result or self.size == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_booleanarray = isinstance(other, BooleanArray)
other_is_scalar = lib.is_scalar(other)
mask = None
if other_is_booleanarray:
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other, dtype="bool")
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
other, mask = coerce_to_array(other, copy=False)
elif isinstance(other, np.bool_):
other = other.item()
if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
raise TypeError(
"'other' should be pandas.NA or a bool. "
f"Got {type(other).__name__} instead."
)
if not other_is_scalar and len(self) != len(other):
raise ValueError("Lengths must match to compare")
if op.__name__ in {"or_", "ror_"}:
result, mask = ops.kleene_or(self._data, other, self._mask, mask)
elif op.__name__ in {"and_", "rand_"}:
result, mask = ops.kleene_and(self._data, other, self._mask, mask)
elif op.__name__ in {"xor", "rxor"}:
result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
# error: Argument 2 to "BooleanArray" has incompatible type "Optional[Any]";
# expected "ndarray"
return BooleanArray(result, mask) # type: ignore[arg-type]
def _cmp_method(self, other, op):
from pandas.arrays import (
FloatingArray,
IntegerArray,
)
if isinstance(other, (IntegerArray, FloatingArray)):
return NotImplemented
mask = None
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
if other is libmissing.NA:
# numpy does not handle pd.NA well as "other" scalar (it returns
# a scalar False instead of an array)
result = np.zeros_like(self._data)
mask = np.ones_like(self._data)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
result = op(self._data, other)
# nans propagate
if mask is None:
mask = self._mask.copy()
else:
mask = self._mask | mask
return BooleanArray(result, mask, copy=False)
def _arith_method(self, other, op):
mask = None
op_name = op.__name__
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match")
# nans propagate
if mask is None:
mask = self._mask
if other is libmissing.NA:
mask |= True
else:
mask = self._mask | mask
if other is libmissing.NA:
# if other is NA, the result will be all NA and we can't run the
# actual op, so we need to choose the resulting dtype manually
if op_name in {"floordiv", "rfloordiv", "mod", "rmod", "pow", "rpow"}:
dtype = "int8"
else:
dtype = "bool"
result = np.zeros(len(self._data), dtype=dtype)
else:
if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
# Avoid DeprecationWarning: In future, it will be an error
# for 'np.bool_' scalars to be interpreted as an index
other = bool(other)
with np.errstate(all="ignore"):
result = op(self._data, other)
# divmod returns a tuple
if op_name == "divmod":
div, mod = result
return (
self._maybe_mask_result(div, mask, other, "floordiv"),
self._maybe_mask_result(mod, mask, other, "mod"),
)
return self._maybe_mask_result(result, mask, other, op_name)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
# if we have a float operand we are by-definition
# a float result
# or our op is a divide
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
from pandas.core.arrays import FloatingArray
return FloatingArray(result, mask, copy=False)
elif is_bool_dtype(result):
return BooleanArray(result, mask, copy=False)
elif is_integer_dtype(result):
from pandas.core.arrays import IntegerArray
return IntegerArray(result, mask, copy=False)
else:
result[mask] = np.nan
return result
def __abs__(self):
return self.copy()
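# Kleene-logic reference for the logical ops above (illustrative, not taken
# from this file): with pd.NA treated as "unknown",
#   pd.array([True, pd.NA], dtype="boolean") | pd.array([False, False], dtype="boolean")
#   -> [True, <NA>]    # unknown | False stays unknown
#   pd.array([False, pd.NA], dtype="boolean") & pd.array([True, False], dtype="boolean")
#   -> [False, False]  # unknown & False is definitely False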
|
|
from __future__ import print_function
import filecmp
import glob
import hashlib
from optparse import OptionParser
import os
import shutil
import sys
import numpy as np
sys.path.insert(0, os.path.join(os.pardir, os.pardir))
from input_set import InputSet, MGInputSet
import openmc
class TestHarness(object):
"""General class for running OpenMC regression tests."""
def __init__(self, statepoint_name, tallies_present=False):
self._sp_name = statepoint_name
self._tallies = tallies_present
self.parser = OptionParser()
self.parser.add_option('--exe', dest='exe', default='openmc')
self.parser.add_option('--mpi_exec', dest='mpi_exec', default=None)
self.parser.add_option('--mpi_np', dest='mpi_np', default='2')
self.parser.add_option('--update', dest='update', action='store_true',
default=False)
self._opts = None
self._args = None
def main(self):
"""Accept commandline arguments and either run or update tests."""
(self._opts, self._args) = self.parser.parse_args()
if self._opts.update:
self.update_results()
else:
self.execute_test()
def execute_test(self):
"""Run OpenMC with the appropriate arguments and check the outputs."""
try:
self._run_openmc()
self._test_output_created()
results = self._get_results()
self._write_results(results)
self._compare_results()
finally:
self._cleanup()
def update_results(self):
"""Update the results_true using the current version of OpenMC."""
try:
self._run_openmc()
self._test_output_created()
results = self._get_results()
self._write_results(results)
self._overwrite_results()
finally:
self._cleanup()
def _run_openmc(self):
if self._opts.mpi_exec is not None:
returncode = openmc.run(
openmc_exec=self._opts.exe,
mpi_args=[self._opts.mpi_exec, '-n', self._opts.mpi_np])
else:
returncode = openmc.run(openmc_exec=self._opts.exe)
assert returncode == 0, 'OpenMC did not exit successfully.'
def _test_output_created(self):
"""Make sure statepoint.* and tallies.out have been created."""
statepoint = glob.glob(self._sp_name)
assert len(statepoint) == 1, 'Either multiple or no statepoint files' \
' exist.'
assert statepoint[0].endswith('h5'), \
            'Statepoint file is not an HDF5 file.'
if self._tallies:
assert os.path.exists('tallies.out'), \
'Tally output file does not exist.'
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
statepoint = glob.glob(self._sp_name)[0]
sp = openmc.StatePoint(statepoint)
# Write out k-combined.
outstr = 'k-combined:\n'
form = '{0:12.6E} {1:12.6E}\n'
outstr += form.format(sp.k_combined[0], sp.k_combined[1])
# Write out tally data.
if self._tallies:
tally_num = 1
for tally_ind in sp.tallies:
tally = sp.tallies[tally_ind]
results = np.zeros((tally.sum.size * 2, ))
results[0::2] = tally.sum.ravel()
results[1::2] = tally.sum_sq.ravel()
results = ['{0:12.6E}'.format(x) for x in results]
outstr += 'tally ' + str(tally_num) + ':\n'
outstr += '\n'.join(results) + '\n'
tally_num += 1
# Hash the results if necessary.
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def _write_results(self, results_string):
"""Write the results to an ASCII file."""
with open('results_test.dat', 'w') as fh:
fh.write(results_string)
def _overwrite_results(self):
"""Overwrite the results_true with the results_test."""
shutil.copyfile('results_test.dat', 'results_true.dat')
def _compare_results(self):
"""Make sure the current results agree with the _true standard."""
compare = filecmp.cmp('results_test.dat', 'results_true.dat')
if not compare:
os.rename('results_test.dat', 'results_error.dat')
assert compare, 'Results do not agree.'
def _cleanup(self):
"""Delete statepoints, tally, and test files."""
output = glob.glob('statepoint.*.h5')
output += ['tallies.out', 'results_test.dat', 'summary.h5']
output += glob.glob('volume_*.h5')
for f in output:
if os.path.exists(f):
os.remove(f)
class HashedTestHarness(TestHarness):
"""Specialized TestHarness that hashes the results."""
def _get_results(self):
"""Digest info in the statepoint and return as a string."""
return super(HashedTestHarness, self)._get_results(True)
class CMFDTestHarness(TestHarness):
"""Specialized TestHarness for running OpenMC CMFD tests."""
def _get_results(self):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
statepoint = glob.glob(self._sp_name)[0]
sp = openmc.StatePoint(statepoint)
# Write out the eigenvalue and tallies.
outstr = super(CMFDTestHarness, self)._get_results()
# Write out CMFD data.
outstr += 'cmfd indices\n'
outstr += '\n'.join(['{0:12.6E}'.format(x) for x in sp.cmfd_indices])
outstr += '\nk cmfd\n'
outstr += '\n'.join(['{0:12.6E}'.format(x) for x in sp.k_cmfd])
outstr += '\ncmfd entropy\n'
outstr += '\n'.join(['{0:12.6E}'.format(x) for x in sp.cmfd_entropy])
outstr += '\ncmfd balance\n'
outstr += '\n'.join(['{0:12.6E}'.format(x) for x in sp.cmfd_balance])
outstr += '\ncmfd dominance ratio\n'
outstr += '\n'.join(['{0:10.3E}'.format(x) for x in sp.cmfd_dominance])
outstr += '\ncmfd openmc source comparison\n'
outstr += '\n'.join(['{0:12.6E}'.format(x) for x in sp.cmfd_srccmp])
outstr += '\ncmfd source\n'
cmfdsrc = np.reshape(sp.cmfd_src, np.product(sp.cmfd_indices),
order='F')
outstr += '\n'.join(['{0:12.6E}'.format(x) for x in cmfdsrc])
outstr += '\n'
return outstr
class ParticleRestartTestHarness(TestHarness):
"""Specialized TestHarness for running OpenMC particle restart tests."""
def _run_openmc(self):
# Set arguments
args = {'openmc_exec': self._opts.exe}
if self._opts.mpi_exec is not None:
args['mpi_args'] = [self._opts.mpi_exec, '-n', self._opts.mpi_np]
# Initial run
returncode = openmc.run(**args)
assert returncode == 0, 'OpenMC did not exit successfully.'
# Run particle restart
args.update({'restart_file': self._sp_name})
returncode = openmc.run(**args)
assert returncode == 0, 'OpenMC did not exit successfully.'
def _test_output_created(self):
"""Make sure the restart file has been created."""
particle = glob.glob(self._sp_name)
assert len(particle) == 1, 'Either multiple or no particle restart ' \
'files exist.'
assert particle[0].endswith('h5'), \
            'Particle restart file is not an HDF5 file.'
def _get_results(self):
"""Digest info in the statepoint and return as a string."""
# Read the particle restart file.
particle = glob.glob(self._sp_name)[0]
p = openmc.Particle(particle)
# Write out the properties.
outstr = ''
outstr += 'current batch:\n'
outstr += "{0:12.6E}\n".format(p.current_batch)
outstr += 'current generation:\n'
outstr += "{0:12.6E}\n".format(p.current_generation)
outstr += 'particle id:\n'
outstr += "{0:12.6E}\n".format(p.id)
outstr += 'run mode:\n'
outstr += "{0}\n".format(p.run_mode)
outstr += 'particle weight:\n'
outstr += "{0:12.6E}\n".format(p.weight)
outstr += 'particle energy:\n'
outstr += "{0:12.6E}\n".format(p.energy)
outstr += 'particle xyz:\n'
outstr += "{0:12.6E} {1:12.6E} {2:12.6E}\n".format(p.xyz[0], p.xyz[1],
p.xyz[2])
outstr += 'particle uvw:\n'
outstr += "{0:12.6E} {1:12.6E} {2:12.6E}\n".format(p.uvw[0], p.uvw[1],
p.uvw[2])
return outstr
class PyAPITestHarness(TestHarness):
def __init__(self, statepoint_name, tallies_present=False, mg=False):
super(PyAPITestHarness, self).__init__(statepoint_name,
tallies_present)
self.parser.add_option('--build-inputs', dest='build_only',
action='store_true', default=False)
if mg:
self._input_set = MGInputSet()
else:
self._input_set = InputSet()
def main(self):
"""Accept commandline arguments and either run or update tests."""
(self._opts, self._args) = self.parser.parse_args()
if self._opts.build_only:
self._build_inputs()
elif self._opts.update:
self.update_results()
else:
self.execute_test()
def execute_test(self):
"""Build input XMLs, run OpenMC, and verify correct results."""
try:
self._build_inputs()
inputs = self._get_inputs()
self._write_inputs(inputs)
self._compare_inputs()
self._run_openmc()
self._test_output_created()
results = self._get_results()
self._write_results(results)
self._compare_results()
finally:
self._cleanup()
def update_results(self):
"""Update results_true.dat and inputs_true.dat"""
try:
self._build_inputs()
inputs = self._get_inputs()
self._write_inputs(inputs)
self._overwrite_inputs()
self._run_openmc()
self._test_output_created()
results = self._get_results()
self._write_results(results)
self._overwrite_results()
finally:
self._cleanup()
def _build_inputs(self):
"""Write input XML files."""
self._input_set.build_default_materials_and_geometry()
self._input_set.build_default_settings()
self._input_set.export()
def _get_inputs(self):
"""Return a hash digest of the input XML files."""
xmls = ['geometry.xml', 'materials.xml', 'settings.xml',
'tallies.xml', 'plots.xml']
return ''.join([open(fname).read() for fname in xmls
if os.path.exists(fname)])
def _write_inputs(self, input_digest):
"""Write the digest of the input XMLs to an ASCII file."""
with open('inputs_test.dat', 'w') as fh:
fh.write(input_digest)
def _overwrite_inputs(self):
"""Overwrite inputs_true.dat with inputs_test.dat"""
shutil.copyfile('inputs_test.dat', 'inputs_true.dat')
def _compare_inputs(self):
"""Make sure the current inputs agree with the _true standard."""
compare = filecmp.cmp('inputs_test.dat', 'inputs_true.dat')
if not compare:
f = open('inputs_test.dat')
for line in f.readlines():
print(line)
f.close()
os.rename('inputs_test.dat', 'inputs_error.dat')
assert compare, 'Input files are broken.'
def _cleanup(self):
"""Delete XMLs, statepoints, tally, and test files."""
super(PyAPITestHarness, self)._cleanup()
output = ['materials.xml', 'geometry.xml', 'settings.xml',
'tallies.xml', 'inputs_test.dat']
for f in output:
if os.path.exists(f):
os.remove(f)
class HashedPyAPITestHarness(PyAPITestHarness):
def _get_results(self):
"""Digest info in the statepoint and return as a string."""
return super(HashedPyAPITestHarness, self)._get_results(True)
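# --- Usage sketch (illustrative only) ---------------------------------------
# A typical regression-test module wraps one of the harnesses above around the
# expected statepoint file name and delegates to main(); the file name and
# options below are placeholders, not taken from this excerpt.
if __name__ == '__main__':
    harness = PyAPITestHarness('statepoint.10.h5', tallies_present=True)
    harness.main()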
|
|
#!/usr/bin/env python
"""This plugin adds artifact functionality to the UI."""
import itertools
import StringIO
from grr.gui import renderers
from grr.gui.plugins import fileview
from grr.gui.plugins import forms
from grr.gui.plugins import semantic
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_lib
from grr.lib import parsers
from grr.lib import rdfvalue
class ArtifactListRenderer(forms.MultiSelectListRenderer):
"""Renderer for listing the available Artifacts."""
type = rdfvalue.ArtifactName
artifact_template = ("""
<div id='{{unique|escape}}_artifact_description'>
<h4><div name='artifact_name'/></h4>
<div name='artifact_description'/>
<table>
<tr><td>Labels<td><div name='artifact_labels'/></tr>
<tr><td>Platforms<td><div name='artifact_supported_os'/></tr>
<tr><td>Conditions<td><div name='artifact_conditions'/></tr>
<tr><td>Dependencies<td><div name='artifact_dependencies'/></tr>
<tr><td>Links<td><div name='artifact_links'/></tr>
<tr><td>Output Type<td><div name='artifact_output_type'/></tr>
</table>
<h5>Artifact Collectors</h5>
<table name='artifact_collectors'>
<tbody></tbody>
</table>
<h5>Artifact Processors</h5>
<table name='artifact_processors'>
<tbody></tbody>
</table>
</div>""")
layout_template = (
"""<div class="control-group">"""
+ forms.TypeDescriptorFormRenderer.default_description_view + """
<div id='{{unique|escape}}_artifact_renderer' class="controls">
<div>
<table class='artifact_table'>
<tr>
<td>
<input id='{{unique|escape}}_search'
placeholder="Search"></input><br>
<select id='{{unique|escape}}_os_filter'
placeholder="OS Filter"></input>
<td>
<td>
</tr>
<tr>
<td class="artifact_table">
<select id='{{unique|escape}}_artifact_list' class='artifact_list'
multiple />
<td class="artifact_table">
<select id='{{this.prefix|escape}}' class='artifact_list' multiple/>
<td class="artifact_table">"""
+ artifact_template + """
</tr>
<tr>
<td>
<a id='{{unique|escape}}_artifact_add'>Add</a>
<a id='{{unique|escape}}_artifact_add_all'
class='pull-right'>Add all </a>
<td>
<a id='{{unique|escape}}_select_clear'>Clear</a>
<a id='{{unique|escape}}_select_remove'
class='pull-right'>Remove </a>
<td>
</tr>
</table>
</div>
</div>
</div>
""")
def Layout(self, request, response):
"""Get available artifact information for display."""
# Get all artifacts that aren't Bootstrap and aren't the base class.
self.artifacts = {}
artifact.LoadArtifactsFromDatastore(token=request.token)
    for artifact_name, artifact_cls in artifact_lib.Artifact.classes.items():
      if artifact_cls is not artifact_lib.Artifact.top_level_class:
        if set(["Bootstrap"]).isdisjoint(artifact_cls.LABELS):
          self.artifacts[artifact_name] = artifact_cls
self.labels = artifact_lib.ARTIFACT_LABELS
# Convert artifacts into a dict usable from javascript.
artifact_dict = {}
for artifact_name, artifact_cls in self.artifacts.items():
if artifact_name == "Artifact":
continue
artifact_dict[artifact_name] = artifact_cls.ToExtendedDict()
processors = []
for processor in parsers.Parser.GetClassesByArtifact(artifact_name):
processors.append({"name": processor.__name__,
"output_types": processor.output_types,
"doc": processor.GetDescription()})
artifact_dict[artifact_name]["processors"] = processors
    # Skip our parent and call the TypeDescriptorFormRenderer directly.
response = renderers.TypeDescriptorFormRenderer.Layout(self, request,
response)
return self.CallJavascript(response, "ArtifactListRenderer.Layout",
prefix=self.prefix,
artifacts=artifact_dict,
supported_os=artifact_lib.SUPPORTED_OS_LIST,
labels=self.labels)
class ArtifactRDFValueRenderer(semantic.RDFValueRenderer):
"""A special renderer for ArtifactRDFValues."""
classname = "Artifact"
layout_template = renderers.Template(
"""
<div id={{unique|escape}}_artifact_description>"""
+ ArtifactListRenderer.artifact_template + """
</div>
<script>
var description_element = "{{unique|escapejs}}_artifact_description";
var artifact_obj = JSON.parse("{{this.artifact_str|escapejs}}");
grr.artifact_view.renderArtifactFromObject(artifact_obj, description_element);
$('div[name=artifact_name]').hide(); // Remove heading to clean up display.
</script>
""")
def Layout(self, request, response):
self.artifact_str = self.proxy.ToPrettyJson()
super(ArtifactRDFValueRenderer, self).Layout(request, response)
class ArtifactRawRDFValueRenderer(semantic.RDFValueRenderer):
"""A renderer for showing JSON format for ArtifactRDFValues."""
classname = "Artifact"
layout_template = renderers.Template(
"<pre>{{this.artifact_str|escape}}</pre>")
def Layout(self, request, response):
self.artifact_str = self.proxy.ToPrettyJson(extended=True)
super(ArtifactRawRDFValueRenderer, self).Layout(request, response)
class ArtifactManagerView(renderers.TableRenderer):
"""Artifact Manager table with toolbar."""
description = "Artifact Manager"
behaviours = frozenset(["Configuration"])
order = 50
toolbar = "ArtifactManagerToolbar"
def __init__(self, **kwargs):
super(ArtifactManagerView, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn("Artifact Name", width="5%"))
self.AddColumn(semantic.RDFValueColumn(
"Artifact Details", width="50%", renderer=ArtifactRDFValueRenderer))
self.AddColumn(semantic.RDFValueColumn(
"Artifact Raw", width="40%", renderer=ArtifactRawRDFValueRenderer))
def BuildTable(self, start_row, end_row, request):
"""Builds table artifacts."""
artifact_urn = rdfvalue.RDFURN("aff4:/artifact_store")
try:
collection = aff4.FACTORY.Open(artifact_urn,
aff4_type="RDFValueCollection",
token=request.token)
except IOError:
return
self.size = len(collection)
row_index = start_row
for value in itertools.islice(collection, start_row, end_row):
self.AddCell(row_index, "Artifact Name", value.name)
self.AddCell(row_index, "Artifact Details", value)
self.AddCell(row_index, "Artifact Raw", value)
row_index += 1
def Layout(self, request, response):
"""Populate the table state with the request."""
if self.toolbar:
tb_cls = renderers.Renderer.classes[self.toolbar]
tb_cls().Layout(request, response)
return super(ArtifactManagerView, self).Layout(request, response)
class ArtifactManagerToolbar(renderers.TemplateRenderer):
"""A navigation enhancing toolbar.
Internal State:
- aff4_path: The path we are viewing now in the table.
"""
post_parameters = ["aff4_path"]
event_queue = "file_select"
layout_template = renderers.Template("""
<ul id="toolbar_{{unique|escape}}" class="breadcrumb">
<li>
<button id='{{unique|escape}}_upload' class="btn"
title="Upload Artifacts as JSON"
data-toggle="modal" data-target="#upload_dialog_{{unique|escape}}">
<img src='/static/images/upload.png' class='toolbar_icon'>
</button>
</li>
</ul>
<div id="upload_dialog_{{unique|escape}}" class="modal hide" tabindex="-1"
role="dialog" aria-hidden="true">
<div class="modal-header">
<button id="upload_artifact_btn_{{unique|escape}}" type="button"
class="close" data-dismiss="modal" aria-hidden="true">
x</button>
<h3>Upload File</h3>
</div>
<div class="modal-body" id="upload_dialog_body_{{unique|escape}}"></div>
<div class="modal-footer">
<button id="upload_artifact_close_btn_{{unique|escape}}" class="btn"
data-dismiss="modal" aria-hidden="true">Close</button>
</div>
</div>
<script>
$("#upload_dialog_{{unique|escapejs}}").on("show", function () {
grr.layout("ArtifactJsonUploadView",
"upload_dialog_body_{{unique|escapejs}}");
});
</script>
""")
class ArtifactJsonUploadView(fileview.UploadView):
"""Renders a binary upload page."""
post_parameters = []
upload_handler = "ArtifactUploadHandler"
storage_path = "aff4:/artifact_store"
class ArtifactUploadHandler(fileview.UploadHandler):
"""Handles upload of a binary config file such as a driver."""
def RenderAjax(self, request, response):
"""Handle the upload via ajax."""
try:
self.uploaded_file = request.FILES.items()[0][1]
content = StringIO.StringIO()
for chunk in self.uploaded_file.chunks():
content.write(chunk)
self.dest_path = artifact.UploadArtifactJsonFile(
content.getvalue(), token=request.token)
return renderers.TemplateRenderer.Layout(self, request, response,
self.success_template)
except (IOError, artifact_lib.ArtifactDefinitionError) as e:
self.error = "Could not write artifact to database %s" % e
return renderers.TemplateRenderer.Layout(self, request, response,
self.error_template)
|
|
import numpy as np
import os
#embedding the position
def pos_embed(x):
if x < -60:
return 0
if x >= -60 and x <= 60:
return x+61
if x > 60:
return 122
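# e.g. pos_embed(-75) -> 0, pos_embed(-60) -> 1, pos_embed(0) -> 61,
#      pos_embed(60) -> 121, pos_embed(75) -> 122  (123 position buckets)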
#find the index of x in y, if x not in y, return -1
def find_index(x,y):
flag = -1
for i in range(len(y)):
if x != y[i]:
continue
else:
return i
return flag
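# e.g. find_index(3, [1, 2, 3]) -> 2 and find_index(5, [1, 2, 3]) -> -1;
# used below to check whether a one-hot label is already recorded for a pair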
#reading data
def init():
print 'reading word embedding data...'
vec = []
word2id = {}
f = open('./origin_data/vec.txt')
f.readline()
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
word2id[content[0]] = len(word2id)
content = content[1:]
content = [(float)(i) for i in content]
vec.append(content)
f.close()
word2id['UNK'] = len(word2id)
word2id['BLANK'] = len(word2id)
dim = 50
vec.append(np.random.normal(size=dim,loc=0,scale=0.05))
vec.append(np.random.normal(size=dim,loc=0,scale=0.05))
vec = np.array(vec,dtype=np.float32)
print 'reading relation to id'
relation2id = {}
f = open('./origin_data/relation2id.txt','r')
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
relation2id[content[0]] = int(content[1])
f.close()
#length of sentence is 70
fixlen = 70
#max length of position embedding is 60 (-60~+60)
maxlen = 60
train_sen = {} #{entity pair:[[[label1-sentence 1],[label1-sentence 2]...],[[label2-sentence 1],[label2-sentence 2]...]}
train_ans = {} #{entity pair:[label1,label2,...]} the label is one-hot vector
print 'reading train data...'
f = open('./origin_data/train.txt','r')
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
#get entity name
en1 = content[2]
en2 = content[3]
relation = 0
if content[4] not in relation2id:
relation = relation2id['NA']
else:
relation = relation2id[content[4]]
#put the same entity pair sentences into a dict
tup = (en1,en2)
label_tag = 0
if tup not in train_sen:
train_sen[tup]=[]
train_sen[tup].append([])
y_id = relation
label_tag = 0
label = [0 for i in range(len(relation2id))]
label[y_id] = 1
train_ans[tup] = []
train_ans[tup].append(label)
else:
y_id = relation
label_tag = 0
label = [0 for i in range(len(relation2id))]
label[y_id] = 1
temp = find_index(label,train_ans[tup])
if temp == -1:
train_ans[tup].append(label)
label_tag = len(train_ans[tup])-1
train_sen[tup].append([])
else:
label_tag = temp
sentence = content[5:-1]
en1pos = 0
en2pos = 0
for i in range(len(sentence)):
if sentence[i] == en1:
en1pos = i
if sentence[i] == en2:
en2pos = i
output = []
for i in range(fixlen):
word = word2id['BLANK']
rel_e1 = pos_embed(i - en1pos)
rel_e2 = pos_embed(i - en2pos)
output.append([word,rel_e1,rel_e2])
for i in range(min(fixlen,len(sentence))):
word = 0
if sentence[i] not in word2id:
word = word2id['UNK']
else:
word = word2id[sentence[i]]
output[i][0] = word
train_sen[tup][label_tag].append(output)
print('reading test data ...')
test_sen = {} #{entity pair:[[sentence 1],[sentence 2]...]}
test_ans = {} #{entity pair:[labels,...]} the labels is N-hot vector (N is the number of multi-label)
f = open('./origin_data/test.txt','r')
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
en1 = content[2]
en2 = content[3]
relation = 0
if content[4] not in relation2id:
relation = relation2id['NA']
else:
relation = relation2id[content[4]]
tup = (en1,en2)
if tup not in test_sen:
test_sen[tup]=[]
y_id = relation
label_tag = 0
label = [0 for i in range(len(relation2id))]
label[y_id] = 1
test_ans[tup] = label
else:
y_id = relation
test_ans[tup][y_id] = 1
sentence = content[5:-1]
en1pos = 0
en2pos = 0
for i in range(len(sentence)):
if sentence[i] == en1:
en1pos = i
if sentence[i] == en2:
en2pos = i
output = []
for i in range(fixlen):
word = word2id['BLANK']
rel_e1 = pos_embed(i - en1pos)
rel_e2 = pos_embed(i - en2pos)
output.append([word,rel_e1,rel_e2])
for i in range(min(fixlen,len(sentence))):
word = 0
if sentence[i] not in word2id:
word = word2id['UNK']
else:
word = word2id[sentence[i]]
output[i][0] = word
test_sen[tup].append(output)
train_x = []
train_y = []
test_x = []
test_y = []
print 'organizing train data'
f = open('./data/train_q&a.txt','w')
temp = 0
for i in train_sen:
if len(train_ans[i]) != len(train_sen[i]):
print 'ERROR'
lenth = len(train_ans[i])
for j in range(lenth):
train_x.append(train_sen[i][j])
train_y.append(train_ans[i][j])
f.write(str(temp)+'\t'+i[0]+'\t'+i[1]+'\t'+str(np.argmax(train_ans[i][j]))+'\n')
temp+=1
f.close()
print 'organizing test data'
f = open('./data/test_q&a.txt','w')
temp=0
for i in test_sen:
test_x.append(test_sen[i])
test_y.append(test_ans[i])
tempstr = ''
for j in range(len(test_ans[i])):
if test_ans[i][j]!=0:
tempstr = tempstr+str(j)+'\t'
f.write(str(temp)+'\t'+i[0]+'\t'+i[1]+'\t'+tempstr+'\n')
temp+=1
f.close()
train_x = np.array(train_x)
train_y = np.array(train_y)
test_x = np.array(test_x)
test_y = np.array(test_y)
np.save('./data/vec.npy',vec)
np.save('./data/train_x.npy',train_x)
np.save('./data/train_y.npy',train_y)
np.save('./data/testall_x.npy',test_x)
np.save('./data/testall_y.npy',test_y)
#get test data for P@N evaluation, in which only entity pairs with more than 1 sentence exist
print 'get test data for p@n test'
pone_test_x = []
pone_test_y = []
ptwo_test_x = []
ptwo_test_y = []
pall_test_x = []
pall_test_y = []
for i in range(len(test_x)):
if len(test_x[i]) > 1:
pall_test_x.append(test_x[i])
pall_test_y.append(test_y[i])
onetest = []
temp = np.random.randint(len(test_x[i]))
onetest.append(test_x[i][temp])
pone_test_x.append(onetest)
pone_test_y.append(test_y[i])
twotest = []
temp1 = np.random.randint(len(test_x[i]))
temp2 = np.random.randint(len(test_x[i]))
while temp1 == temp2:
temp2 = np.random.randint(len(test_x[i]))
twotest.append(test_x[i][temp1])
twotest.append(test_x[i][temp2])
ptwo_test_x.append(twotest)
ptwo_test_y.append(test_y[i])
pone_test_x = np.array(pone_test_x)
pone_test_y = np.array(pone_test_y)
ptwo_test_x = np.array(ptwo_test_x)
ptwo_test_y = np.array(ptwo_test_y)
pall_test_x = np.array(pall_test_x)
pall_test_y = np.array(pall_test_y)
np.save('./data/pone_test_x.npy',pone_test_x)
np.save('./data/pone_test_y.npy',pone_test_y)
np.save('./data/ptwo_test_x.npy',ptwo_test_x)
np.save('./data/ptwo_test_y.npy',ptwo_test_y)
np.save('./data/pall_test_x.npy',pall_test_x)
np.save('./data/pall_test_y.npy',pall_test_y)
def seperate():
print 'reading training data'
x_train = np.load('./data/train_x.npy')
train_word = []
train_pos1 = []
train_pos2 = []
    print 'separating train data'
for i in range(len(x_train)):
word = []
pos1 = []
pos2 = []
for j in x_train[i]:
temp_word = []
temp_pos1 = []
temp_pos2 = []
for k in j:
temp_word.append(k[0])
temp_pos1.append(k[1])
temp_pos2.append(k[2])
word.append(temp_word)
pos1.append(temp_pos1)
pos2.append(temp_pos2)
train_word.append(word)
train_pos1.append(pos1)
train_pos2.append(pos2)
train_word = np.array(train_word)
train_pos1 = np.array(train_pos1)
train_pos2 = np.array(train_pos2)
np.save('./data/train_word.npy',train_word)
np.save('./data/train_pos1.npy',train_pos1)
np.save('./data/train_pos2.npy',train_pos2)
print 'reading p-one test data'
x_test = np.load('./data/pone_test_x.npy')
    print 'separating p-one test data'
test_word = []
test_pos1 = []
test_pos2 = []
for i in range(len(x_test)):
word = []
pos1 = []
pos2 = []
for j in x_test[i]:
temp_word = []
temp_pos1 = []
temp_pos2 = []
for k in j:
temp_word.append(k[0])
temp_pos1.append(k[1])
temp_pos2.append(k[2])
word.append(temp_word)
pos1.append(temp_pos1)
pos2.append(temp_pos2)
test_word.append(word)
test_pos1.append(pos1)
test_pos2.append(pos2)
test_word = np.array(test_word)
test_pos1 = np.array(test_pos1)
test_pos2 = np.array(test_pos2)
np.save('./data/pone_test_word.npy',test_word)
np.save('./data/pone_test_pos1.npy',test_pos1)
np.save('./data/pone_test_pos2.npy',test_pos2)
print 'reading p-two test data'
x_test = np.load('./data/ptwo_test_x.npy')
    print 'separating p-two test data'
test_word = []
test_pos1 = []
test_pos2 = []
for i in range(len(x_test)):
word = []
pos1 = []
pos2 = []
for j in x_test[i]:
temp_word = []
temp_pos1 = []
temp_pos2 = []
for k in j:
temp_word.append(k[0])
temp_pos1.append(k[1])
temp_pos2.append(k[2])
word.append(temp_word)
pos1.append(temp_pos1)
pos2.append(temp_pos2)
test_word.append(word)
test_pos1.append(pos1)
test_pos2.append(pos2)
test_word = np.array(test_word)
test_pos1 = np.array(test_pos1)
test_pos2 = np.array(test_pos2)
np.save('./data/ptwo_test_word.npy',test_word)
np.save('./data/ptwo_test_pos1.npy',test_pos1)
np.save('./data/ptwo_test_pos2.npy',test_pos2)
print 'reading p-all test data'
x_test = np.load('./data/pall_test_x.npy')
    print 'separating p-all test data'
test_word = []
test_pos1 = []
test_pos2 = []
for i in range(len(x_test)):
word = []
pos1 = []
pos2 = []
for j in x_test[i]:
temp_word = []
temp_pos1 = []
temp_pos2 = []
for k in j:
temp_word.append(k[0])
temp_pos1.append(k[1])
temp_pos2.append(k[2])
word.append(temp_word)
pos1.append(temp_pos1)
pos2.append(temp_pos2)
test_word.append(word)
test_pos1.append(pos1)
test_pos2.append(pos2)
test_word = np.array(test_word)
test_pos1 = np.array(test_pos1)
test_pos2 = np.array(test_pos2)
np.save('./data/pall_test_word.npy',test_word)
np.save('./data/pall_test_pos1.npy',test_pos1)
np.save('./data/pall_test_pos2.npy',test_pos2)
    print 'separating test all data'
x_test = np.load('./data/testall_x.npy')
test_word = []
test_pos1 = []
test_pos2 = []
for i in range(len(x_test)):
word = []
pos1 = []
pos2 = []
for j in x_test[i]:
temp_word = []
temp_pos1 = []
temp_pos2 = []
for k in j:
temp_word.append(k[0])
temp_pos1.append(k[1])
temp_pos2.append(k[2])
word.append(temp_word)
pos1.append(temp_pos1)
pos2.append(temp_pos2)
test_word.append(word)
test_pos1.append(pos1)
test_pos2.append(pos2)
test_word = np.array(test_word)
test_pos1 = np.array(test_pos1)
test_pos2 = np.array(test_pos2)
np.save('./data/testall_word.npy',test_word)
np.save('./data/testall_pos1.npy',test_pos1)
np.save('./data/testall_pos2.npy',test_pos2)
def getsmall():
print 'reading training data'
word = np.load('./data/train_word.npy')
pos1 = np.load('./data/train_pos1.npy')
pos2 = np.load('./data/train_pos2.npy')
y = np.load('./data/train_y.npy')
new_word = []
new_pos1 = []
new_pos2 = []
new_y = []
#we slice some big batch in train data into small batches in case of running out of memory
print 'get small training data'
for i in range(len(word)):
lenth = len(word[i])
if lenth <= 1000:
new_word.append(word[i])
new_pos1.append(pos1[i])
new_pos2.append(pos2[i])
new_y.append(y[i])
        if 1000 < lenth <= 2000:
new_word.append(word[i][:1000])
new_word.append(word[i][1000:])
new_pos1.append(pos1[i][:1000])
new_pos1.append(pos1[i][1000:])
new_pos2.append(pos2[i][:1000])
new_pos2.append(pos2[i][1000:])
new_y.append(y[i])
new_y.append(y[i])
        if 2000 < lenth <= 3000:
new_word.append(word[i][:1000])
new_word.append(word[i][1000:2000])
new_word.append(word[i][2000:])
new_pos1.append(pos1[i][:1000])
new_pos1.append(pos1[i][1000:2000])
new_pos1.append(pos1[i][2000:])
new_pos2.append(pos2[i][:1000])
new_pos2.append(pos2[i][1000:2000])
new_pos2.append(pos2[i][2000:])
new_y.append(y[i])
new_y.append(y[i])
new_y.append(y[i])
        if 3000 < lenth <= 4000:
new_word.append(word[i][:1000])
new_word.append(word[i][1000:2000])
new_word.append(word[i][2000:3000])
new_word.append(word[i][3000:])
new_pos1.append(pos1[i][:1000])
new_pos1.append(pos1[i][1000:2000])
new_pos1.append(pos1[i][2000:3000])
new_pos1.append(pos1[i][3000:])
new_pos2.append(pos2[i][:1000])
new_pos2.append(pos2[i][1000:2000])
new_pos2.append(pos2[i][2000:3000])
new_pos2.append(pos2[i][3000:])
new_y.append(y[i])
new_y.append(y[i])
new_y.append(y[i])
new_y.append(y[i])
if lenth > 4000:
new_word.append(word[i][:1000])
new_word.append(word[i][1000:2000])
new_word.append(word[i][2000:3000])
new_word.append(word[i][3000:4000])
new_word.append(word[i][4000:])
new_pos1.append(pos1[i][:1000])
new_pos1.append(pos1[i][1000:2000])
new_pos1.append(pos1[i][2000:3000])
new_pos1.append(pos1[i][3000:4000])
new_pos1.append(pos1[i][4000:])
new_pos2.append(pos2[i][:1000])
new_pos2.append(pos2[i][1000:2000])
new_pos2.append(pos2[i][2000:3000])
new_pos2.append(pos2[i][3000:4000])
new_pos2.append(pos2[i][4000:])
new_y.append(y[i])
new_y.append(y[i])
new_y.append(y[i])
new_y.append(y[i])
new_y.append(y[i])
new_word = np.array(new_word)
new_pos1 = np.array(new_pos1)
new_pos2 = np.array(new_pos2)
new_y = np.array(new_y)
np.save('./data/small_word.npy',new_word)
np.save('./data/small_pos1.npy',new_pos1)
np.save('./data/small_pos2.npy',new_pos2)
np.save('./data/small_y.npy',new_y)
#get answer metric for PR curve evaluation
def getans():
test_y = np.load('./data/testall_y.npy')
eval_y = []
for i in test_y:
eval_y.append(i[1:])
allans = np.reshape(eval_y,(-1))
np.save('./data/allans.npy',allans)
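    # Note (illustrative): i[1:] drops the first column of each label vector,
    # assumed to be the 'NA' (no-relation) class from relation2id.txt, so the
    # flattened allans array only scores real relation classes in the PR curve.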
def get_metadata():
fwrite = open('./data/metadata.tsv','w')
f = open('./origin_data/vec.txt')
f.readline()
while True:
content = f.readline().strip()
if content == '':
break
name = content.split()[0]
fwrite.write(name+'\n')
f.close()
fwrite.close()
init()
seperate()
getsmall()
getans()
get_metadata()
|
|
from __future__ import unicode_literals
import datetime
import unittest
import warnings
from decimal import Decimal
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.dateparse import parse_date
from django.utils.six.moves import reload_module
from rest_framework import filters, generics, serializers, status
from rest_framework.compat import django_filters, reverse
from rest_framework.test import APIRequestFactory
from .models import BaseFilterableItem, BasicModel, FilterableItem
factory = APIRequestFactory()
if django_filters:
class FilterableItemSerializer(serializers.ModelSerializer):
class Meta:
model = FilterableItem
fields = '__all__'
# Basic filter on a list view.
class FilterFieldsRootView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_fields = ['decimal', 'date']
filter_backends = (filters.DjangoFilterBackend,)
    # These classes are used to test a filter class.
class SeveralFieldsFilter(django_filters.FilterSet):
text = django_filters.CharFilter(lookup_expr='icontains')
decimal = django_filters.NumberFilter(lookup_expr='lt')
date = django_filters.DateFilter(lookup_expr='gt')
class Meta:
model = FilterableItem
fields = ['text', 'decimal', 'date']
class FilterClassRootView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_class = SeveralFieldsFilter
filter_backends = (filters.DjangoFilterBackend,)
# These classes are used to test a misconfigured filter class.
class MisconfiguredFilter(django_filters.FilterSet):
text = django_filters.CharFilter(lookup_expr='icontains')
class Meta:
model = BasicModel
fields = ['text']
class IncorrectlyConfiguredRootView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_class = MisconfiguredFilter
filter_backends = (filters.DjangoFilterBackend,)
class FilterClassDetailView(generics.RetrieveAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_class = SeveralFieldsFilter
filter_backends = (filters.DjangoFilterBackend,)
# These classes are used to test base model filter support
class BaseFilterableItemFilter(django_filters.FilterSet):
text = django_filters.CharFilter()
class Meta:
model = BaseFilterableItem
fields = '__all__'
# Test the same filter using the deprecated internal FilterSet class.
class BaseFilterableItemFilterWithProxy(filters.FilterSet):
text = django_filters.CharFilter()
class Meta:
model = BaseFilterableItem
fields = '__all__'
class BaseFilterableItemFilterRootView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_class = BaseFilterableItemFilter
filter_backends = (filters.DjangoFilterBackend,)
class BaseFilterableItemFilterWithProxyRootView(BaseFilterableItemFilterRootView):
filter_class = BaseFilterableItemFilterWithProxy
# Regression test for #814
class FilterFieldsQuerysetView(generics.ListCreateAPIView):
queryset = FilterableItem.objects.all()
serializer_class = FilterableItemSerializer
filter_fields = ['decimal', 'date']
filter_backends = (filters.DjangoFilterBackend,)
class GetQuerysetView(generics.ListCreateAPIView):
serializer_class = FilterableItemSerializer
filter_class = SeveralFieldsFilter
filter_backends = (filters.DjangoFilterBackend,)
def get_queryset(self):
return FilterableItem.objects.all()
urlpatterns = [
url(r'^(?P<pk>\d+)/$', FilterClassDetailView.as_view(), name='detail-view'),
url(r'^$', FilterClassRootView.as_view(), name='root-view'),
url(r'^get-queryset/$', GetQuerysetView.as_view(),
name='get-queryset-view'),
]
class CommonFilteringTestCase(TestCase):
def _serialize_object(self, obj):
return {'id': obj.id, 'text': obj.text, 'decimal': str(obj.decimal), 'date': obj.date.isoformat()}
def setUp(self):
"""
Create 10 FilterableItem instances.
"""
base_data = ('a', Decimal('0.25'), datetime.date(2012, 10, 8))
for i in range(10):
            text = chr(i + ord(base_data[0])) * 3  # Produces strings 'aaa', 'bbb', etc.
decimal = base_data[1] + i
date = base_data[2] - datetime.timedelta(days=i * 2)
FilterableItem(text=text, decimal=decimal, date=date).save()
self.objects = FilterableItem.objects
self.data = [
self._serialize_object(obj)
for obj in self.objects.all()
]
class IntegrationTestFiltering(CommonFilteringTestCase):
"""
Integration tests for filtered list views.
"""
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_backend_deprecation(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
view = FilterFieldsRootView.as_view()
request = factory.get('/')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
self.assertTrue(issubclass(w[-1].category, PendingDeprecationWarning))
self.assertIn("'rest_framework.filters.DjangoFilterBackend' is pending deprecation.", str(w[-1].message))
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_no_df_deprecation(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
import django_filters.rest_framework
class DFFilterFieldsRootView(FilterFieldsRootView):
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
view = DFFilterFieldsRootView.as_view()
request = factory.get('/')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
assert len(w) == 0
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_get_filtered_fields_root_view(self):
"""
        GET requests to a ListCreateAPIView with filter_fields set should return filtered results.
"""
view = FilterFieldsRootView.as_view()
# Basic test with no filter.
request = factory.get('/')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
# Tests that the decimal filter works.
search_decimal = Decimal('2.25')
request = factory.get('/', {'decimal': '%s' % search_decimal})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if Decimal(f['decimal']) == search_decimal]
assert response.data == expected_data
# Tests that the date filter works.
search_date = datetime.date(2012, 9, 22)
request = factory.get('/', {'date': '%s' % search_date}) # search_date str: '2012-09-22'
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if parse_date(f['date']) == search_date]
assert response.data == expected_data
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_filter_with_queryset(self):
"""
Regression test for #814.
"""
view = FilterFieldsQuerysetView.as_view()
# Tests that the decimal filter works.
search_decimal = Decimal('2.25')
request = factory.get('/', {'decimal': '%s' % search_decimal})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if Decimal(f['decimal']) == search_decimal]
assert response.data == expected_data
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_filter_with_get_queryset_only(self):
"""
Regression test for #834.
"""
view = GetQuerysetView.as_view()
request = factory.get('/get-queryset/')
view(request).render()
# Used to raise "issubclass() arg 2 must be a class or tuple of classes"
        # here when neither `model` nor `queryset` was specified.
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_get_filtered_class_root_view(self):
"""
GET requests to filtered ListCreateAPIView that have a filter_class set
should return filtered results.
"""
view = FilterClassRootView.as_view()
# Basic test with no filter.
request = factory.get('/')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
# Tests that the decimal filter set with 'lt' in the filter class works.
search_decimal = Decimal('4.25')
request = factory.get('/', {'decimal': '%s' % search_decimal})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if Decimal(f['decimal']) < search_decimal]
assert response.data == expected_data
# Tests that the date filter set with 'gt' in the filter class works.
search_date = datetime.date(2012, 10, 2)
request = factory.get('/', {'date': '%s' % search_date}) # search_date str: '2012-10-02'
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if parse_date(f['date']) > search_date]
assert response.data == expected_data
# Tests that the text filter set with 'icontains' in the filter class works.
search_text = 'ff'
request = factory.get('/', {'text': '%s' % search_text})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if search_text in f['text'].lower()]
assert response.data == expected_data
        # Tests that multiple filters work.
search_decimal = Decimal('5.25')
search_date = datetime.date(2012, 10, 2)
request = factory.get('/', {
'decimal': '%s' % (search_decimal,),
'date': '%s' % (search_date,)
})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
expected_data = [f for f in self.data if parse_date(f['date']) > search_date and
Decimal(f['decimal']) < search_decimal]
assert response.data == expected_data
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_incorrectly_configured_filter(self):
"""
An error should be displayed when the filter class is misconfigured.
"""
view = IncorrectlyConfiguredRootView.as_view()
request = factory.get('/')
self.assertRaises(AssertionError, view, request)
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_base_model_filter(self):
"""
The `get_filter_class` model checks should allow base model filters.
"""
view = BaseFilterableItemFilterRootView.as_view()
request = factory.get('/?text=aaa')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert len(response.data) == 1
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_base_model_filter_with_proxy(self):
"""
The `get_filter_class` model checks should allow base model filters.
"""
view = BaseFilterableItemFilterWithProxyRootView.as_view()
request = factory.get('/?text=aaa')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert len(response.data) == 1
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_unknown_filter(self):
"""
GET requests with filters that aren't configured should return 200.
"""
view = FilterFieldsRootView.as_view()
search_integer = 10
request = factory.get('/', {'integer': '%s' % search_integer})
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
@override_settings(ROOT_URLCONF='tests.test_filters')
class IntegrationTestDetailFiltering(CommonFilteringTestCase):
"""
Integration tests for filtered detail views.
"""
def _get_url(self, item):
return reverse('detail-view', kwargs=dict(pk=item.pk))
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_get_filtered_detail_view(self):
"""
GET requests to filtered RetrieveAPIView that have a filter_class set
should return filtered results.
"""
item = self.objects.all()[0]
data = self._serialize_object(item)
# Basic test with no filter.
response = self.client.get(self._get_url(item))
assert response.status_code == status.HTTP_200_OK
assert response.data == data
        # Tests a decimal filter value that should exclude this item.
search_decimal = Decimal('4.25')
high_item = self.objects.filter(decimal__gt=search_decimal)[0]
response = self.client.get(
'{url}'.format(url=self._get_url(high_item)),
{'decimal': '{param}'.format(param=search_decimal)})
assert response.status_code == status.HTTP_404_NOT_FOUND
        # Tests a decimal filter value that should match this item.
search_decimal = Decimal('4.25')
low_item = self.objects.filter(decimal__lt=search_decimal)[0]
low_item_data = self._serialize_object(low_item)
response = self.client.get(
'{url}'.format(url=self._get_url(low_item)),
{'decimal': '{param}'.format(param=search_decimal)})
assert response.status_code == status.HTTP_200_OK
assert response.data == low_item_data
        # Tests that multiple filters work.
search_decimal = Decimal('5.25')
search_date = datetime.date(2012, 10, 2)
valid_item = self.objects.filter(decimal__lt=search_decimal, date__gt=search_date)[0]
valid_item_data = self._serialize_object(valid_item)
response = self.client.get(
'{url}'.format(url=self._get_url(valid_item)), {
'decimal': '{decimal}'.format(decimal=search_decimal),
'date': '{date}'.format(date=search_date)
})
assert response.status_code == status.HTTP_200_OK
assert response.data == valid_item_data
class SearchFilterModel(models.Model):
title = models.CharField(max_length=20)
text = models.CharField(max_length=100)
class SearchFilterSerializer(serializers.ModelSerializer):
class Meta:
model = SearchFilterModel
fields = '__all__'
class SearchFilterTests(TestCase):
def setUp(self):
# Sequence of title/text is:
#
# z abc
# zz bcd
# zzz cde
# ...
for idx in range(10):
title = 'z' * (idx + 1)
text = (
chr(idx + ord('a')) +
chr(idx + ord('b')) +
chr(idx + ord('c'))
)
SearchFilterModel(title=title, text=text).save()
def test_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('title', 'text')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'b'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'z', 'text': 'abc'},
{'id': 2, 'title': 'zz', 'text': 'bcd'}
]
def test_exact_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('=title', 'text')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'zzz'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'zzz', 'text': 'cde'}
]
def test_startswith_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('title', '^text')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'b'})
response = view(request)
assert response.data == [
{'id': 2, 'title': 'zz', 'text': 'bcd'}
]
def test_regexp_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('$title', '$text')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'z{2} ^b'})
response = view(request)
assert response.data == [
{'id': 2, 'title': 'zz', 'text': 'bcd'}
]
def test_search_with_nonstandard_search_param(self):
with override_settings(REST_FRAMEWORK={'SEARCH_PARAM': 'query'}):
reload_module(filters)
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModel.objects.all()
serializer_class = SearchFilterSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('title', 'text')
view = SearchListView.as_view()
request = factory.get('/', {'query': 'b'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'z', 'text': 'abc'},
{'id': 2, 'title': 'zz', 'text': 'bcd'}
]
reload_module(filters)
class AttributeModel(models.Model):
label = models.CharField(max_length=32)
class SearchFilterModelFk(models.Model):
title = models.CharField(max_length=20)
attribute = models.ForeignKey(AttributeModel, on_delete=models.CASCADE)
class SearchFilterFkSerializer(serializers.ModelSerializer):
class Meta:
model = SearchFilterModelFk
fields = '__all__'
class SearchFilterFkTests(TestCase):
def test_must_call_distinct(self):
filter_ = filters.SearchFilter()
prefixes = [''] + list(filter_.lookup_prefixes)
for prefix in prefixes:
assert not filter_.must_call_distinct(
SearchFilterModelFk._meta,
["%stitle" % prefix]
)
assert not filter_.must_call_distinct(
SearchFilterModelFk._meta,
["%stitle" % prefix, "%sattribute__label" % prefix]
)
def test_must_call_distinct_restores_meta_for_each_field(self):
# In this test case the attribute of the fk model comes first in the
# list of search fields.
filter_ = filters.SearchFilter()
prefixes = [''] + list(filter_.lookup_prefixes)
for prefix in prefixes:
assert not filter_.must_call_distinct(
SearchFilterModelFk._meta,
["%sattribute__label" % prefix, "%stitle" % prefix]
)
class SearchFilterModelM2M(models.Model):
title = models.CharField(max_length=20)
text = models.CharField(max_length=100)
attributes = models.ManyToManyField(AttributeModel)
class SearchFilterM2MSerializer(serializers.ModelSerializer):
class Meta:
model = SearchFilterModelM2M
fields = '__all__'
class SearchFilterM2MTests(TestCase):
def setUp(self):
        # Sequence of title/text is:
        #
        # z    abc
        # zz   bcd   (only this row is linked to attributes [1, 2, 3] below)
        # zzz  cde
        # ...
for idx in range(3):
label = 'w' * (idx + 1)
            AttributeModel(label=label).save()
for idx in range(10):
title = 'z' * (idx + 1)
text = (
chr(idx + ord('a')) +
chr(idx + ord('b')) +
chr(idx + ord('c'))
)
SearchFilterModelM2M(title=title, text=text).save()
SearchFilterModelM2M.objects.get(title='zz').attributes.add(1, 2, 3)
def test_m2m_search(self):
class SearchListView(generics.ListAPIView):
queryset = SearchFilterModelM2M.objects.all()
serializer_class = SearchFilterM2MSerializer
filter_backends = (filters.SearchFilter,)
search_fields = ('=title', 'text', 'attributes__label')
view = SearchListView.as_view()
request = factory.get('/', {'search': 'zz'})
response = view(request)
assert len(response.data) == 1
def test_must_call_distinct(self):
filter_ = filters.SearchFilter()
prefixes = [''] + list(filter_.lookup_prefixes)
for prefix in prefixes:
assert not filter_.must_call_distinct(
SearchFilterModelM2M._meta,
["%stitle" % prefix]
)
assert filter_.must_call_distinct(
SearchFilterModelM2M._meta,
["%stitle" % prefix, "%sattributes__label" % prefix]
)
class OrderingFilterModel(models.Model):
title = models.CharField(max_length=20, verbose_name='verbose title')
text = models.CharField(max_length=100)
class OrderingFilterRelatedModel(models.Model):
related_object = models.ForeignKey(OrderingFilterModel, related_name="relateds", on_delete=models.CASCADE)
class OrderingFilterSerializer(serializers.ModelSerializer):
class Meta:
model = OrderingFilterModel
fields = '__all__'
class DjangoFilterOrderingModel(models.Model):
date = models.DateField()
text = models.CharField(max_length=10)
class Meta:
ordering = ['-date']
class DjangoFilterOrderingSerializer(serializers.ModelSerializer):
class Meta:
model = DjangoFilterOrderingModel
fields = '__all__'
class DjangoFilterOrderingTests(TestCase):
def setUp(self):
data = [{
'date': datetime.date(2012, 10, 8),
'text': 'abc'
}, {
'date': datetime.date(2013, 10, 8),
'text': 'bcd'
}, {
'date': datetime.date(2014, 10, 8),
'text': 'cde'
}]
for d in data:
DjangoFilterOrderingModel.objects.create(**d)
@unittest.skipUnless(django_filters, 'django-filter not installed')
def test_default_ordering(self):
class DjangoFilterOrderingView(generics.ListAPIView):
serializer_class = DjangoFilterOrderingSerializer
queryset = DjangoFilterOrderingModel.objects.all()
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ['text']
ordering = ('-date',)
view = DjangoFilterOrderingView.as_view()
request = factory.get('/')
response = view(request)
assert response.data == [
{'id': 3, 'date': '2014-10-08', 'text': 'cde'},
{'id': 2, 'date': '2013-10-08', 'text': 'bcd'},
{'id': 1, 'date': '2012-10-08', 'text': 'abc'}
]
class OrderingFilterTests(TestCase):
def setUp(self):
# Sequence of title/text is:
#
# zyx abc
# yxw bcd
# xwv cde
for idx in range(3):
title = (
chr(ord('z') - idx) +
chr(ord('y') - idx) +
chr(ord('x') - idx)
)
text = (
chr(idx + ord('a')) +
chr(idx + ord('b')) +
chr(idx + ord('c'))
)
OrderingFilterModel(title=title, text=text).save()
def test_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
]
def test_reverse_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': '-text'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_incorrectfield_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'foobar'})
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_default_ordering(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('')
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_default_ordering_using_string(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = 'title'
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('')
response = view(request)
assert response.data == [
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 1, 'title': 'zyx', 'text': 'abc'},
]
def test_ordering_by_aggregate_field(self):
        # Create some related objects so we can order by an aggregate (count of relateds).
num_objs = [2, 5, 3]
for obj, num_relateds in zip(OrderingFilterModel.objects.all(),
num_objs):
for _ in range(num_relateds):
new_related = OrderingFilterRelatedModel(
related_object=obj
)
new_related.save()
class OrderingListView(generics.ListAPIView):
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = 'title'
ordering_fields = '__all__'
queryset = OrderingFilterModel.objects.all().annotate(
models.Count("relateds"))
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'relateds__count'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
]
def test_ordering_with_nonstandard_ordering_param(self):
with override_settings(REST_FRAMEWORK={'ORDERING_PARAM': 'order'}):
reload_module(filters)
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
serializer_class = OrderingFilterSerializer
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
view = OrderingListView.as_view()
request = factory.get('/', {'order': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
]
reload_module(filters)
def test_get_template_context(self):
class OrderingListView(generics.ListAPIView):
ordering_fields = '__all__'
serializer_class = OrderingFilterSerializer
queryset = OrderingFilterModel.objects.all()
filter_backends = (filters.OrderingFilter,)
request = factory.get('/', {'ordering': 'title'}, HTTP_ACCEPT='text/html')
view = OrderingListView.as_view()
response = view(request)
self.assertContains(response, 'verbose title')
def test_ordering_with_overridden_get_serializer_class(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
            # Note: no ordering_fields or serializer_class specified;
            # get_serializer_class below is used instead.
def get_serializer_class(self):
return OrderingFilterSerializer
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'text'})
response = view(request)
assert response.data == [
{'id': 1, 'title': 'zyx', 'text': 'abc'},
{'id': 2, 'title': 'yxw', 'text': 'bcd'},
{'id': 3, 'title': 'xwv', 'text': 'cde'},
]
def test_ordering_with_improper_configuration(self):
class OrderingListView(generics.ListAPIView):
queryset = OrderingFilterModel.objects.all()
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
            # Note: no ordering_fields, and neither serializer_class
            # nor get_serializer_class is specified.
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'text'})
with self.assertRaises(ImproperlyConfigured):
view(request)
class SensitiveOrderingFilterModel(models.Model):
username = models.CharField(max_length=20)
password = models.CharField(max_length=100)
# Three different styles of serializer.
# All should allow ordering by username, but not by password.
class SensitiveDataSerializer1(serializers.ModelSerializer):
username = serializers.CharField()
class Meta:
model = SensitiveOrderingFilterModel
fields = ('id', 'username')
class SensitiveDataSerializer2(serializers.ModelSerializer):
username = serializers.CharField()
password = serializers.CharField(write_only=True)
class Meta:
model = SensitiveOrderingFilterModel
fields = ('id', 'username', 'password')
class SensitiveDataSerializer3(serializers.ModelSerializer):
user = serializers.CharField(source='username')
class Meta:
model = SensitiveOrderingFilterModel
fields = ('id', 'user')
class SensitiveOrderingFilterTests(TestCase):
def setUp(self):
for idx in range(3):
username = {0: 'userA', 1: 'userB', 2: 'userC'}[idx]
password = {0: 'passA', 1: 'passC', 2: 'passB'}[idx]
SensitiveOrderingFilterModel(username=username, password=password).save()
def test_order_by_serializer_fields(self):
for serializer_cls in [
SensitiveDataSerializer1,
SensitiveDataSerializer2,
SensitiveDataSerializer3
]:
class OrderingListView(generics.ListAPIView):
queryset = SensitiveOrderingFilterModel.objects.all().order_by('username')
filter_backends = (filters.OrderingFilter,)
serializer_class = serializer_cls
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': '-username'})
response = view(request)
if serializer_cls == SensitiveDataSerializer3:
username_field = 'user'
else:
username_field = 'username'
# Note: Inverse username ordering correctly applied.
assert response.data == [
{'id': 3, username_field: 'userC'},
{'id': 2, username_field: 'userB'},
{'id': 1, username_field: 'userA'},
]
def test_cannot_order_by_non_serializer_fields(self):
for serializer_cls in [
SensitiveDataSerializer1,
SensitiveDataSerializer2,
SensitiveDataSerializer3
]:
class OrderingListView(generics.ListAPIView):
queryset = SensitiveOrderingFilterModel.objects.all().order_by('username')
filter_backends = (filters.OrderingFilter,)
serializer_class = serializer_cls
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'password'})
response = view(request)
if serializer_cls == SensitiveDataSerializer3:
username_field = 'user'
else:
username_field = 'username'
# Note: The passwords are not in order. Default ordering is used.
assert response.data == [
{'id': 1, username_field: 'userA'}, # PassB
{'id': 2, username_field: 'userB'}, # PassC
{'id': 3, username_field: 'userC'}, # PassA
]
|
|
from __future__ import division
import pickle
import os
import sys
import numpy as np
from numpy.linalg import norm
import pandas as pd
import pdb
def sliceup():
data = load_pkl(os.path.join('..', "data", 'logisticData.pkl'))
X, y = data['X'], data['y']
Xvalid, yvalid = data['Xvalidate'], data['yvalidate']
n, _ = X.shape
randseed = np.random.permutation(n)
X = X[randseed,:]
y = y[randseed]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
data = np.hstack((X[0:100, :], y[0:100].reshape((100, 1))))
np.savetxt("./split_data/logData1.csv", data, delimiter=',')
data = np.hstack((X[100:200, :], y[100:200].reshape((100, 1))))
np.savetxt("./split_data/logData2.csv", data, delimiter=',')
data = np.hstack((X[200:300, :], y[200:300].reshape((100, 1))))
np.savetxt("./split_data/logData3.csv", data, delimiter=',')
data = np.hstack((X[300:400, :], y[300:400].reshape((100, 1))))
np.savetxt("./split_data/logData4.csv", data, delimiter=',')
data = np.hstack((X[400:500, :], y[400:500].reshape((100, 1))))
np.savetxt("./split_data/logData5.csv", data, delimiter=',')
data = np.hstack((Xvalid, yvalid.reshape((500, 1))))
np.savetxt("./split_data/logTest.csv", data, delimiter=',')
def load_dataset(dataset_name):
# Load and standardize the data and add the bias term
if dataset_name == "logisticData":
data = load_pkl(os.path.join('..', "data", 'logisticData.pkl'))
X, y = data['X'], data['y']
Xvalid, yvalid = data['Xvalidate'], data['yvalidate']
n, _ = X.shape
randseed = np.random.permutation(n)
X = X[randseed,:]
y = y[randseed]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "slices":
slices = pd.read_csv(os.path.join('..', "data", 'slice_localization_data.csv'))
n, d = slices.shape
npslices = slices.ix[np.random.permutation(n),:].as_matrix()
split = int(n * 0.70)
X = npslices[0:split, 1:d-1]
y = npslices[0:split, -1]
Xvalid = npslices[(split+1):n, 1:d-1]
yvalid = npslices[(split+1):n, -1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
#y, mu_y, sigma_y = standardize_outputs(y)
#yvalid, _, _ = standardize_outputs(yvalid, mu_y, sigma_y)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "magic":
magic = pd.read_csv(os.path.join('..', "data", 'magic04.data.txt'))
nn, dd = magic.shape
y = magic.ix[:,dd-1].as_matrix()
y[np.where(y == 'g')] = 1
y[np.where(y == 'h')] = -1
npmagic = magic.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = npmagic[0:split-1, 0:dd-2]
y = npmagic[0:split-1, dd-1]
Xvalid = npmagic[split:nn-1, 0:dd-2]
yvalid = npmagic[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "bank":
bank = pd.read_csv(os.path.join('..', "data", 'data_banknote_authentication.txt'))
nn, dd = bank.shape
y = bank.ix[:,dd-1].as_matrix()
#y[np.where(y == 'g')] = 1
y[np.where(y == 0)] = -1
npbank = bank.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = npbank[0:split-1, 0:dd-2]
y = npbank[0:split-1, dd-1]
Xvalid = npbank[split:nn-1, 0:dd-2]
yvalid = npbank[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "transfusion":
temp = pd.read_csv(os.path.join('..', "data", 'transfusion.data.txt'))
nn, dd = temp.shape
y = temp.ix[:,dd-1].as_matrix()
#y[np.where(y == 'g')] = 1
y[np.where(y == 0)] = -1
nptemp = temp.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = nptemp[0:split-1, 0:dd-2]
y = nptemp[0:split-1, dd-1]
Xvalid = nptemp[split:nn-1, 0:dd-2]
yvalid = nptemp[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "tom":
temp = pd.read_csv(os.path.join('..', "data", 'TomsHardware-Absolute-Sigma-500.data.txt'))
nn, dd = temp.shape
y = temp.ix[:,dd-1].as_matrix()
#y[np.where(y == 4)] = 1
y[np.where(y == 0)] = -1
nptemp = temp.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = nptemp[0:split-1, 0:dd-2]
y = nptemp[0:split-1, dd-1]
Xvalid = nptemp[split:nn-1, 0:dd-2]
yvalid = nptemp[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "twitter":
temp = pd.read_csv(os.path.join('..', "data", 'Twitter-Absolute-Sigma-500.data.txt'))
nn, dd = temp.shape
y = temp.ix[:,dd-1].as_matrix()
#y[np.where(y == 4)] = 1
y[np.where(y == 0)] = -1
nptemp = temp.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = nptemp[0:split-1, 0:dd-2]
y = nptemp[0:split-1, dd-1]
Xvalid = nptemp[split:nn-1, 0:dd-2]
yvalid = nptemp[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "creditcard":
temp = pd.read_csv(os.path.join('..', "data", 'CreditCard.csv'))
nn, dd = temp.shape
y = temp.ix[:,dd-1].as_matrix()
#y[np.where(y == 4)] = 1
y[np.where(y == 0)] = -1
nptemp = temp.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = nptemp[0:split-1, 0:dd-2]
y = nptemp[0:split-1, dd-1]
Xvalid = nptemp[split:nn-1, 0:dd-2]
yvalid = nptemp[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "eye":
temp = pd.read_csv(os.path.join('..', "data", 'EEG Eye State.arff.txt'))
nn, dd = temp.shape
y = temp.ix[:,dd-1].as_matrix()
#y[np.where(y == 4)] = 1
y[np.where(y == 0)] = -1
nptemp = temp.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = nptemp[0:split-1, 0:dd-2]
y = nptemp[0:split-1, dd-1]
Xvalid = nptemp[split:nn-1, 0:dd-2]
yvalid = nptemp[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "diabetic":
temp = pd.read_csv(os.path.join('..', "data", 'messidor_features.arff.txt'))
nn, dd = temp.shape
y = temp.ix[:,dd-1].as_matrix()
#y[np.where(y == 4)] = 1
y[np.where(y == 0)] = -1
nptemp = temp.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = nptemp[0:split-1, 0:dd-2]
y = nptemp[0:split-1, dd-1]
Xvalid = nptemp[split:nn-1, 0:dd-2]
yvalid = nptemp[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "pulsars":
temp = pd.read_csv(os.path.join('..', "data", 'HTRU_2.csv'))
nn, dd = temp.shape
y = temp.ix[:,dd-1].as_matrix()
#y[np.where(y == 4)] = 1
y[np.where(y == 0)] = -1
nptemp = temp.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = nptemp[0:split-1, 0:dd-2]
y = nptemp[0:split-1, dd-1]
Xvalid = nptemp[split:nn-1, 0:dd-2]
yvalid = nptemp[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "occupancy":
temp = pd.read_csv(os.path.join('..', "data", 'datatraining.txt'))
nn, dd = temp.shape
y = temp.ix[:,dd-1].as_matrix()
#y[np.where(y == 4)] = 1
y[np.where(y == 0)] = -1
nptemp = temp.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = nptemp[0:split-1, 0:dd-2]
y = nptemp[0:split-1, dd-1]
Xvalid = nptemp[split:nn-1, 0:dd-2]
yvalid = nptemp[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "skin":
temp = pd.read_csv(os.path.join('..', "data", 'Skin_NonSkin.csv'))
nn, dd = temp.shape
y = temp.ix[:,dd-1].as_matrix()
#y[np.where(y == 4)] = 1
y[np.where(y == 0)] = -1
nptemp = temp.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = nptemp[0:split-1, 0:dd-2]
y = nptemp[0:split-1, dd-1]
Xvalid = nptemp[split:nn-1, 0:dd-2]
yvalid = nptemp[split:nn-1, dd-1]
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
elif dataset_name == "sns":
sns = pd.read_csv(os.path.join('..', 'data', 'sns.txt'), sep="\t")
nn, dd = sns.shape
npsns = sns.ix[np.random.permutation(nn),:].as_matrix().astype(int)
split = int(nn * 0.70)
X = npsns[0:split-1, 0:dd-2]
y = ((npsns[0:split-1, dd-1] - 1.5) * 2).astype(int)
Xvalid = npsns[split:nn-1, 0:dd-2]
yvalid = ((npsns[split:nn-1, dd-1] - 1.5) * 2).astype(int)
X, mu, sigma = standardize_cols(X)
Xvalid, _, _ = standardize_cols(Xvalid, mu, sigma)
X = np.hstack([np.ones((X.shape[0], 1)), X])
Xvalid = np.hstack([np.ones((Xvalid.shape[0], 1)), Xvalid])
X = normalize_rows(X)
Xvalid = normalize_rows(Xvalid)
return {"X":X, "y":y,
"Xvalid":Xvalid,
"yvalid":yvalid}
def normalize_rows(X):
    # Sets all rows to have L2 norm of 1. Needed for differential privacy.
nn, dd = X.shape
for i in range(nn):
X[i,] = X[i,] / norm(X[i,], 2)
return X
def standardize_cols(X, mu=None, sigma=None):
    # Standardize each column to have mean 0 and variance 1.
n_rows, n_cols = X.shape
if mu is None:
mu = np.mean(X, axis=0)
if sigma is None:
sigma = np.std(X, axis=0)
sigma[sigma < 1e-8] = 1.
return (X - mu) / sigma, mu, sigma
def standardize_outputs(y, mu=None, sigma=None):
if mu is None:
mu = np.mean(y)
if sigma is None:
sigma = np.std(y)
if sigma < 1e-8:
sigma = 1.
return (y - mu) / sigma, mu, sigma
def check_gradient(model, X, y):
# This checks that the gradient implementation is correct
w = np.random.rand(model.w.size)
f, g = model.funObj(w, X, y)
# Check the gradient
estimated_gradient = approx_fprime(w,
lambda w: model.funObj(w,X,y)[0],
epsilon=1e-6)
implemented_gradient = model.funObj(w, X, y)[1]
    if np.max(np.abs(estimated_gradient - implemented_gradient)) > 1e-4:
raise Exception('User and numerical derivatives differ:\n%s\n%s' %
(estimated_gradient[:5], implemented_gradient[:5]))
else:
print('User and numerical derivatives agree.')
def lap_noise(loc=0, scale=1, size=1):
return np.random.laplace(loc=loc, scale=scale, size=size)
def exp_noise(scale=1, size=1):
return np.random.exponential(scale=scale, size=size)
def approx_fprime(x, f_func, epsilon=1e-7):
# Approximate the gradient using the complex step method
n_params = x.size
e = np.zeros(n_params)
gA = np.zeros(n_params)
for n in range(n_params):
e[n] = 1.
        val = f_func(x + e * complex(0, epsilon))
gA[n] = np.imag(val) / epsilon
e[n] = 0
return gA
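# Quick illustrative check of the complex-step approximation above (not part of the
# original pipeline): for f(x) = sum(x ** 3) the exact gradient is 3 * x ** 2, and
# the complex-step estimate should agree with it to machine precision.
def _complex_step_example():
    x0 = np.array([1.0, 2.0, 3.0])
    g_est = approx_fprime(x0, lambda x: np.sum(x ** 3))
    assert np.allclose(g_est, 3 * x0 ** 2)
    return g_est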
def regression_error(y, yhat):
return 0.5 * np.sum(np.square((y - yhat)) / float(yhat.size))
def classification_error(y, yhat):
return np.sum(y!=yhat) / float(yhat.size)
def load_pkl(fname):
"""Reads a pkl file.
Parameters
----------
fname : the name of the .pkl file
Returns
-------
data :
Returns the .pkl file as a 'dict'
"""
if not os.path.isfile(fname):
raise ValueError('File {} does not exist.'.format(fname))
if sys.version_info[0] < 3:
# Python 2
with open(fname, 'rb') as f:
data = pickle.load(f)
else:
# Python 3
with open(fname, 'rb') as f:
data = pickle.load(f, encoding='latin1')
return data
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pathlib
import tempfile
from datetime import datetime
from unittest import TestCase
import pytest
from airflow.exceptions import AirflowException, DagRunAlreadyExists
from airflow.models import DAG, DagBag, DagModel, DagRun, Log, TaskInstance
from airflow.models.serialized_dag import SerializedDagModel
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
DEFAULT_DATE = datetime(2019, 1, 1, tzinfo=timezone.utc)
TEST_DAG_ID = "testdag"
TRIGGERED_DAG_ID = "triggerdag"
DAG_SCRIPT = (
"from datetime import datetime\n\n"
"from airflow.models import DAG\n"
"from airflow.operators.dummy import DummyOperator\n\n"
"dag = DAG(\n"
'dag_id="{dag_id}", \n'
'default_args={{"start_date": datetime(2019, 1, 1)}}, \n'
"schedule_interval=None,\n"
")\n"
'task = DummyOperator(task_id="test", dag=dag)'
).format(dag_id=TRIGGERED_DAG_ID)
class TestDagRunOperator(TestCase):
def setUp(self):
# Airflow relies on reading the DAG from disk when triggering it.
# Therefore write a temp file holding the DAG to trigger.
with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
self._tmpfile = f.name
f.write(DAG_SCRIPT)
f.flush()
with create_session() as session:
session.add(DagModel(dag_id=TRIGGERED_DAG_ID, fileloc=self._tmpfile))
session.commit()
self.dag = DAG(TEST_DAG_ID, default_args={"owner": "airflow", "start_date": DEFAULT_DATE})
dagbag = DagBag(f.name, read_dags_from_db=False, include_examples=False)
dagbag.bag_dag(self.dag, root_dag=self.dag)
dagbag.sync_to_db()
def tearDown(self):
"""Cleanup state after testing in DB."""
with create_session() as session:
session.query(Log).filter(Log.dag_id == TEST_DAG_ID).delete(synchronize_session=False)
for dbmodel in [DagModel, DagRun, TaskInstance, SerializedDagModel]:
session.query(dbmodel).filter(dbmodel.dag_id.in_([TRIGGERED_DAG_ID, TEST_DAG_ID])).delete(
synchronize_session=False
)
pathlib.Path(self._tmpfile).unlink()
def test_trigger_dagrun(self):
"""Test TriggerDagRunOperator."""
task = TriggerDagRunOperator(task_id="test_task", trigger_dag_id=TRIGGERED_DAG_ID, dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
with create_session() as session:
dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
assert len(dagruns) == 1
assert dagruns[0].external_trigger
def test_trigger_dagrun_custom_run_id(self):
task = TriggerDagRunOperator(
task_id="test_task",
trigger_dag_id=TRIGGERED_DAG_ID,
trigger_run_id="custom_run_id",
dag=self.dag,
)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
with create_session() as session:
dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
assert len(dagruns) == 1
assert dagruns[0].run_id == "custom_run_id"
def test_trigger_dagrun_with_execution_date(self):
"""Test TriggerDagRunOperator with custom execution_date."""
utc_now = timezone.utcnow()
task = TriggerDagRunOperator(
task_id="test_trigger_dagrun_with_execution_date",
trigger_dag_id=TRIGGERED_DAG_ID,
execution_date=utc_now,
dag=self.dag,
)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
with create_session() as session:
dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
assert len(dagruns) == 1
assert dagruns[0].external_trigger
assert dagruns[0].execution_date == utc_now
def test_trigger_dagrun_twice(self):
"""Test TriggerDagRunOperator with custom execution_date."""
utc_now = timezone.utcnow()
task = TriggerDagRunOperator(
task_id="test_trigger_dagrun_with_execution_date",
trigger_dag_id=TRIGGERED_DAG_ID,
execution_date=utc_now,
dag=self.dag,
poke_interval=1,
reset_dag_run=True,
wait_for_completion=True,
)
run_id = f"manual__{utc_now.isoformat()}"
with create_session() as session:
dag_run = DagRun(
dag_id=TRIGGERED_DAG_ID,
execution_date=utc_now,
state=State.SUCCESS,
run_type="manual",
run_id=run_id,
)
session.add(dag_run)
session.commit()
task.execute(None)
dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
assert len(dagruns) == 1
assert dagruns[0].external_trigger
assert dagruns[0].execution_date == utc_now
def test_trigger_dagrun_with_templated_execution_date(self):
"""Test TriggerDagRunOperator with templated execution_date."""
task = TriggerDagRunOperator(
task_id="test_trigger_dagrun_with_str_execution_date",
trigger_dag_id=TRIGGERED_DAG_ID,
execution_date="{{ execution_date }}",
dag=self.dag,
)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
with create_session() as session:
dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
assert len(dagruns) == 1
assert dagruns[0].external_trigger
assert dagruns[0].execution_date == DEFAULT_DATE
def test_trigger_dagrun_operator_conf(self):
"""Test passing conf to the triggered DagRun."""
task = TriggerDagRunOperator(
task_id="test_trigger_dagrun_with_str_execution_date",
trigger_dag_id=TRIGGERED_DAG_ID,
conf={"foo": "bar"},
dag=self.dag,
)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
with create_session() as session:
dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
assert len(dagruns) == 1
            assert dagruns[0].conf == {"foo": "bar"}
def test_trigger_dagrun_operator_templated_invalid_conf(self):
"""Test passing a conf that is not JSON Serializable raise error."""
with pytest.raises(AirflowException, match="^conf parameter should be JSON Serializable$"):
TriggerDagRunOperator(
task_id="test_trigger_dagrun_with_invalid_conf",
trigger_dag_id=TRIGGERED_DAG_ID,
conf={"foo": "{{ dag.dag_id }}", "datetime": timezone.utcnow()},
dag=self.dag,
)
def test_trigger_dagrun_operator_templated_conf(self):
"""Test passing a templated conf to the triggered DagRun."""
task = TriggerDagRunOperator(
task_id="test_trigger_dagrun_with_str_execution_date",
trigger_dag_id=TRIGGERED_DAG_ID,
conf={"foo": "{{ dag.dag_id }}"},
dag=self.dag,
)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
with create_session() as session:
dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
assert len(dagruns) == 1
            assert dagruns[0].conf == {"foo": TEST_DAG_ID}
def test_trigger_dagrun_with_reset_dag_run_false(self):
"""Test TriggerDagRunOperator with reset_dag_run."""
execution_date = DEFAULT_DATE
task = TriggerDagRunOperator(
task_id="test_task",
trigger_dag_id=TRIGGERED_DAG_ID,
execution_date=execution_date,
reset_dag_run=False,
dag=self.dag,
)
task.run(start_date=execution_date, end_date=execution_date, ignore_ti_state=True)
with pytest.raises(DagRunAlreadyExists):
task.run(start_date=execution_date, end_date=execution_date, ignore_ti_state=True)
def test_trigger_dagrun_with_reset_dag_run_true(self):
"""Test TriggerDagRunOperator with reset_dag_run."""
execution_date = DEFAULT_DATE
task = TriggerDagRunOperator(
task_id="test_task",
trigger_dag_id=TRIGGERED_DAG_ID,
execution_date=execution_date,
reset_dag_run=True,
dag=self.dag,
)
task.run(start_date=execution_date, end_date=execution_date, ignore_ti_state=True)
task.run(start_date=execution_date, end_date=execution_date, ignore_ti_state=True)
with create_session() as session:
dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
assert len(dagruns) == 1
assert dagruns[0].external_trigger
def test_trigger_dagrun_with_wait_for_completion_true(self):
"""Test TriggerDagRunOperator with wait_for_completion."""
execution_date = DEFAULT_DATE
task = TriggerDagRunOperator(
task_id="test_task",
trigger_dag_id=TRIGGERED_DAG_ID,
execution_date=execution_date,
wait_for_completion=True,
poke_interval=10,
allowed_states=[State.QUEUED],
dag=self.dag,
)
task.run(start_date=execution_date, end_date=execution_date)
with create_session() as session:
dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
assert len(dagruns) == 1
def test_trigger_dagrun_with_wait_for_completion_true_fail(self):
"""Test TriggerDagRunOperator with wait_for_completion but triggered dag fails."""
execution_date = DEFAULT_DATE
task = TriggerDagRunOperator(
task_id="test_task",
trigger_dag_id=TRIGGERED_DAG_ID,
execution_date=execution_date,
wait_for_completion=True,
poke_interval=10,
failed_states=[State.QUEUED],
dag=self.dag,
)
with pytest.raises(AirflowException):
task.run(start_date=execution_date, end_date=execution_date)
def test_trigger_dagrun_triggering_itself(self):
"""Test TriggerDagRunOperator that triggers itself"""
execution_date = DEFAULT_DATE
task = TriggerDagRunOperator(
task_id="test_task",
trigger_dag_id=self.dag.dag_id,
dag=self.dag,
)
task.run(start_date=execution_date, end_date=execution_date)
with create_session() as session:
dagruns = (
session.query(DagRun)
.filter(DagRun.dag_id == self.dag.dag_id)
.order_by(DagRun.execution_date)
.all()
)
assert len(dagruns) == 2
assert dagruns[1].state == State.QUEUED
def test_trigger_dagrun_triggering_itself_with_execution_date(self):
"""Test TriggerDagRunOperator that triggers itself with execution date,
fails with DagRunAlreadyExists"""
execution_date = DEFAULT_DATE
task = TriggerDagRunOperator(
task_id="test_task",
trigger_dag_id=self.dag.dag_id,
execution_date=execution_date,
dag=self.dag,
)
with pytest.raises(DagRunAlreadyExists):
task.run(start_date=execution_date, end_date=execution_date)
|
|
# Author: Kanika Sood
# Date: Feb 13
# Default timings available for 256 matrices (default solver: gmres + ilu, solver id: 32168839).
# Input file: '/Users/kanikas/Desktop/petsc_anamod_35.csv' (has all features + solver + class).
# Output file: '/Users/kanikas/Desktop/solver_pc.csv' (has all features + solver + solver_name + pc_name + class); the solver column is removed from the list manually for now.
# The input for this script is generated from CsvComparison.java.
import csv
from collections import OrderedDict
from itertools import islice
import operator
# Open and read the unique solver ids from solverids_names.csv and use them as the dictionary keys.
uniques_ids = {}
default_time = {}
with open('/Users/kanikas/Desktop/Feb22_MisPredictionAnalysis/3classlabelling/RS2/RFRS2combined.csv', 'r+') as csvinput:
#with open('/Users/kanikas/Desktop/Feb22_MisPredictionAnalysis/combined21505_feb22.csv', 'r+') as csvinput:
infile = csv.reader(csvinput)
csvoutput = '/Users/kanikas/Desktop/Feb22_MisPredictionAnalysis/3classlabelling/RS2/RS2default_Time433.csv'
with open(csvoutput,'w') as csvoutput:
writer = csv.writer(csvoutput)
        header = next(infile)
#print(header)
writer.writerow(header + ['default_time']) #write the header to the new decoupled file before writing the feature values
for row in islice(infile, 1, None):
if row[6] == "32168839" : #67
default_time[row[8]] = row[9]
writer.writerow(row)
print(default_time)
default_time={'hydr1c_A_76':'0.000341',
'hydr1c_A_27':'0.000184',
'hydr1c_A_26':'0.000185',
'hydr1c_A_25':'0.000222',
'hydr1c_A_23':'0.016615',
'hydr1c_A_22':'0.000432',
'hydr1c_A_21':'0.00035',
'Si5H12':'0.048989',
'g7jac020':'0.24685',
'g7jac140':'449.2588',
'rail_5177_E':'0.000117',
'ex18':'0.000585',
'bcsstm20':'0.000012',
'fpga_dcop_23':'0.15893',
'fpga_dcop_26':'0.15943',
'fpga_dcop_28':'17.19969',
'extr1b_A_15':'0.000181',
'bayer10':'0.000449',
'Pres_Poisson':'0.034215',
'TSOPF_RS_b162_c4':'0.001469',
'S40PI_n':'22.73836',
'mark3jac020sc':'0.084734',
'adder_dcop_04':'0.00013',
'adder_dcop_07':'0.000383',
'adder_dcop_01':'0.073208',
't2d_q9_A_43':'0.006103',
'extr1b_A_12':'0.013618',
'radfr1':'0.000026',
'hydr1c_A_24':'0.000186',
'extr1b_A_11':'0.000106',
'ck400':'0.028677',
't2d_q9_A_45':'0.317482',
'extr1b_A_10':'0.000108',
't2d_q9_A_44':'0.006147',
't2d_q4_A_03':'0.006511',
'hydr1c_A_20':'0.000206',
'jan99jac020sc':'58.17999',
'fd18':'0.000313',
'fd15':'0.009101',
'rdist1':'0.000271',
'rdist2':'0.000128',
'fd12':'0.000068',
't2d_q4_A_11':'0.006502',
'fs_760_2':'0.001147',
'as-caida_G_121':'0.001232',
'as-caida_G_120':'258.1891',
't2d_q4_A_17':'0.006381',
'as-caida_G_122':'0.001217',
't2d_q4_A_19':'0.006317',
't2d_q4_A_18':'0.3368',
'oscil_dcop_56':'0.086988',
'c-39':'0.000231',
'c-38':'0.00016',
't2d_q9_A_03':'0.335155',
'oscil_dcop_57':'0.0899',
'oscil_dcop_41':'0.089111',
'hydr1c_A_17':'0.000185',
'c-31':'0.024532',
'c-30':'0.000115',
'c-33':'0.000119',
'c-32':'0.000105',
'c-35':'0.000128',
'c-34':'0.000193',
'c-37':'0.000154',
'hydr1c_A_15':'0.000186',
'oscil_dcop_52':'0.087516',
'cz20468':'4.1895',
'rail_1357_E':'0.01738',
'oscil_dcop_50':'0.087447',
'oscil_dcop_53':'0.087165',
'hydr1c_A_13':'0.000234',
'oscil_trans_01':'0.020292',
't2d_q4_A_14':'0.006471',
'hydr1c_A_10':'0.029182',
's1rmt3m1':'0.040435',
'west0381':'0.00001',
'hvdc1':'213.4033',
'Trefethen_150':'0.000012',
'oscil_dcop_51':'0.087491',
'std1_Jac2':'0.002412',
'jan99jac040':'0.005207',
'fpga_dcop_20':'0.1578',
'g7jac060sc':'179.4371',
'fpga_dcop_21':'0.15799',
'viscoplastic2_C_4':'6.897766',
'viscoplastic2_C_5':'0.17968',
'viscoplastic2_C_3':'0.13963',
'LF10000_E':'0.000261',
'c-48':'0.300205',
'c-49':'0.000412',
'c-44':'0.000189',
'c-45':'0.000356',
'c-46':'0.000266',
'c-47':'0.000353',
'c-40':'0.000195',
'c-41':'0.000234',
'c-42':'0.000209',
'circuit_2':'0.000843',
'ex21':'0.003923',
'fpga_dcop_27':'0.158',
'bodyy6':'0.000354',
'extr1b_A_04':'0.000112',
'extr1b_A_05':'0.013844',
'extr1b_A_06':'0.00021',
'extr1b_A_07':'0.000358',
'extr1b_A_01':'0.000202',
'extr1b_A_02':'0.000204',
'extr1b_A_03':'0.000214',
'extr1b_A_08':'0.00018',
'extr1b_A_09':'0.000181',
'lung1':'0.02021',
'ex26':'1.398',
'cavity20':'0.69697',
'cavity21':'24.50512',
'cavity22':'0.96544',
'cavity23':'1.0435',
'cavity24':'1.2904',
'cavity25':'1.4848',
'fpga_dcop_10':'0.15807',
'oscil_dcop_08':'11.57782',
'g7jac020sc':'55.05309',
't2d_q4_A_24':'0.006635',
's2rmq4m1':'0.000479',
'cyl6':'0.033528',
'TSOPF_RS_b162_c1':'0.00027',
'fpga_dcop_14':'0.15906',
'adder_dcop_41':'0.000504',
'adder_dcop_43':'0.019027',
'adder_dcop_42':'0.000507',
'adder_dcop_45':'0.000312',
'adder_dcop_44':'0.000269',
'ted_B_unscaled':'0.000295',
'raefsky3':'0.18363',
'adder_dcop_48':'0.000499',
'raefsky1':'0.007827',
'onetone1':'0.000785',
'raefsky6':'0.001105',
'raefsky5':'0.036347',
'raefsky4':'0.003524',
'west0132':'0.000007',
'circuit204':'0.011021',
'nasa4704':'0.00021',
'g7jac010sc':'0.184038',
'lhr17c':'0.001003',
'hydr1c_A_70':'0.016711',
'hydr1c_A_71':'0.00021',
'hydr1c_A_72':'0.000277',
'hydr1c_A_73':'0.000184',
'hydr1c_A_74':'0.000186',
'hydr1c_A_75':'0.000209',
'extr1':'0.013004',
't2d_q9_A_18':'0.006322',
't2d_q9_A_19':'0.00645',
'hydr1':'0.000293',
'utm5940':'1.5039',
's3rmt3m1':'0.035763',
'e40r0100':'0.001481',
'rim':'360.0818',
'cz148':'0.000132',
'g7jac050sc':'0.098392',
'LFAT5_E':'0.000009',
'wang3':'0.024596',
'Zd_Jac3':'0.004462',
'vt2010':'0.000436',
'Dubcova1':'0.026835',
'std1_Jac3_db':'0.001126',
'wang4':'0.018522',
'g7jac080':'214.4989',
'bbmat':'0.123987',
't2d_q4_A_43':'0.006026',
't2d_q4_A_41':'0.006019',
't2d_q4_A_44':'0.011432',
'fp':'289.5589',
't2dal_bci_E':'0.000038',
'cz5108':'14.41127',
'oscil_dcop_38':'0.086737',
'oscil_dcop_39':'0.088339',
'west0156':'0.000014',
'chipcool0':'1.073009',
'chipcool1':'0.034948',
'oscil_dcop_32':'0.086821',
'oscil_dcop_33':'0.087493',
'oscil_dcop_30':'0.086866',
'oscil_dcop_31':'0.090486',
'oscil_dcop_36':'0.086962',
'oscil_dcop_37':'0.086477',
'oscil_dcop_34':'11.57676',
'oscil_dcop_35':'0.087309',
'rajat22':'281.7301',
'rajat27':'0.00706',
'poli3':'0.078904',
'poli4':'0.002707',
'lhr04':'0.000125',
'west0167':'0.000009',
'S20PI_n':'16.33091',
'b_dyn':'0.00007',
'jan99jac040sc':'0.069728',
'inlet':'0.000568',
'PGPgiantcompo':'0.000142',
'pores_3':'0.000464',
'hydr1c_A_38':'0.000204',
'hydr1c_A_39':'0.000352',
't2d_q4_A_10':'0.006607',
'hydr1c_A_34':'0.000218',
'hydr1c_A_35':'0.000229',
'hydr1c_A_36':'0.017321',
'hydr1c_A_37':'0.000366',
'hydr1c_A_30':'0.000216',
'hydr1c_A_31':'0.000206',
'hydr1c_A_32':'0.000223',
'hydr1c_A_33':'0.000207',
'heart2':'0.074416',
'ted_A':'6.263',
'oscil_dcop_54':'0.086579',
't2d_q9_A_31':'0.006361',
't2d_q4_A_40':'0.312631',
'fpga_dcop_32':'0.1571',
'fpga_dcop_31':'17.17541',
'fpga_dcop_30':'0.15854',
'fpga_dcop_37':'0.15769',
'fpga_dcop_36':'0.15775',
'fpga_dcop_35':'0.15845',
'fpga_dcop_34':'0.15765',
'impcol_a':'0.000009',
'g7jac140sc':'449.92',
'fpga_dcop_39':'0.15938',
'fpga_dcop_38':'0.15852',
'rail_5177':'0.023114',
'Zd_Jac3_db':'0.001207',
'bayer08':'0.50837',
'bayer09':'0.000141',
'bayer06':'0.50804',
'bayer07':'0.52749',
'bayer04':'0.001025',
'bayer05':'0.5283',
'bayer02':'1.5747',
'bayer03':'0.00074',
'adder_dcop_10':'0.033598',
'adder_dcop_11':'0.000089',
't2d_q4_A_12':'0.006477',
'nasa1824':'0.01111',
't2dah_a':'0.000439',
'hydr1c_A_64':'0.0002',
't2dal_bci_Aside':'32.6453',
'rail_20209_E':'0.036738',
't2d_q9_A_29':'0.318966',
'garon1':'0.425441',
'xingo3012':'0.000704',
'add32':'0.001612',
'hydr1c_A_28':'0.000218',
't2d_q9_A_22':'0.006344',
't2d_q9_A_21':'0.006548',
't2d_q9_A_27':'0.006326',
't2d_q9_A_07':'0.006516',
't2d_q9_A_25':'0.00634',
't2d_q9_A_24':'0.006349',
'g7jac040':'0.6211',
't2d_q4_A_15':'0.00636',
'hydr1c_A_68':'0.000287',
'spiral_E':'0.1371',
'Zd_Jac6_db':'0.001451',
'smt':'0.008338',
't2d_q4_A_13':'0.006342',
't2d_q4_A_06':'0.006604',
't2d_q4_A_07':'0.00648',
't2d_q4_A_05':'0.0066',
't2d_q4_A_02':'0.331075',
'as-caida_G_115':'5.9352',
't2d_q4_A_01':'0.00648',
'as-caida_G_119':'258.412',
'Reuters911_Day_38':'0.00008',
'filter2D':'0.018787',
't2d_q4_A_08':'0.006542',
't2d_q4_A_09':'0.006577',
'cz628':'0.208129',
'flowmeter0':'0.000142',
'LFAT5000_E':'0.024796',
'flowmeter5':'0.724053',
't2d_q4_A_16':'0.006341',
'LFAT5000_M':'0.001364',
'DK01R':'0.020493',
'juba40k':'0.104063',
'piston_M':'20.84722',
'viscoplastic1':'0.012969',
'TSOPF_RS_b9_c6':'0.000166',
'av41092':'0.003284',
'SiNa':'0.000316',
'g7jac060':'150.898',
'oscil_dcop_55':'11.57841',
'S80PI_n':'35.45733',
't2d_q9_A_36':'0.00625',
'rail_20209':'0.000462',
'Si10H16':'0.001817',
'piston_E':'1.0572',
'west2021':'0.000038',
't2d_q9_A_37':'0.006268',
'spiral':'0.017827',
'std1_Jac3':'0.002031',
'west0655':'0.000019',
'c-57':'0.000714',
'c-56':'0.000701',
'c-54':'0.000668',
'c-53':'0.000794',
'c-52':'0.000415',
'c-51':'0.000402',
'c-50':'0.000557',
'extr1b_A_39':'0.000183',
'extr1b_A_38':'0.000189',
'extr1b_A_31':'0.38093',
'extr1b_A_30':'0.000113',
'extr1b_A_33':'0.013579',
'extr1b_A_32':'0.000189',
'extr1b_A_35':'0.000181',
'extr1b_A_34':'0.37655',
'extr1b_A_37':'0.37439',
'extr1b_A_36':'0.000094',
'oscil_dcop_45':'0.088029',
'oscil_dcop_49':'0.087198',
'oscil_dcop_48':'0.086043',
'lhr17':'0.00108',
'lhr10':'0.000331',
'LFAT5000':'0.000306',
'oscil_dcop_44':'0.084981',
'lhr11':'0.000334',
'sinc18':'0.00154',
'sinc12':'0.000591',
'c-36':'0.000137',
'sinc15':'0.024831',
't2dal_bci':'0.012745',
't2d_q4_A_42':'0.006091',
'adder_dcop_52':'0.000843',
'adder_dcop_50':'0.000847',
'adder_dcop_51':'0.028563',
'adder_dcop_56':'0.000303',
'adder_dcop_54':'0.000849',
'cz308':'0.000339',
'mark3jac040sc':'0.262266',
'oscil_dcop_43':'0.088443',
'Zhao2':'85.02454',
'jan99jac020':'0.129045',
'hydr1c_A_45':'0.000223',
'hydr1c_A_44':'0.000369',
'hydr1c_A_47':'0.000291',
'hydr1c_A_46':'0.016937',
'hydr1c_A_41':'0.000186',
'hydr1c_A_43':'0.000203',
'hydr1c_A_42':'0.000204',
'ex13':'0.021507',
'hydr1c_A_49':'0.000184',
'hydr1c_A_48':'0.000205',
'fpga_dcop_48':'17.17434',
'plat1919':'0.021004',
'fpga_dcop_41':'0.1571',
'fpga_dcop_46':'0.15783',
'lhr34c':'0.001476',
'ex7':'0.00174',
'Zd_Jac2':'0.003849',
'ex5':'0.013775',
'Zd_Jac6':'0.003437',
'g7jac120':'378.0842',
'tube2':'0.001447',
'TSOPF_RS_b678_c1':'0.006199',
'watt_2':'0.00156',
'inlet_E':'1.6284',
'S10PI_n':'0.000012',
'S80PI_n1_E':'0.011741',
'rotor2':'0.019503',
'oscil_dcop_40':'0.085712',
'rotor1':'0.000013',
'crystk03':'0.003133',
'gyro':'0.053408',
'Reuters911_Day_64':'0.02333',
'utm1700b':'0.006861',
'as-caida':'0.054264',
't2d_q4_A_39':'0.00588',
'oscil_dcop_47':'0.085542',
't2d_q4_A_37':'0.319179',
't2d_q4_A_36':'0.006276',
't2d_q4_A_35':'0.00625',
't2d_q4_A_34':'0.006227',
't2d_q4_A_33':'0.006303',
't2d_q4_A_31':'0.006464',
't2d_q4_A_30':'0.00639',
'lhr34':'0.001486',
'c-19':'0.000052',
'c-18':'0.000044',
'extr1b':'0.017872',
'lund_b':'0.000018',
'oscil_dcop_07':'0.085464',
'oscil_dcop_06':'0.085786',
'oscil_dcop_05':'0.087003',
'oscil_dcop_04':'0.086717',
'oscil_dcop_03':'0.086252',
'oscil_dcop_02':'0.086617',
'oscil_dcop_01':'0.086724',
'oscil_dcop_09':'0.087159',
'jan99jac120sc':'19.58834',
'TSOPF_RS_b300_c1':'0.003537',
'igbt3':'0.034771',
't2d_q9_A_41':'0.011486',
'plbuckle':'0.00006',
'oscil_dcop_11':'11.57478',
'Zd_Jac2_db':'0.001087',
'jan99jac100':'0.47581',
'jazz':'0.00001',
'utm300':'0.10796',
'LFAT5':'0.000008',
't2d_q9_A_40':'0.006112',
'S20PI_n1_E':'0.016275',
'piston':'0.001295',
'plbuckle_G':'0.21133',
'vibrobox':'0.000808',
'hydr1c_A_58':'0.000201',
'nasa2910':'0.022636',
'ww_36_pmec_36':'0.000131',
'hydr1c_A_01':'0.000298',
'hydr1c_A_03':'0.000205',
'hydr1c_A_02':'0.016727',
'hydr1c_A_05':'0.000411',
'hydr1c_A_04':'0.000223',
'hydr1c_A_07':'0.000223',
'hydr1c_A_06':'0.000202',
'hydr1c_A_09':'0.000235',
'hydr1c_A_08':'0.000225',
'hydr1c_A_59':'0.000202',
't2d_q9_A_42':'0.006113',
'fpga_dcop_08':'17.17836',
'fpga_dcop_07':'0.16217',
'fpga_dcop_04':'0.15829',
'fpga_dcop_05':'0.16029',
'fpga_dcop_02':'0.15858',
'fpga_dcop_03':'0.15838',
'mark3jac100':'0.866779',
'shermanACb':'0.040958',
'shermanACa':'0.000187',
'TSOPF_RS_b300_c2':'0.004954',
'TSOPF_RS_b300_c3':'0.00734',
'adder_dcop_29':'0.000349',
't2d_q9_A_30':'0.006373',
'meg1':'0.000092',
'adder_dcop_23':'0.000416',
'adder_dcop_22':'0.00019',
'adder_dcop_21':'0.000181',
'adder_dcop_20':'0.016015',
'mark3jac080':'279.7814',
'swang1':'0.034928',
'swang2':'0.00055',
'west1505':'0.000022',
't2d_q9_A_38':'0.304457',
't2d_q9_A_39':'0.005875',
'add20':'0.004999',
'cbuckle_G':'4.3263',
't2d_q9_A_32':'0.006191',
't2d_q9_A_33':'0.006307',
't2d_q9_A_34':'0.006246',
't2d_q9_A_35':'0.006254',
't2d_q9_A_05':'0.012345',
'iprob':'0.029418',
'Si2':'0.000041',
'jan99jac100sc':'10.05608',
't2d_q9_A_16':'0.006394',
'adder_trans_01':'0.017312',
'Reuters911_Day_23':'0.000099',
'g7jac080sc':'245.4342',
'wathen120':'0.000884',
'viscoplastic1_C_7':'0.032767',
'ns3Da':'0.67185',
'viscoplastic1_C_5':'1.739036',
'd_ss':'0.011484',
'viscoplastic1_C_3':'0.033181',
'viscoplastic1_C_2':'0.040066',
'viscoplastic1_C_1':'0.024796',
'nmos3':'0.069358',
'g7jac040sc':'111.9564',
'utm3060':'0.014253',
'jan99jac060sc':'10.03244',
'viscoplastic1_C_4':'0.032908',
'SiH4':'0.000277',
'S40PI_n1_E':'0.019613',
'LF10000_M':'0.080768',
'poli':'0.000285',
't2d_q9_A_14':'0.006338',
'Reuters911_Day_54':'0.000071',
'circuit_1':'0.000563',
'powersim':'115.8297',
'std1_Jac2_db':'0.000756',
'mark3jac040':'144.0612',
'cage9':'0.00041',
'circuit_3':'88.98433',
'Zhao1':'0.004691',
'cage5':'0.000026',
'cage7':'0.000056',
'c-43':'0.000275',
't2d_q4_A_29':'0.006259',
't2d_q9_A_26':'0.006361',
't2d_q9_A_15':'0.006336',
'nasa2146':'0.000118',
'hydr1c':'0.032442',
'wathen100':'0.000942',
'mark3jac060':'212.0164',
'c-62':'0.06327',
'c-60':'0.000652',
'c-61':'0.000689',
't2d_q4_A_27':'0.006475',
'extr1b_A_28':'0.000181',
'extr1b_A_29':'0.000189',
'extr1b_A_26':'0.000366',
'extr1b_A_27':'0.000183',
'extr1b_A_24':'0.38177',
'extr1b_A_25':'0.000114',
'extr1b_A_22':'0.000095',
'extr1b_A_23':'0.00018',
'extr1b_A_20':'0.01297',
'extr1b_A_21':'0.37072',
'rajat05':'0.001065',
'rajat04':'0.058287',
'shyy41':'0.000059',
'lhr11c':'0.000334',
'hydr1c_A_40':'0.00035',
'LF10_E':'0.000009',
't2d_q4_A_45':'0.006173',
'memplus':'0.055465',
'airfoil_2d':'0.022197',
'cavity06':'0.024206',
'cavity07':'2.055075',
'cavity04':'0.22392',
'cavity05':'0.012323',
'cavity02':'0.003016',
'cavity03':'0.029578',
'cavity01':'0.000682',
'Ill_Stokes':'2.082097',
'cavity08':'0.10273',
'cavity09':'0.3173',
'lhr07c':'0.00034',
'adder_dcop_69':'0.023033',
'adder_dcop_68':'0.000664',
'adder_dcop_67':'0.000351',
'adder_dcop_64':'0.000398',
'ww_vref_6405':'0.017408',
'adder_dcop_61':'0.000477',
's2rmt3m1':'0.034724',
'n3c6-b1':'0.000011',
'n3c6-b7':'0.080625',
'c-27':'0.000095',
'b2_ss':'0.000078',
'heart3':'8.4108',
'jan99jac060':'0.364682',
'heart1':'26.128',
'nemeth03':'0.000712',
'lhr10c':'0.000487',
'Chebyshev1':'0.079631',
'Chebyshev3':'0.000701',
'Chebyshev2':'0.002548',
'goodwin':'121.8114',
'mark3jac020':'76.07818',
'hydr1c_A_52':'0.0002',
'hydr1c_A_53':'0.000183',
'hydr1c_A_50':'0.000356',
'hydr1c_A_51':'0.017203',
'hydr1c_A_56':'0.000191',
'hydr1c_A_57':'0.000204',
'hydr1c_A_54':'0.000221',
'hydr1c_A_55':'0.000187',
'nopss_11k':'0.568694',
't2d_q9':'0.00655',
'hydr1c_A_67':'0.000214',
'Na5':'0.000438',
't2d_q4':'0.34408',
'TSOPF_RS_b2052_c1':'0.011267',
'g7jac010':'0.055089',
'fpga_dcop_51':'17.17431',
'ted_B':'0.000298',
't3dl_a':'0.000978',
'CAG_mat364':'0.001115',
'poli_large':'0.001039',
't2d_q4_A_32':'0.006192',
'west0067':'0.000007',
'viscoplastic1_C_6':'0.032588',
't3dl':'0.043827',
'nopoly':'0.028213',
'zenios':'0.00004',
's3rmq4m1':'0.018681',
'Reuters911_Day_12':'0.021751',
'chipcool1_E':'0.03257',
't2d_q4_A_28':'0.006413',
'flowmeter5_E':'0.000086',
't2d_q4_A_25':'0.006342',
't2d_q4_A_26':'0.00643',
't2d_q4_A_20':'0.334142',
't2d_q4_A_21':'0.006594',
't2d_q4_A_22':'0.006444',
'cz40948':'333.2154',
'lhr02':'0.000095',
'lhr01':'0.000037',
'rajat13':'0.048531',
'lhr07':'0.000225',
'c-28':'0.000078',
'c-29':'0.000118',
'c-26':'0.000078',
'big':'2.5555',
'c-24':'0.000097',
'c-25':'0.000121',
'c-22':'0.00007',
'c-23':'0.000071',
'c-20':'0.000055',
'c-21':'0.00007',
't2d_q4_A_23':'0.006445',
'Reuters911':'0.000354',
'gyro_k':'0.001594',
'oscil_dcop_10':'0.085602',
'bips98_1450':'18.09195',
'oscil_dcop_12':'0.086823',
'oscil_dcop_13':'0.087757',
'oscil_dcop_14':'0.087849',
'oscil_dcop_15':'0.087111',
'oscil_dcop_16':'0.08514',
'oscil_dcop_17':'0.086484',
'oscil_dcop_18':'0.086829',
'oscil_dcop_19':'0.086428',
'invextr1_new':'29.591',
'CAG_mat1916':'0.0293',
'fv2':'0.000246',
'fv1':'0.000161',
'g7jac100':'314.2712',
'lhr14c':'0.000654',
'watt_1':'0.000918',
'oscil_dcop_46':'11.58181',
'young3c':'0.14184',
't2d_q9_A_06':'0.006491',
'rail_1357':'0.023687',
'rdist3a':'0.000092',
'extr1b_A_17':'0.37494',
'extr1b_A_16':'0.000189',
'extr1b_A_14':'0.000096',
'TSOPF_RS_b162_c3':'0.001141',
'extr1b_A_19':'0.37743',
'extr1b_A_18':'0.000094',
'hydr1c_A_16':'0.000224',
'hydr1c_A_14':'0.000206',
'football':'0.000006',
'hydr1c_A_12':'0.00021',
'adder_dcop_18':'0.000089',
'filter2D_E':'0.010305',
'hydr1c_A_11':'0.000254',
'bips07_3078':'0.049205',
'adder_dcop_19':'0.000174',
'hydr1c_A_18':'0.000202',
'hydr1c_A_19':'0.00025',
'fpga_dcop_19':'0.15895',
'fpga_dcop_18':'0.15778',
'wang2':'0.130403',
'onetone2':'0.000688',
'adder_dcop_17':'0.000088',
'ex28':'0.021035',
'fpga_dcop_12':'0.15735',
'adder_dcop_14':'0.000187',
'fpga_dcop_16':'17.17656',
'chem_master1':'15.42097',
'ted_A_unscaled':'160.113',
'TSOPF_RS_b39_c19':'0.034573',
't2dal':'0.0001',
'fv3':'0.020721',
't2dah':'0.000358',
'west0479':'0.000011',
'adder_dcop_35':'0.000271',
'mark3jac100sc':'0.003188',
'adder_dcop_30':'0.023064',
'adder_dcop_31':'0.000269',
'adder_dcop_32':'0.000504',
'wang1':'0.001407',
'adder_dcop_38':'0.000272',
'adder_dcop_39':'0.000262',
's3rmt3m3':'0.000386',
'flowmeter0_E':'0.02575',
'cbuckle':'0.032719',
'g7jac120sc':'378.4294',
'hydr1c_A_63':'0.000187',
'hydr1c_A_62':'0.000186',
'hydr1c_A_61':'0.000227',
'hydr1c_A_60':'0.017087',
'hydr1c_A_66':'0.000185',
'hydr1c_A_65':'0.000222',
't2d_q9_A_04':'0.006501',
'hydr1c_A_69':'0.000202',
'G61':'0.000094',
't2d_q9_A_01':'0.006481',
't2d_q9_A_02':'0.006581',
's1rmq4m1':'0.039526',
'lhr04c':'0.000125',
'cz2548':'2.924118',
'cell1':'56.64679',
'appu':'4.421133',
'cell2':'0.70276',
'pdb1HYS':'0.134905',
'qh1484':'0.000705',
'cz10228':'70.3564',
'cage8':'0.000126',
'west0497':'0.000018',
'TSOPF_RS_b39_c7':'0.000337',
'xingo_afonso_itaipu':'0.630927',
'extr1b_A_13':'0.000095',
't2d_q9_A_09':'0.006494',
'raefsky2':'0.013656',
'zeros_nopss_13k':'0.009516',
'pesa':'2.0776',
'cage6':'0.012658',
'jan99jac080sc':'0.47076',
'Reuters911_Day_47':'0.000081',
't520':'0.000602',
'mark3jac080sc':'279.78',
'GT01R':'0.004259',
'hydr1c_A_29':'0.000205',
'ex29':'0.020207',
't2d_q9_A_28':'0.006336',
't2d_q9_A_08':'0.006536',
'robot':'0.050883',
'LF10000':'0.036248',
'd_dyn':'0.000021',
't2d_q9_A_23':'0.006395',
't2dal_a':'0.00008',
'psmigr_2':'0.00071',
'oscil_dcop_29':'0.086952',
'oscil_dcop_28':'11.58117',
't2d_q9_A_20':'0.006632',
'cz1268':'0.012165',
'oscil_dcop_21':'0.086759',
'oscil_dcop_20':'0.085919',
'oscil_dcop_23':'0.086948',
'oscil_dcop_22':'0.086629',
'oscil_dcop_25':'0.086552',
'oscil_dcop_24':'0.085776',
'oscil_dcop_27':'0.085893',
'oscil_dcop_26':'0.085525',
'rajat14':'0.000052',
'rajat15':'0.003519',
'rajat11':'0.001176',
'rajat12':'0.002426',
'aft01':'0.027148',
'chipcool0_E':'0.000162',
'S10PI_n1_E':'0.00811',
'lhr14':'0.00097',
'd_dyn1':'0.011493',
'SiO':'0.071063',
'rajat19':'0.17685',
'west0989':'0.000017',
'cavity19':'0.59992',
'cavity18':'0.79127',
'jan99jac120':'1.603934',
'g7jac100sc':'314.4667',
'cavity11':'0.20657',
'cavity10':'3.574854',
'cavity13':'0.333',
'cavity12':'0.25624',
'cavity15':'1.0658',
'cavity14':'0.52393',
'cavity17':'0.79361',
'cavity16':'0.63559',
'jan99jac080':'0.421288',
't2d_q4_A_38':'0.011211',
't2d_q9_A_12':'0.012326',
'cage11':'0.005368',
'cage10':'0.001304',
'olafu':'0.001916',
'cryg10000':'1.3192',
't2d_q9_A_13':'0.00639',
't2d_q9_A_10':'0.00656',
'oscil_dcop_42':'0.088063',
't2d_q9_A_11':'0.333715',
'mark3jac060sc':'212.0599',
't2d_q4_A_04':'0.006479',
'extr1b_A_40':'28.57054',
'extr1b_A_41':'0.00019',
'extr1b_A_42':'0.36607',
'extr1b_A_43':'0.000095',
'extr1b_A_44':'0.000187',
't2d_q9_A_17':'0.006322'}
with open('/Users/kanikas/Desktop/Feb22_MisPredictionAnalysis/3classlabelling/RS2/RS2default_Time433.csv', 'r+') as csvinput:
infile = csv.reader(csvinput)
csvoutput = '/Users/kanikas/Desktop/Feb22_MisPredictionAnalysis/3classlabelling/RS2/RS2TimeComparisonWithDefault433.csv'
with open(csvoutput,'w') as csvout:
writer = csv.writer(csvout)
infile = csv.reader(csvinput)
writer.writerow(header + ['default_time'] + ['Time Difference'] + ['Comparison'] + ['SpeedUp'])
diff = 0.0
bad=0
good=0
for row in infile:
for key in default_time:
if row[8] == key :
print(key)
speed_up = float(default_time[key])/float(row[9])
diff = round(((float(row[9]) - float(default_time[key]))/float(default_time[key]))*100,2)
if diff>0:
bad+=1
ext = " % more time of default solver time"
else:
good+=1
ext = " % less time of default solver time"
diff1 = str(diff) + ext
writer.writerow(row + [default_time[key]] + [diff] + [diff1] + [speed_up])
print("We did better than default solver in ",good,"many cases out of ",(good+bad))
print(good,bad, (good+bad))
print("Result in file -->", csvoutput)
#writing only for good class labels
with open('/Users/kanikas/Desktop/Feb22_MisPredictionAnalysis/3classlabelling/RS2/RS2TimeComparisonWithDefault433.csv', 'r+') as csvinput:
infile = csv.reader(csvinput)
csvoutput = '/Users/kanikas/Desktop/Feb22_MisPredictionAnalysis/3classlabelling/RS2TimeComparisonWithDefaultForGood.csv'
with open(csvoutput,'w') as csvout:
writer = csv.writer(csvout)
infile = csv.reader(csvinput)
writer.writerow(header + ['default_time'] + ['Time Difference'] + ['Comparison'] + ['SpeedUp'])
for row in islice(infile, 1, None):
if row[9] == "good" :
writer.writerow(row)
print("Result for only good labels in file -->", csvoutput)
|
|
import argparse
import json
import os
import sys
from . import TimeoutError, run_script
def main():
parser = argparse.ArgumentParser(description='Run tests.')
parser.add_argument('action', nargs='+', choices=['build', 'test'])
parser.add_argument('--workspace', required=True, help='Workspace to build and test.')
parser.add_argument('--scheme', required=True, help='Scheme to build and test.')
parser.add_argument('--target', help='Test target.')
parser.add_argument('--retries', type=int, default=4, help='The maximum number of times to retry a set of tests without progress.')
parser.add_argument('--timeout', type=int, default=120, help='The number of seconds to wait without output before failing a test run.')
parser.add_argument('--partition', type=int, default=0, help='The partition index to run.')
parser.add_argument('--partition-count', dest='partition_count', type=int, default=1, help='The total number of partitions.')
parser.add_argument('--devices', default='iPhone 5,9.0;iPad 2,9.0')
args = parser.parse_args()
xctool_path = '/usr/local/bin/xctool'
build_path = os.path.abspath('build')
try:
os.makedirs(build_path)
except:
pass
for action in args.action:
if action == 'build':
build_tests(xctool_path=xctool_path, workspace=args.workspace, scheme=args.scheme, build_path=build_path, timeout=args.timeout)
elif action == 'test':
if not args.target:
print_message('Target is required when testing')
exit(1)
run_tests(xctool_path=xctool_path, workspace=args.workspace, scheme=args.scheme, target=args.target, build_path=build_path, partition=args.partition, partition_count=args.partition_count, devices=parse_devices(args.devices), retries=args.retries, timeout=args.timeout)
def print_message(message):
message = 'xctool-runner: ' + message
print '=' * len(message)
print message
print '=' * len(message)
sys.stdout.flush()
def parse_devices(string):
devices = []
for name, version in [device_spec.split(',', 1) for device_spec in string.split(';')]:
devices.append(dict(
destination='platform=iOS Simulator,OS={version},name={name}'.format(version=version, name=name),
description='{name} / iOS {version}'.format(version=version, name=name),
name=name,
version=version,
))
return devices
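# Example (hypothetical call, same format as the --devices default):
#   parse_devices('iPhone 5,9.0') returns
#   [{'destination': 'platform=iOS Simulator,OS=9.0,name=iPhone 5',
#     'description': 'iPhone 5 / iOS 9.0', 'name': 'iPhone 5', 'version': '9.0'}]
# Specs are split on ';', then each spec on its first ',' into name and OS version.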
def build_tests(xctool_path, workspace, scheme, build_path, timeout):
print_message('Building tests')
try:
script = '{xctool_path} -workspace "{workspace}" -scheme "{scheme}" -sdk iphonesimulator CONFIGURATION_BUILD_DIR="{build_path}" -derivedDataPath="{build_path}" build-tests -reporter pretty'.format(
xctool_path=xctool_path,
workspace=workspace,
scheme=scheme,
build_path=build_path,
)
script_result, _ = run_script(script, timeout)
if script_result != 0:
print_message('Failed to build tests')
exit(1)
except TimeoutError:
print_message('Timed out building tests')
exit(1)
def get_all_tests(xctool_path, workspace, scheme, build_path, target, timeout):
print_message('Listing tests')
stream_json_path = os.path.join(build_path, 'stream.json')
try:
script = '{xctool_path} -workspace "{workspace}" -scheme "{scheme}" -sdk iphonesimulator CONFIGURATION_BUILD_DIR="{build_path}" -derivedDataPath="{build_path}" run-tests -listTestsOnly -only {target} -reporter pretty -reporter json-stream:{stream_json_path}'.format(
xctool_path=xctool_path,
workspace=workspace,
scheme=scheme,
build_path=build_path,
target=target,
stream_json_path=stream_json_path,
)
script_result, _ = run_script(script, timeout)
if script_result != 0:
print_message('Failed to list tests')
exit(1)
except TimeoutError:
print_message('Timed out listing tests')
exit(1)
tests = []
with open(stream_json_path) as f:
for line in f.readlines():
event = json.loads(line)
if event['event'] == 'begin-test':
tests.append(dict(
class_name=event['className'],
method_name=event['methodName'],
))
return tests
def get_partitions(elements, count):
partitions = []
division = float(len(elements)) / float(count)
for i in xrange(0, count):
start = int(round(division * float(i)))
end = int(round(division * float(i + 1)))
partition = elements[start:end]
partitions.append(partition)
return partitions
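# Example (illustrative): get_partitions(list(range(10)), 3) computes division = 10/3 and
# rounds the slice boundaries to 0, 3, 7, 10, yielding partitions of sizes 3, 4 and 3;
# every element lands in exactly one partition and the original order is preserved.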
def run_tests(xctool_path, workspace, scheme, build_path, target, partition, partition_count, devices, retries, timeout):
tests = get_all_tests(xctool_path=xctool_path, workspace=workspace, scheme=scheme, build_path=build_path, target=target, timeout=timeout)
print_message('Got list of tests')
partitions = get_partitions(tests, partition_count)
partitioned_tests = partitions[partition]
for test in tests:
marker = '>' if test in partitioned_tests else ' '
print '\t{marker} {class_name}.{method_name}'.format(marker=marker, class_name=test['class_name'], method_name=test['method_name'])
for device in devices:
attempt = 1
remaining_tests = partitioned_tests
while remaining_tests and attempt <= retries + 1:
attempt_description = 'attempt {attempt}'.format(attempt=attempt)
print_message('Running {test_count} test(s) on {device_description} ({attempt_description})'.format(test_count=len(remaining_tests), device_description=device['description'], attempt_description=attempt_description))
for test in remaining_tests:
print '\t> {class_name}.{method_name}'.format(class_name=test['class_name'], method_name=test['method_name'])
stream_json_path = os.path.join(build_path, 'stream.json')
try:
os.remove(stream_json_path)
except:
pass
try:
script = '{xctool_path} -workspace "{workspace}" -scheme "{scheme}" -sdk iphonesimulator -destination "{destination}" CONFIGURATION_BUILD_DIR="{build_path}" -derivedDataPath="{build_path}" run-tests -freshSimulator -resetSimulator -only {target} -reporter pretty -reporter json-stream:{stream_json_path}'.format(
xctool_path=xctool_path,
workspace=workspace,
scheme=scheme,
destination=device['destination'],
build_path=build_path,
target='{target}:{tests}'.format(target=target, tests=','.join(['{}/{}'.format(test['class_name'], test['method_name']) for test in remaining_tests])),
stream_json_path=stream_json_path,
)
run_script(script, timeout)
except TimeoutError:
print_message('Timed out running tests')
failed_tests = list(remaining_tests)
with open(stream_json_path) as f:
for line in f.readlines():
event = json.loads(line)
if event['event'] == 'end-test' and event['succeeded'] == True:
failed_tests.remove(dict(
class_name=event['className'],
method_name=event['methodName'],
))
if failed_tests:
print_message('{failure_count} of {test_count} test(s) FAILED on {device_description} ({attempt_description})'.format(failure_count=len(failed_tests), test_count=len(remaining_tests), device_description=device['description'], attempt_description=attempt_description))
if len(failed_tests) < len(remaining_tests):
attempt = 1
else:
attempt += 1
remaining_tests = failed_tests
if remaining_tests:
print_message('Tests FAILED on {device_description} too many times without progress'.format(device_description=device['description']))
exit(1)
print_message('Tests PASSED on {device_description}'.format(device_description=device['description']))
print_message('All tests PASSED on all devices')
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import re
import sys
from ifmap_global import CamelCase, getGoLangType
class GoLangApiGenerator(object):
def __init__(self, parser, type_map, identifiers, metadata):
self._parser = parser
self._type_map = type_map
self._identifier_map = identifiers
self._metadata_map = metadata
self._top_level_map = {}
self._type_count = {}
def _GenerateTypeMap(self, dirname):
file = self._parser.makeFile(os.path.join(dirname, 'types.go'))
decl = """
package types
import (
"reflect"
"github.com/Juniper/contrail-go-api"
)
var (
TypeMap = map[string]reflect.Type {
"""
file.write(decl)
for ident in self._identifier_map.values():
decl = '\t\t"%s": reflect.TypeOf(%s{}),\n' % \
(ident.getName(), ident.getCppName())
file.write(decl)
decl = """
}
)
func init() {
contrail.RegisterTypeMap(TypeMap)
}
"""
file.write(decl)
def _GenerateObject(self, ident, filename):
""" Generate the class corresponding to an IF-MAP Identifier
defined in the schema.
"""
file = self._parser.makeFile(filename)
header = """//
// Automatically generated. DO NOT EDIT.
//
package types
import (
"encoding/json"
"github.com/Juniper/contrail-go-api"
)
"""
file.write(header)
self._GenerateConstFlags(ident, file)
self._GenerateObjectStruct(ident, file)
self._GenerateGenericMethods(ident, file)
self._GeneratePropertyMethods(ident, file)
self._GenerateChildrenMethods(ident, file)
self._GenerateRefsMethods(ident, file)
self._GenerateBackRefsMethods(ident, file)
self._GenerateMarshalJSON(ident, file)
self._GenerateUnmarshalJSON(ident, file)
self._GenerateUpdate(ident, file)
self._GenerateUpdateReferences(ident, file)
self._GenerateClientAuxMethods(ident, file)
# end _GenerateObject
def _GenerateStructType(self, ctype, filename):
file = self._parser.makeFile(filename)
header = """//
// Automatically generated. DO NOT EDIT.
//
package types
"""
file.write(header)
self._GenerateCType(ctype, file)
# end _GenerateStructType
def _GenerateCType(self, ctype, file):
for deptype in ctype.getDependentTypes():
if deptype.getName() in self._top_level_map:
continue
self._GenerateCType(deptype, file)
decl = """
type %(camel)s struct {
""" % {'camel': ctype.getName()}
file.write(decl)
for member in ctype.getDataMembers():
camel = CamelCase(member.membername)
ptrType = False
if member.isComplex:
mtype = member.xsd_object.getType()
if not member.isSequence:
ptrType = True
else:
mtype = getGoLangType(member.xsd_object.getType())
if member.isSequence:
mtype = '[]' + mtype
decl = '\t%s %s%s `json:"%s,omitempty"`\n' % \
(camel, '*' if ptrType else '', mtype, member.membername)
file.write(decl)
file.write('}\n')
# Generate methods (add/delete/clear/set) for sequence fields
for member in ctype.getDataMembers():
if not member.isSequence:
continue
membertype = member.xsd_object.getType()
if not member.isComplex:
membertype = getGoLangType(membertype)
decl = """
func (obj *%(typecamel)s) Add%(fieldcamel)s(value %(ptr)s%(fieldtype)s) {
obj.%(member)s = append(obj.%(member)s, %(ptr)svalue)
}
""" \
% {'typecamel': ctype.getName(),
'fieldcamel': CamelCase(member.membername),
'fieldtype': membertype,
'ptr': '*' if member.isComplex else '',
'member': CamelCase(member.membername),
}
file.write(decl)
# end _GenerateCType
def _ExamineInnerTypes(self, inner_type_map, top_level, ctype):
""" Examine all the dependent types of a given top_level type
        (recursively) in order to determine which types are referred to
        more than once. Types referenced by more than one top-level type
        get promoted to top level.
"""
for deptype in ctype.getDependentTypes():
mtype = deptype.getName()
if mtype in inner_type_map:
xset = inner_type_map[mtype]
if top_level not in xset:
xset.append(top_level)
else:
inner_type_map[mtype] = [top_level]
self._ExamineInnerTypes(inner_type_map, top_level, deptype)
def _PromoteInnerTypes(self):
inner_type_map = {}
for ctype in self._top_level_map.values():
self._ExamineInnerTypes(inner_type_map, ctype, ctype)
while True:
promoted = []
for itype, typeset in inner_type_map.iteritems():
if len(typeset) == 1:
continue
# print "promote %s" % itype
# for typ in typeset:
# print " %s" % typ.getName()
self._top_level_map[itype] = self._type_map[itype]
promoted.append(itype)
if len(promoted) == 0:
break
for itype in promoted:
del inner_type_map[itype]
ctype = self._type_map[itype]
self._ExamineInnerTypes(inner_type_map, ctype, ctype)
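    # Illustration (hypothetical schema): if an inner struct such as SubnetType is a
    # dependent of two different top-level types (say VirtualNetworkType and
    # LogicalRouterType), its inner_type_map entry accumulates both owners, so it is
    # promoted into _top_level_map and then re-examined for its own dependent types.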
def _IdentifierLinks(self, ident):
""" Returns the list of all the links (children, refs, back_refs)
of a specific identifier.
"""
fields = []
for link_info in ident.getLinksInfo():
if ident.isLinkRef(link_info):
suffix = '_refs'
elif ident.isLinkHas(link_info):
suffix = 's'
else:
suffix = '_refs'
link_to = ident.getLinkTo(link_info)
fields.append(link_to.getCIdentifierName() + suffix)
for back_link in ident.getBackLinksInfo():
link_from = ident.getBackLinkFrom(back_link)
fields.append(link_from.getCIdentifierName() + '_back_refs')
return fields
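    # Example (hypothetical identifier): an object with a child link to "virtual_network",
    # a ref link to "network_ipam" and a back link from "instance_ip" yields
    # ['virtual_networks', 'network_ipam_refs', 'instance_ip_back_refs'].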
def _GenerateConstFlags(self, ident, file):
""" Emit a const declaration with a flag per struct field used to
record which fields have been modified.
"""
file.write("\nconst (")
first = True
fields = [prop.getCIdentifierName() for prop in ident.getProperties()]
fields.extend(self._IdentifierLinks(ident))
for field in fields:
file.write("\n\t%s_%s" % (ident.getCIdentifierName(), field))
if first:
file.write(" uint64 = 1 << iota")
first = False
file.write("\n)\n")
# end _GenerateConstFlags
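    # Sketch of the emitted Go code (assuming an identifier named "virtual_network" whose
    # first two fields are display_name and network_ipam_refs):
    #   const (
    #       virtual_network_display_name uint64 = 1 << iota
    #       virtual_network_network_ipam_refs
    #   )
    # Each constant is a distinct bit used by the valid/modified masks of the struct below.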
def _GenerateObjectStruct(self, ident, file):
""" Generate the golang struct type definition for an Identifier.
"""
decl = """
type %(camel)s struct {
contrail.ObjectBase
""" % {"camel": ident.getCppName()}
file.write(decl)
for prop in ident.getProperties():
decl = '\t%s %s\n' % \
(prop.getCIdentifierName(), prop.getGoLangTypename())
file.write(decl)
ctype = prop.getCType()
if ctype:
ctypename = ctype.getName()
self._top_level_map[ctypename] = self._type_map[ctypename]
for link_info in ident.getLinksInfo():
if ident.isLinkHas(link_info):
child = ident.getLinkTo(link_info)
decl = '\t%ss contrail.ReferenceList\n' % \
child.getCIdentifierName()
file.write(decl)
else:
link_to = ident.getLinkTo(link_info)
decl = '\t%s_refs contrail.ReferenceList\n' % \
link_to.getCIdentifierName()
file.write(decl)
datatype = self._getAttrType(ident, link_info)
if datatype:
self._top_level_map[datatype] = self._type_map[datatype]
for back_link in ident.getBackLinksInfo():
link_from = ident.getBackLinkFrom(back_link)
decl = '\t%s_back_refs contrail.ReferenceList\n' % \
link_from.getCIdentifierName()
file.write(decl)
decl = """ valid uint64
modified uint64
originalMap map[string]contrail.ReferenceList
}
"""
file.write(decl)
# end _GenerateObjectStruct
def _GenerateGenericMethods(self, ident, file):
""" Methods that do not iterate through the Identifier's fields.
"""
parent_fqn = ""
parent_type = ""
parents = ident.getParents()
if parents:
(parent, meta) = parents[0]
quoted_list = map(lambda x: '"%s"' % x, parent.getDefaultFQName())
parent_fqn = ', '.join(quoted_list)
parent_type = parent.getName()
decl = """
func (obj *%(camel)s) GetType() string {
return "%(typename)s"
}
func (obj *%(camel)s) GetDefaultParent() []string {
name := []string{%(parent_fqn)s}
return name
}
func (obj *%(camel)s) GetDefaultParentType() string {
return "%(parent_type)s"
}
func (obj *%(camel)s) SetName(name string) {
obj.VSetName(obj, name)
}
func (obj *%(camel)s) SetParent(parent contrail.IObject) {
obj.VSetParent(obj, parent)
}
func (obj *%(camel)s) addChange(
name string, refList contrail.ReferenceList) {
if obj.originalMap == nil {
obj.originalMap = make(map[string]contrail.ReferenceList)
}
var refCopy contrail.ReferenceList
copy(refCopy, refList)
obj.originalMap[name] = refCopy
}
func (obj *%(camel)s) UpdateDone() {
obj.modified = 0
obj.originalMap = nil
}
""" \
% {"camel": ident.getCppName(),
"typename": ident.getName(),
"parent_fqn": parent_fqn,
"parent_type": parent_type
}
file.write(decl)
# _GenerateGenericMethods
def _GeneratePropertyMethods(self, ident, file):
for prop in ident.getProperties():
decl = """
func (obj *%(typecamel)s) Get%(fieldcamel)s() %(fieldtype)s {
return obj.%(fieldid)s
}
func (obj *%(typecamel)s) Set%(fieldcamel)s(value %(ptr)s%(fieldtype)s) {
obj.%(fieldid)s = %(ptr)svalue
obj.modified |= %(typeid)s_%(fieldid)s
}
""" \
% {'typecamel': ident.getCppName(),
'typeid': ident.getCIdentifierName(),
'fieldcamel': prop.getCppName(),
'fieldid': prop.getCIdentifierName(),
'fieldtype': prop.getGoLangTypename(),
'ptr': '*' if prop.getCType() else ''
}
file.write(decl)
# end _GeneratePropertyMethods
def _GenerateChildrenMethods(self, ident, file):
for link_info in ident.getLinksInfo():
if not ident.isLinkHas(link_info):
continue
child = ident.getLinkTo(link_info)
self._GenerateReferenceRead(ident, child, 's', file)
self._GenerateReferenceAccessor(ident, child, "s", file)
# end _GenerateChildrenMethods
def _GenerateRefsMethods(self, ident, file):
for link_info in ident.getLinksInfo():
if not ident.isLinkRef(link_info):
continue
link_to = ident.getLinkTo(link_info)
self._GenerateReferenceRead(ident, link_to, '_refs', file)
self._GenerateReferenceAccessor(ident, link_to, '_refs', file)
self._GenerateReferenceModifiers(ident, link_info, file)
# end _GenerateRefsMethods
def _GenerateBackRefsMethods(self, ident, file):
for back_link in ident.getBackLinksInfo():
link_from = ident.getBackLinkFrom(back_link)
self._GenerateReferenceRead(ident, link_from, '_back_refs', file)
self._GenerateReferenceAccessor(ident, link_from, '_back_refs',
file)
# end _GenerateBackRefsMethods
def _MethodSuffix(self, suffix):
expr = re.compile(r'_([a-z])')
return expr.sub(lambda x: x.group(1).upper(), suffix)
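    # Examples: _MethodSuffix('_refs') -> 'Refs', _MethodSuffix('_back_refs') -> 'BackRefs',
    # and _MethodSuffix('s') -> 's' (child lists keep the plain plural suffix).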
def _GenerateReferenceRead(self, ident, ref, suffix, file):
decl = """
func (obj *%(typecamel)s) read%(fieldcamel)s%(methodsuffix)s() error {
if !obj.IsTransient() &&
(obj.valid & %(typeid)s_%(fieldid)s%(suffix)s == 0) {
err := obj.GetField(obj, "%(fieldid)s%(suffix)s")
if err != nil {
return err
}
}
return nil
}
""" \
% {'typecamel': ident.getCppName(),
'fieldcamel': ref.getCppName(),
'typeid': ident.getCIdentifierName(),
'fieldid': ref.getCIdentifierName(),
'methodsuffix': self._MethodSuffix(suffix),
'suffix': suffix
}
file.write(decl)
# end _GenerateReferenceRead
def _GenerateReferenceAccessor(self, ident, ref, suffix, file):
decl = """
func (obj *%(typecamel)s) Get%(fieldcamel)s%(methodsuffix)s() (
contrail.ReferenceList, error) {
err := obj.read%(fieldcamel)s%(methodsuffix)s()
if err != nil {
return nil, err
}
return obj.%(fieldid)s%(suffix)s, nil
}
""" \
% {'typecamel': ident.getCppName(),
'fieldcamel': ref.getCppName(),
'fieldid': ref.getCIdentifierName(),
'methodsuffix': self._MethodSuffix(suffix),
'suffix': suffix,
}
file.write(decl)
# end _GenerateReferenceAccessor
def _getAttrType(self, ident, link_info):
xlink = ident.getLink(link_info)
if xlink.getXsdType():
ctype = xlink.getCType()
if ctype is not None:
return ctype.getName()
return None
def _GenerateReferenceModifiers(self, ident, link_info, file):
""" Generate add/delete/clear and set methods.
"""
datatype = self._getAttrType(ident, link_info)
link_to = ident.getLinkTo(link_info)
decl = """
func (obj *%(typecamel)s) Add%(fieldcamel)s(
rhs *%(fieldcamel)s%(datatype)s) error {
err := obj.read%(fieldcamel)sRefs()
if err != nil {
return err
}
if obj.modified & %(typeid)s_%(fieldid)s_refs == 0 {
obj.addChange("%(fieldname)s", obj.%(fieldid)s_refs)
}
ref := contrail.Reference {
rhs.GetFQName(), rhs.GetUuid(), rhs.GetHref(), %(data)s}
obj.%(fieldid)s_refs = append(obj.%(fieldid)s_refs, ref)
obj.modified |= %(typeid)s_%(fieldid)s_refs
return nil
}
func (obj *%(typecamel)s) Delete%(fieldcamel)s(uuid string) error {
err := obj.read%(fieldcamel)sRefs()
if err != nil {
return err
}
if obj.modified & %(typeid)s_%(fieldid)s_refs == 0 {
obj.addChange("%(fieldname)s", obj.%(fieldid)s_refs)
}
for i, ref := range obj.%(fieldid)s_refs {
if ref.Uuid == uuid {
obj.%(fieldid)s_refs = append(
obj.%(fieldid)s_refs[:i],
obj.%(fieldid)s_refs[i+1:]...)
break
}
}
obj.modified |= %(typeid)s_%(fieldid)s_refs
return nil
}
func (obj *%(typecamel)s) Clear%(fieldcamel)s() {
if obj.valid & %(typeid)s_%(fieldid)s_refs != 0 {
obj.addChange("%(fieldname)s", obj.%(fieldid)s_refs)
} else {
obj.addChange("%(fieldname)s", contrail.ReferenceList{})
}
obj.%(fieldid)s_refs = make([]contrail.Reference, 0)
obj.valid |= %(typeid)s_%(fieldid)s_refs
obj.modified |= %(typeid)s_%(fieldid)s_refs
}
func (obj *%(typecamel)s) Set%(fieldcamel)sList(
refList []contrail.ReferencePair) {
obj.Clear%(fieldcamel)s()
obj.%(fieldid)s_refs = make([]contrail.Reference, len(refList))
for i, pair := range refList {
obj.%(fieldid)s_refs[i] = contrail.Reference {
pair.Object.GetFQName(),
pair.Object.GetUuid(),
pair.Object.GetHref(),
pair.Attribute,
}
}
}
""" \
% {'typecamel': ident.getCppName(),
'typeid': ident.getCIdentifierName(),
'fieldcamel': link_to.getCppName(),
'fieldid': link_to.getCIdentifierName(),
'fieldname': link_to.getName(),
'datatype': ', data %s' % datatype if datatype else '',
'data': 'data' if datatype else 'nil',
}
file.write(decl)
# end _GenerateReferenceModifiers
def _GenerateMarshalJSON(self, ident, file):
decl = """
func (obj *%(camel)s) MarshalJSON() ([]byte, error) {
msg := map[string]*json.RawMessage {
}
err := obj.MarshalCommon(msg)
if err != nil {
return nil, err
}
""" % {'camel': ident.getCppName()}
file.write(decl)
for prop in ident.getProperties():
decl = """
if obj.modified & %(typeid)s_%(fieldid)s != 0 {
var value json.RawMessage
value, err := json.Marshal(&obj.%(fieldid)s)
if err != nil {
return nil, err
}
msg["%(fieldid)s"] = &value
}
""" \
% {'typeid': ident.getCIdentifierName(),
'fieldid': prop.getCIdentifierName()}
file.write(decl)
for link_info in ident.getLinksInfo():
if not ident.isLinkRef(link_info):
continue
link_to = ident.getLinkTo(link_info)
decl = """
if len(obj.%(fieldid)s_refs) > 0 {
var value json.RawMessage
value, err := json.Marshal(&obj.%(fieldid)s_refs)
if err != nil {
return nil, err
}
msg["%(fieldid)s_refs"] = &value
}
""" % {'fieldid': link_to.getCIdentifierName()}
file.write(decl)
decl = """
return json.Marshal(msg)
}
"""
file.write(decl)
# end _GenerateMarshalJSON
def _GenerateUnmarshalJSON(self, ident, file):
decl = """
func (obj *%(camel)s) UnmarshalJSON(body []byte) error {
var m map[string]json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
err = obj.UnmarshalCommon(m)
if err != nil {
return err
}
for key, value := range m {
switch key {""" % {'camel': ident.getCppName()}
file.write(decl)
fields = [prop.getCIdentifierName() for prop in ident.getProperties()]
typedrefs = []
for link_info in ident.getLinksInfo():
if ident.isLinkRef(link_info):
suffix = '_refs'
elif ident.isLinkHas(link_info):
suffix = 's'
else:
suffix = '_refs'
link_to = ident.getLinkTo(link_info)
name = link_to.getCIdentifierName() + suffix
attrtype = self._getAttrType(ident, link_info)
if attrtype:
typedrefs.append((name, attrtype))
else:
fields.append(name)
for back_link in ident.getBackLinksInfo():
link_from = ident.getBackLinkFrom(back_link)
name = link_from.getCIdentifierName() + '_back_refs'
attrtype = self._getAttrType(ident, back_link)
if attrtype:
typedrefs.append((name, attrtype))
else:
fields.append(name)
for field in fields:
decl = """
case "%(field)s":
err = json.Unmarshal(value, &obj.%(field)s)
if err == nil {
obj.valid |= %(typeid)s_%(field)s
}
break""" % {'typeid': ident.getCIdentifierName(),
'field': field}
file.write(decl)
for field, attrtype in typedrefs:
decl = """
case "%(field)s": {
type ReferenceElement struct {
To []string
Uuid string
Href string
Attr %(typename)s
}
var array []ReferenceElement
err = json.Unmarshal(value, &array)
if err != nil {
break
}
obj.valid |= %(typeid)s_%(field)s
obj.%(field)s = make(contrail.ReferenceList, 0)
for _, element := range array {
ref := contrail.Reference {
element.To,
element.Uuid,
element.Href,
element.Attr,
}
obj.%(field)s = append(obj.%(field)s, ref)
}
break
}""" % {'typeid': ident.getCIdentifierName(),
'field': field, 'typename': attrtype}
file.write(decl)
decl = """
}
if err != nil {
return err
}
}
return nil
}
"""
file.write(decl)
# end _GenerateUnmarshalJSON
def _GenerateUpdate(self, ident, file):
"""
"""
decl = """
func (obj *%(camel)s) UpdateObject() ([]byte, error) {
msg := map[string]*json.RawMessage {
}
err := obj.MarshalId(msg)
if err != nil {
return nil, err
}
""" % {'camel': ident.getCppName()}
file.write(decl)
for prop in ident.getProperties():
decl = """
if obj.modified & %(typeid)s_%(fieldid)s != 0 {
var value json.RawMessage
value, err := json.Marshal(&obj.%(fieldid)s)
if err != nil {
return nil, err
}
msg["%(fieldid)s"] = &value
}
""" \
% {'typeid': ident.getCIdentifierName(),
'fieldid': prop.getCIdentifierName()}
file.write(decl)
for link_info in ident.getLinksInfo():
if not ident.isLinkRef(link_info):
continue
link_to = ident.getLinkTo(link_info)
decl = """
if obj.modified & %(typeid)s_%(fieldid)s_refs != 0 {
if len(obj.%(fieldid)s_refs) == 0 {
var value json.RawMessage
value, err := json.Marshal(
make([]contrail.Reference, 0))
if err != nil {
return nil, err
}
msg["%(fieldid)s_refs"] = &value
} else {
prev := obj.originalMap["%(fieldname)s"]
if len(prev) == 0 {
var value json.RawMessage
value, err := json.Marshal(
&obj.%(fieldid)s_refs)
if err != nil {
return nil, err
}
msg["%(fieldid)s_refs"] = &value
}
}
}
""" \
% {'typeid': ident.getCIdentifierName(),
'fieldid': link_to.getCIdentifierName(),
'fieldname': link_to.getName()}
file.write(decl)
decl = """
return json.Marshal(msg)
}
"""
file.write(decl)
# end _GenerateUpdate
def _GenerateUpdateReferences(self, ident, file):
""" Method that triggers the generation of ref-update requests.
For any reference list marked as modified, generate the delta
between current and original data via ObjectBase.UpdateReference.
"""
decl = """
func (obj *%(camel)s) UpdateReferences() error {
""" % {'camel': ident.getCppName()}
file.write(decl)
for link_info in ident.getLinksInfo():
if not ident.isLinkRef(link_info):
continue
link_to = ident.getLinkTo(link_info)
decl = """
if obj.modified & %(typeid)s_%(fieldid)s_refs != 0 {
err := obj.UpdateReference(
obj, "%(fieldname)s",
obj.%(fieldid)s_refs,
obj.originalMap["%(fieldname)s"])
if err != nil {
return err
}
}
""" \
% {'typeid': ident.getCIdentifierName(),
'fieldid': link_to.getCIdentifierName(),
'fieldname': link_to.getName()}
file.write(decl)
decl = """
return nil
}
"""
file.write(decl)
# end _GenerateUpdateReferences
def _GenerateClientAuxMethods(self, ident, file):
"""
ApiClient methods that return a struct type rather than an interface.
"""
decl = """
func %(camel)sByName(c contrail.ApiClient, fqn string) (*%(camel)s, error) {
obj, err := c.FindByName("%(typeid)s", fqn)
if err != nil {
return nil, err
}
return obj.(*%(camel)s), nil
}
func %(camel)sByUuid(c contrail.ApiClient, uuid string) (*%(camel)s, error) {
obj, err := c.FindByUuid("%(typeid)s", uuid)
if err != nil {
return nil, err
}
return obj.(*%(camel)s), nil
}
""" % {'camel': ident.getCppName(), 'typeid': ident.getName()}
file.write(decl)
# end _GenerateClientAuxMethods
def Generate(self, dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
elif not os.path.isdir(dirname):
print "-o option must specify directory"
sys.exit(1)
self._GenerateTypeMap(dirname)
for ident in self._identifier_map.values():
filename = os.path.join(
dirname, ident.getCIdentifierName() + ".go")
self._GenerateObject(ident, filename)
self._PromoteInnerTypes()
for ctype in self._top_level_map.values():
filename = os.path.join(
dirname, ctype.getCIdentifierName() + ".go")
self._GenerateStructType(ctype, filename)
|
|
from breathe.renderer.rst.doxygen.base import Renderer
class DoxygenTypeSubRenderer(Renderer):
def render(self):
compound_renderer = self.renderer_factory.create_renderer(self.data_object.compounddef)
return compound_renderer.render()
class CompoundDefTypeSubRenderer(Renderer):
section_titles = [
("user-defined", "User Defined"),
("public-type", "Public Type"),
("public-func", "Public Functions"),
("public-attrib", "Public Members"),
("public-slot", "Public Slot"),
("signal", "Signal"),
("dcop-func", "DCOP Function"),
("property", "Property"),
("event", "Event"),
("public-static-func", "Public Static Functons"),
("public-static-attrib", "Public Static Attributes"),
("protected-type", "Protected Types"),
("protected-func", "Protected Functions"),
("protected-attrib", "Protected Attributes"),
("protected-slot", "Protected Slots"),
("protected-static-func", "Protected Static Functions"),
("protected-static-attrib", "Protected Static Attributes"),
("package-type", "Package Types"),
("package-attrib", "Package Attributes"),
("package-static-func", "Package Static Functions"),
("package-static-attrib", "Package Static Attributes"),
("private-type", "Private Types"),
("private-func", "Private Functions"),
("private-attrib", "Private Members"),
("private-slot", "Private Slots"),
("private-static-func", "Private Static Functions"),
("private-static-attrib", "Private Static Attributes"),
("friend", "Friends"),
("related", "Related"),
("define", "Defines"),
("prototype", "Prototypes"),
("typedef", "Typedefs"),
("enum", "Enums"),
("func", "Functions"),
("var", "Variables"),
]
def extend_nodelist(self, nodelist, section, title, section_nodelists):
# Add title and contents if found
if section_nodelists.has_key(section):
nodelist.append(self.node_factory.emphasis(text=title))
nodelist.append(self.node_factory.block_quote("", *section_nodelists[section]))
def render(self):
section_nodelists = {}
# Get all sub sections
for sectiondef in self.data_object.sectiondef:
kind = sectiondef.kind
renderer = self.renderer_factory.create_renderer(sectiondef)
subnodes = renderer.render()
section_nodelists[kind] = subnodes
nodelist = []
if self.data_object.briefdescription:
renderer = self.renderer_factory.create_renderer(self.data_object.briefdescription)
nodelist.append(self.node_factory.paragraph("", "", *renderer.render()))
if self.data_object.detaileddescription:
renderer = self.renderer_factory.create_renderer(self.data_object.detaileddescription)
nodelist.append(self.node_factory.paragraph("", "", *renderer.render()))
# Order the results in an appropriate manner
for entry in self.section_titles:
self.extend_nodelist(nodelist, entry[0], entry[1], section_nodelists)
self.extend_nodelist(nodelist, "", "", section_nodelists)
return [self.node_factory.block_quote("", *nodelist)]
class SectionDefTypeSubRenderer(Renderer):
def render(self):
defs = []
# Get all the memberdef info
for memberdef in self.data_object.memberdef:
renderer = self.renderer_factory.create_renderer(memberdef)
defs.extend(renderer.render())
if defs:
return [self.node_factory.definition_list("", *defs)]
# Return with information about which section this is
return []
class MemberDefTypeSubRenderer(Renderer):
def render(self):
kind = []
# Variable type or function return type
if self.data_object.type_:
renderer = self.renderer_factory.create_renderer(self.data_object.type_)
kind = renderer.render()
name = self.node_factory.strong(text=self.data_object.name)
args = []
args.extend(kind)
args.extend([self.node_factory.Text(" "), name])
# This test should be done in the RendererFactory
if self.data_object.kind == "function":
# Get the function arguments
args.append(self.node_factory.Text("("))
for i, parameter in enumerate(self.data_object.param):
if i: args.append(self.node_factory.Text(", "))
renderer = self.renderer_factory.create_renderer(parameter)
args.extend(renderer.render())
args.append(self.node_factory.Text(")"))
term = self.node_factory.term("","", *args)
description_nodes = []
if self.data_object.briefdescription:
renderer = self.renderer_factory.create_renderer(self.data_object.briefdescription)
description_nodes.append(self.node_factory.paragraph("", "", *renderer.render()))
if self.data_object.detaileddescription:
renderer = self.renderer_factory.create_renderer(self.data_object.detaileddescription)
description_nodes.append(self.node_factory.paragraph( "", "", *renderer.render()))
definition = self.node_factory.definition("", *description_nodes)
refid = "%s%s" % (self.project_info.name(), self.data_object.id)
target = self.node_factory.target(refid=refid, ids=[refid], names=[refid])
# Tell the document about our target
try:
self.document.note_explicit_target(target)
except Exception, e:
print "Failed to register id: %s. It is probably a duplicate." % refid
# Build the list item
nodelist = [target, self.node_factory.definition_list_item("",term, definition)]
return nodelist
class DescriptionTypeSubRenderer(Renderer):
def render(self):
nodelist = []
# Get description in rst_nodes if possible
for item in self.data_object.content_:
renderer = self.renderer_factory.create_renderer(item)
nodelist.extend(renderer.render())
return nodelist
class LinkedTextTypeSubRenderer(Renderer):
def render(self):
nodelist = []
# Recursively process where possible
for i in self.data_object.content_:
renderer = self.renderer_factory.create_renderer(i)
nodelist.extend(renderer.render())
nodelist.append(self.node_factory.Text(" "))
return nodelist
class ParamTypeSubRenderer(Renderer):
def render(self):
nodelist = []
# Parameter type
if self.data_object.type_:
renderer = self.renderer_factory.create_renderer(self.data_object.type_)
nodelist.extend(renderer.render())
# Parameter name
if self.data_object.declname:
nodelist.append(self.node_factory.Text(self.data_object.declname))
if self.data_object.defname:
nodelist.append(self.node_factory.Text(self.data_object.defname))
# Default value
if self.data_object.defval:
nodelist.append(self.node_factory.Text(" = "))
renderer = self.renderer_factory.create_renderer(self.data_object.defval)
nodelist.extend(renderer.render())
return nodelist
class DocRefTextTypeSubRenderer(Renderer):
def render(self):
nodelist = []
for item in self.data_object.content_:
renderer = self.renderer_factory.create_renderer(item)
nodelist.extend(renderer.render())
refid = "%s%s" % (self.project_info.name(), self.data_object.refid)
nodelist = [
self.node_factory.pending_xref(
"",
reftype="ref",
refid=refid,
refdoc=None,
reftarget=refid,
refcaption=refid,
*nodelist
)
]
return nodelist
class DocParaTypeSubRenderer(Renderer):
def render(self):
nodelist = []
for entry in self.data_object.parameterlist:
renderer = self.renderer_factory.create_renderer(entry)
nodelist.extend(renderer.render())
for item in self.data_object.content:
renderer = self.renderer_factory.create_renderer(item)
nodelist.extend(renderer.render())
def_list_items = []
for item in self.data_object.simplesects:
renderer = self.renderer_factory.create_renderer(item)
def_list_items.extend(renderer.render())
if def_list_items:
nodelist.append(self.node_factory.definition_list("", *def_list_items))
return nodelist
class DocParamListTypeSubRenderer(Renderer):
lookup = {
"param" : "Parameters",
"exception" : "Exceptions",
}
def render(self):
nodelist = []
for entry in self.data_object.parameteritem:
renderer = self.renderer_factory.create_renderer(entry)
nodelist.extend(renderer.render())
name = self.lookup[self.data_object.kind]
title = self.node_factory.emphasis("", self.node_factory.Text(name))
return [title,self.node_factory.bullet_list("", *nodelist)]
class DocParamListItemSubRenderer(Renderer):
def render(self):
nodelist = []
for entry in self.data_object.parameternamelist:
renderer = self.renderer_factory.create_renderer(entry)
nodelist.extend(renderer.render())
term = self.node_factory.literal("","", *nodelist)
separator = self.node_factory.Text(" - ")
nodelist = []
if self.data_object.parameterdescription:
renderer = self.renderer_factory.create_renderer(self.data_object.parameterdescription)
nodelist.extend(renderer.render())
return [self.node_factory.list_item("", term, separator, *nodelist)]
class DocParamNameListSubRenderer(Renderer):
def render(self):
nodelist = []
for entry in self.data_object.parametername:
renderer = self.renderer_factory.create_renderer(entry)
nodelist.extend(renderer.render())
return nodelist
class DocParamNameSubRenderer(Renderer):
def render(self):
nodelist = []
for item in self.data_object.content_:
renderer = self.renderer_factory.create_renderer(item)
nodelist.extend(renderer.render())
return nodelist
class DocSect1TypeSubRenderer(Renderer):
def render(self):
return []
class DocSimpleSectTypeSubRenderer(Renderer):
def render(self):
text = self.node_factory.Text(self.data_object.kind.capitalize())
emphasis = self.node_factory.emphasis("", text)
term = self.node_factory.term("","", emphasis)
nodelist = []
for item in self.data_object.para:
renderer = self.renderer_factory.create_renderer(item)
nodelist.append(self.node_factory.paragraph("", "", *renderer.render()))
definition = self.node_factory.definition("", *nodelist)
return [self.node_factory.definition_list_item("", term, definition)]
class MixedContainerRenderer(Renderer):
def render(self):
renderer = self.renderer_factory.create_renderer(self.data_object.getValue())
return renderer.render()
|
|
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .loading import *
def logsumexp(values):
biggest = np.max(values)
x = values - biggest
result = np.log(np.sum(np.exp(x))) + biggest
return result
def logdiffexp(x1, x2):
biggest = x1
xx1 = x1 - biggest
xx2 = x2 - biggest
result = np.log(np.exp(xx1) - np.exp(xx2)) + biggest
return result
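# Quick sanity check of the two helpers above (illustrative only):
#   logsumexp(np.array([-1000.0, -1000.0])) equals -1000.0 + np.log(2.0), evaluated without
#   underflow because the maximum is factored out before exponentiating;
#   logdiffexp(0.0, np.log(0.5)) equals np.log(0.5), i.e. log(exp(0) - exp(log(0.5))).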
def postprocess(temperature=1., numResampleLogX=1, plot=True, loaded=[], \
cut=0., save=True, zoom_in=True, compression_bias_min=1., verbose=True,\
compression_scatter=0., moreSamples=1., compression_assert=None, single_precision=False):
if len(loaded) == 0:
levels_orig = np.atleast_2d(my_loadtxt("levels.txt"))
sample_info = np.atleast_2d(my_loadtxt("sample_info.txt"))
else:
levels_orig, sample_info = loaded[0], loaded[1]
# Remove regularisation from levels_orig if we asked for it
if compression_assert is not None:
levels_orig[1:,0] = -np.cumsum(compression_assert*np.ones(levels_orig.shape[0] - 1))
cut = int(cut*sample_info.shape[0])
sample_info = sample_info[cut:, :]
if plot:
plt.figure(1)
plt.plot(sample_info[:,0], "k")
plt.xlabel("Iteration")
plt.ylabel("Level")
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(np.diff(levels_orig[:,0]), "k")
plt.ylabel("Compression")
plt.xlabel("Level")
xlim = plt.gca().get_xlim()
plt.axhline(-1., color='g')
plt.axhline(-np.log(10.), color='g', linestyle="--")
        plt.ylim(top=0.05)  # 'ymax' keyword was removed from newer matplotlib
plt.subplot(2,1,2)
good = np.nonzero(levels_orig[:,4] > 0)[0]
plt.plot(levels_orig[good,3]/levels_orig[good,4], "ko-")
plt.xlim(xlim)
plt.ylim([0., 1.])
plt.xlabel("Level")
plt.ylabel("MH Acceptance")
# Convert to lists of tuples
logl_levels = [(levels_orig[i,1], levels_orig[i, 2]) for i in range(0, levels_orig.shape[0])] # logl, tiebreaker
logl_samples = [(sample_info[i, 1], sample_info[i, 2], i) for i in range(0, sample_info.shape[0])] # logl, tiebreaker, id
logx_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logp_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logP_samples = np.zeros((sample_info.shape[0], numResampleLogX))
P_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logz_estimates = np.zeros((numResampleLogX, 1))
H_estimates = np.zeros((numResampleLogX, 1))
# Find sandwiching level for each sample
sandwich = sample_info[:,0].copy().astype('int')
for i in range(0, sample_info.shape[0]):
while sandwich[i] < levels_orig.shape[0]-1 and logl_samples[i] > logl_levels[sandwich[i] + 1]:
sandwich[i] += 1
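    # Starting from the level each sample was created in, the loop above bumps the sample up
    # while its (logl, tiebreaker) pair beats the next level's threshold; e.g. a sample born
    # in level 1 that beats levels 2 and 3 but not level 4 ends up with sandwich[i] == 3.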
for z in range(0, numResampleLogX):
# Make a monte carlo perturbation of the level compressions
levels = levels_orig.copy()
compressions = -np.diff(levels[:,0])
compressions *= compression_bias_min + (1. - compression_bias_min)*np.random.rand()
compressions *= np.exp(compression_scatter*np.random.randn(compressions.size))
levels[1:, 0] = -compressions
levels[:, 0] = np.cumsum(levels[:,0])
# For each level
for i in range(0, levels.shape[0]):
# Find the samples sandwiched by this level
which = np.nonzero(sandwich == i)[0]
logl_samples_thisLevel = [] # (logl, tieBreaker, ID)
for j in range(0, len(which)):
logl_samples_thisLevel.append(copy.deepcopy(logl_samples[which[j]]))
logl_samples_thisLevel = sorted(logl_samples_thisLevel)
N = len(logl_samples_thisLevel)
# Generate intermediate logx values
logx_max = levels[i, 0]
if i == levels.shape[0]-1:
logx_min = -1E300
else:
logx_min = levels[i+1, 0]
Umin = np.exp(logx_min - logx_max)
if N == 0 or numResampleLogX > 1:
U = Umin + (1. - Umin)*np.random.rand(len(which))
else:
U = Umin + (1. - Umin)*np.linspace(1./(N+1), 1. - 1./(N+1), N)
logx_samples_thisLevel = np.sort(logx_max + np.log(U))[::-1]
for j in range(0, which.size):
logx_samples[logl_samples_thisLevel[j][2]][z] = logx_samples_thisLevel[j]
if j != which.size - 1:
left = logx_samples_thisLevel[j+1]
elif i == levels.shape[0]-1:
left = -1E300
else:
left = levels[i+1][0]
if j != 0:
right = logx_samples_thisLevel[j-1]
else:
right = levels[i][0]
logp_samples[logl_samples_thisLevel[j][2]][z] = np.log(0.5) + logdiffexp(right, left)
logl = sample_info[:,1]/temperature
logp_samples[:,z] = logp_samples[:,z] - logsumexp(logp_samples[:,z])
logP_samples[:,z] = logp_samples[:,z] + logl
logz_estimates[z] = logsumexp(logP_samples[:,z])
logP_samples[:,z] -= logz_estimates[z]
P_samples[:,z] = np.exp(logP_samples[:,z])
H_estimates[z] = -logz_estimates[z] + np.sum(P_samples[:,z]*logl)
if plot:
plt.figure(3)
plt.subplot(2,1,1)
            plt.cla()  # plt.hold() was removed from matplotlib; clear the axes instead
            plt.plot(logx_samples[:,z], sample_info[:,1], 'k.', label='Samples')
            plt.plot(levels[1:,0], levels[1:,1], 'g.', label='Levels')
plt.legend(numpoints=1, loc='lower left')
plt.ylabel('log(L)')
plt.title(str(z+1) + "/" + str(numResampleLogX) + ", log(Z) = " + str(logz_estimates[z][0]))
# Use all plotted logl values to set ylim
combined_logl = np.hstack([sample_info[:,1], levels[1:, 1]])
combined_logl = np.sort(combined_logl)
lower = combined_logl[int(0.1*combined_logl.size)]
upper = combined_logl[-1]
diff = upper - lower
lower -= 0.05*diff
upper += 0.05*diff
if zoom_in:
plt.ylim([lower, upper])
xlim = plt.gca().get_xlim()
if plot:
plt.subplot(2,1,2)
            plt.cla()  # replaces the removed plt.hold(False)
plt.plot(logx_samples[:,z], P_samples[:,z], 'k.')
plt.ylabel('Posterior Weights')
plt.xlabel('log(X)')
plt.xlim(xlim)
P_samples = np.mean(P_samples, 1)
P_samples = P_samples/np.sum(P_samples)
logz_estimate = np.mean(logz_estimates)
logz_error = np.std(logz_estimates)
H_estimate = np.mean(H_estimates)
H_error = np.std(H_estimates)
ESS = np.exp(-np.sum(P_samples*np.log(P_samples+1E-300)))
errorbar1 = ""
errorbar2 = ""
if numResampleLogX > 1:
errorbar1 += " +- " + str(logz_error)
errorbar2 += " +- " + str(H_error)
if verbose:
print("log(Z) = " + str(logz_estimate) + errorbar1)
print("Information = " + str(H_estimate) + errorbar2 + " nats.")
print("Effective sample size = " + str(ESS))
# Resample to uniform weight
N = int(moreSamples*ESS)
w = P_samples
w = w/np.max(w)
rows = np.empty(N, dtype="int64")
for i in range(0, N):
while True:
which = np.random.randint(sample_info.shape[0])
if np.random.rand() <= w[which]:
break
rows[i] = which + cut
# Get header row
f = open("sample.txt", "r")
line = f.readline()
if line[0] == "#":
header = line[1:]
else:
header = ""
f.close()
sample = loadtxt_rows("sample.txt", set(rows), single_precision)
posterior_sample = None
if single_precision:
posterior_sample = np.empty((N, sample["ncol"]), dtype="float32")
else:
posterior_sample = np.empty((N, sample["ncol"]))
for i in range(0, N):
posterior_sample[i, :] = sample[rows[i]]
if save:
np.savetxt('weights.txt', w)
if single_precision:
np.savetxt("posterior_sample.txt", posterior_sample, fmt="%.7e",\
header=header)
else:
np.savetxt("posterior_sample.txt", posterior_sample,\
header=header)
if plot:
plt.show()
return [logz_estimate, H_estimate, logx_samples]
def postprocess_abc(temperature=1., numResampleLogX=1, plot=True, loaded=[], \
cut=0., save=True, zoom_in=True, compression_bias_min=1., verbose=True,\
compression_scatter=0., moreSamples=1., compression_assert=None, single_precision=False, threshold_fraction=0.8):
if len(loaded) == 0:
levels_orig = np.atleast_2d(my_loadtxt("levels.txt"))
sample_info = np.atleast_2d(my_loadtxt("sample_info.txt"))
else:
levels_orig, sample_info = loaded[0], loaded[1]
# Remove regularisation from levels_orig if we asked for it
if compression_assert is not None:
levels_orig[1:,0] = -np.cumsum(compression_assert*np.ones(levels_orig.shape[0] - 1))
cut = int(cut*sample_info.shape[0])
sample_info = sample_info[cut:, :]
if plot:
plt.figure(1)
plt.plot(sample_info[:,0], "k")
plt.xlabel("Iteration")
plt.ylabel("Level")
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(np.diff(levels_orig[:,0]), "k")
plt.ylabel("Compression")
plt.xlabel("Level")
xlim = plt.gca().get_xlim()
plt.axhline(-1., color='g')
plt.axhline(-np.log(10.), color='g', linestyle="--")
        plt.ylim(top=0.05)  # the 'ymax' keyword was removed in Matplotlib >= 3.0
plt.subplot(2,1,2)
good = np.nonzero(levels_orig[:,4] > 0)[0]
plt.plot(levels_orig[good,3]/levels_orig[good,4], "ko-")
plt.xlim(xlim)
plt.ylim([0., 1.])
plt.xlabel("Level")
plt.ylabel("MH Acceptance")
# Convert to lists of tuples
    logl_levels = [(levels_orig[i,1], levels_orig[i, 2]) for i in range(0, levels_orig.shape[0])] # logl, tiebreaker
logl_samples = [(sample_info[i, 1], sample_info[i, 2], i) for i in range(0, sample_info.shape[0])] # logl, tiebreaker, id
logx_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logp_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logP_samples = np.zeros((sample_info.shape[0], numResampleLogX))
P_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logz_estimates = np.zeros((numResampleLogX, 1))
H_estimates = np.zeros((numResampleLogX, 1))
# Find sandwiching level for each sample
sandwich = sample_info[:,0].copy().astype('int')
for i in range(0, sample_info.shape[0]):
while sandwich[i] < levels_orig.shape[0]-1 and logl_samples[i] > logl_levels[sandwich[i] + 1]:
sandwich[i] += 1
for z in range(0, numResampleLogX):
# Make a monte carlo perturbation of the level compressions
levels = levels_orig.copy()
compressions = -np.diff(levels[:,0])
compressions *= compression_bias_min + (1. - compression_bias_min)*np.random.rand()
compressions *= np.exp(compression_scatter*np.random.randn(compressions.size))
levels[1:, 0] = -compressions
levels[:, 0] = np.cumsum(levels[:,0])
# For each level
for i in range(0, levels.shape[0]):
# Find the samples sandwiched by this level
which = np.nonzero(sandwich == i)[0]
logl_samples_thisLevel = [] # (logl, tieBreaker, ID)
for j in range(0, len(which)):
logl_samples_thisLevel.append(copy.deepcopy(logl_samples[which[j]]))
logl_samples_thisLevel = sorted(logl_samples_thisLevel)
N = len(logl_samples_thisLevel)
# Generate intermediate logx values
logx_max = levels[i, 0]
if i == levels.shape[0]-1:
logx_min = -1E300
else:
logx_min = levels[i+1, 0]
Umin = np.exp(logx_min - logx_max)
if N == 0 or numResampleLogX > 1:
U = Umin + (1. - Umin)*np.random.rand(len(which))
else:
U = Umin + (1. - Umin)*np.linspace(1./(N+1), 1. - 1./(N+1), N)
logx_samples_thisLevel = np.sort(logx_max + np.log(U))[::-1]
for j in range(0, which.size):
logx_samples[logl_samples_thisLevel[j][2]][z] = logx_samples_thisLevel[j]
if j != which.size - 1:
left = logx_samples_thisLevel[j+1]
elif i == levels.shape[0]-1:
left = -1E300
else:
left = levels[i+1][0]
if j != 0:
right = logx_samples_thisLevel[j-1]
else:
right = levels[i][0]
logp_samples[logl_samples_thisLevel[j][2]][z] = np.log(0.5) + logdiffexp(right, left)
logl = sample_info[:,1]/temperature
logp_samples[:,z] = logp_samples[:,z] - logsumexp(logp_samples[:,z])
# Define the threshold for ABC, in terms of log(X)
threshold = threshold_fraction*levels[:,0].min()
        # Particles whose log(X) lies above the threshold get zero posterior weight
logp_samples[logx_samples > threshold] = -1E300
logP_samples[:,z] = logp_samples[:,z] + logl
logz_estimates[z] = logsumexp(logP_samples[:,z])
logP_samples[:,z] -= logz_estimates[z]
P_samples[:,z] = np.exp(logP_samples[:,z])
H_estimates[z] = -logz_estimates[z] + np.sum(P_samples[:,z]*logl)
if plot:
plt.figure(3)
plt.subplot(2,1,1)
            plt.cla()  # plt.hold() was removed in Matplotlib >= 3.0; clear the axes instead
            plt.plot(logx_samples[:,z], sample_info[:,1], 'k.', label='Samples')
plt.plot(levels[1:,0], levels[1:,1], 'g.', label='Levels')
plt.legend(numpoints=1, loc='lower left')
plt.ylabel('log(L)')
plt.title(str(z+1) + "/" + str(numResampleLogX) + ", log(Z) = " + str(logz_estimates[z][0]))
# Use all plotted logl values to set ylim
combined_logl = np.hstack([sample_info[:,1], levels[1:, 1]])
combined_logl = np.sort(combined_logl)
lower = combined_logl[int(0.1*combined_logl.size)]
upper = combined_logl[-1]
diff = upper - lower
lower -= 0.05*diff
upper += 0.05*diff
if zoom_in:
plt.ylim([lower, upper])
xlim = plt.gca().get_xlim()
if plot:
plt.subplot(2,1,2)
            plt.cla()  # plt.hold() was removed in Matplotlib >= 3.0; clear the axes instead
plt.plot(logx_samples[:,z], P_samples[:,z], 'k.')
plt.ylabel('Posterior Weights')
plt.xlabel('log(X)')
plt.xlim(xlim)
P_samples = np.mean(P_samples, 1)
P_samples = P_samples/np.sum(P_samples)
logz_estimate = np.mean(logz_estimates)
logz_error = np.std(logz_estimates)
H_estimate = np.mean(H_estimates)
H_error = np.std(H_estimates)
ESS = np.exp(-np.sum(P_samples*np.log(P_samples+1E-300)))
errorbar1 = ""
errorbar2 = ""
if numResampleLogX > 1:
errorbar1 += " +- " + str(logz_error)
errorbar2 += " +- " + str(H_error)
if verbose:
print("log(Z) = " + str(logz_estimate) + errorbar1)
print("Information = " + str(H_estimate) + errorbar2 + " nats.")
print("Effective sample size = " + str(ESS))
# Resample to uniform weight
N = int(moreSamples*ESS)
w = P_samples
w = w/np.max(w)
rows = np.empty(N, dtype="int64")
for i in range(0, N):
while True:
which = np.random.randint(sample_info.shape[0])
if np.random.rand() <= w[which]:
break
rows[i] = which + cut
sample = loadtxt_rows("sample.txt", set(rows), single_precision)
posterior_sample = None
if single_precision:
posterior_sample = np.empty((N, sample["ncol"]), dtype="float32")
else:
posterior_sample = np.empty((N, sample["ncol"]))
for i in range(0, N):
posterior_sample[i, :] = sample[rows[i]]
if save:
np.savetxt('weights.txt', w)
if single_precision:
np.savetxt("posterior_sample.txt", posterior_sample, fmt="%.7e")
else:
np.savetxt("posterior_sample.txt", posterior_sample)
if plot:
plt.show()
return [logz_estimate, H_estimate, logx_samples]
def diffusion_plot():
"""
Plot a nice per-particle diffusion plot.
"""
sample_info = np.atleast_2d(my_loadtxt('sample_info.txt'))
ID = sample_info[:,3].astype('int')
j = sample_info[:,0].astype('int')
ii = np.arange(1, sample_info.shape[0] + 1)
for i in range(0, ID.max() + 1):
which = np.nonzero(ID == i)[0]
plt.plot(ii[which], j[which])
plt.xlabel('Iteration')
plt.ylabel('Level')
plt.show()
def levels_plot():
"""
Plot the differences between the logl values of the levels.
"""
levels = my_loadtxt('levels.txt')
plt.plot(np.log10(np.diff(levels[:,1])), "ko-")
plt.ylim([-1, 4])
plt.axhline(0., color='g', linewidth=2)
plt.axhline(np.log10(np.log(10.)), color='g')
plt.axhline(np.log10(0.8), color='g', linestyle='--')
plt.xlabel('Level')
plt.ylabel('$\\log_{10}$(Delta log likelihood)')
plt.show()
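# A minimal usage sketch, assuming a completed run has already written
# "levels.txt", "sample_info.txt" and "sample.txt" into the working directory
# (these are the file names hard-coded above). The keyword values below are
# illustrative only; all of them appear in the signature of postprocess_abc().
if __name__ == "__main__":
    logz_estimate, H_estimate, logx_samples = postprocess_abc(
        temperature=1., numResampleLogX=1, moreSamples=1.,
        plot=False, save=True, verbose=True)
    print("Estimated log(Z):", logz_estimate)
    # Optional per-particle and per-level diagnostics.
    diffusion_plot()
    levels_plot()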
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import copy
import datetime
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import multi_cell_list
from nova import context
from nova import exception
from nova import objects
from nova import test
class TestUtils(test.NoDBTestCase):
def test_compare_simple(self):
dt1 = datetime.datetime(2015, 11, 5, 20, 30, 00)
dt2 = datetime.datetime(1955, 10, 25, 1, 21, 00)
inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456, 'key4': dt1}
inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123, 'key4': dt2}
        # Equal key0, inst1 == inst2
ctx = multi_cell_list.RecordSortContext(['key0'], ['asc'])
self.assertEqual(0, ctx.compare_records(inst1, inst2))
        # Equal key0, inst1 == inst2 (direction should not matter)
ctx = multi_cell_list.RecordSortContext(['key0'], ['desc'])
self.assertEqual(0, ctx.compare_records(inst1, inst2))
# Ascending by key1, inst1 < inst2
ctx = multi_cell_list.RecordSortContext(['key1'], ['asc'])
self.assertEqual(-1, ctx.compare_records(inst1, inst2))
# Descending by key1, inst2 < inst1
ctx = multi_cell_list.RecordSortContext(['key1'], ['desc'])
self.assertEqual(1, ctx.compare_records(inst1, inst2))
# Ascending by key2, inst2 < inst1
ctx = multi_cell_list.RecordSortContext(['key2'], ['asc'])
self.assertEqual(1, ctx.compare_records(inst1, inst2))
# Descending by key2, inst1 < inst2
ctx = multi_cell_list.RecordSortContext(['key2'], ['desc'])
self.assertEqual(-1, ctx.compare_records(inst1, inst2))
# Ascending by key4, inst1 > inst2
ctx = multi_cell_list.RecordSortContext(['key4'], ['asc'])
self.assertEqual(1, ctx.compare_records(inst1, inst2))
# Descending by key4, inst1 < inst2
ctx = multi_cell_list.RecordSortContext(['key4'], ['desc'])
self.assertEqual(-1, ctx.compare_records(inst1, inst2))
def test_compare_multiple(self):
# key0 should not affect ordering, but key1 should
inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}
inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}
# Should be equivalent to ascending by key1
ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
['asc', 'asc'])
self.assertEqual(-1, ctx.compare_records(inst1, inst2))
# Should be equivalent to descending by key1
ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
['asc', 'desc'])
self.assertEqual(1, ctx.compare_records(inst1, inst2))
def test_wrapper(self):
inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}
inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}
ctx = context.RequestContext()
ctx.cell_uuid = uuids.cell
# Should sort by key1
sort_ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
['asc', 'asc'])
iw1 = multi_cell_list.RecordWrapper(ctx, sort_ctx, inst1)
iw2 = multi_cell_list.RecordWrapper(ctx, sort_ctx, inst2)
# Check this both ways to make sure we're comparing against -1
# and not just nonzero return from cmp()
self.assertTrue(iw1 < iw2)
self.assertFalse(iw2 < iw1)
# Should sort reverse by key1
sort_ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
['asc', 'desc'])
iw1 = multi_cell_list.RecordWrapper(ctx, sort_ctx, inst1)
iw2 = multi_cell_list.RecordWrapper(ctx, sort_ctx, inst2)
# Check this both ways to make sure we're comparing against -1
# and not just nonzero return from cmp()
self.assertTrue(iw1 > iw2)
self.assertFalse(iw2 > iw1)
# Make sure we can tell which cell a request came from
self.assertEqual(uuids.cell, iw1.cell_uuid)
def test_wrapper_sentinels(self):
inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}
ctx = context.RequestContext()
ctx.cell_uuid = uuids.cell
sort_ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
['asc', 'asc'])
iw1 = multi_cell_list.RecordWrapper(ctx, sort_ctx, inst1)
# Wrappers with sentinels
iw2 = multi_cell_list.RecordWrapper(ctx, sort_ctx,
context.did_not_respond_sentinel)
iw3 = multi_cell_list.RecordWrapper(ctx, sort_ctx,
context.raised_exception_sentinel)
# NOTE(danms): The sentinel wrappers always win
self.assertTrue(iw2 < iw1)
self.assertTrue(iw3 < iw1)
self.assertFalse(iw1 < iw2)
self.assertFalse(iw1 < iw3)
# NOTE(danms): Comparing two wrappers with sentinels will always return
# True for less-than because we're just naive about always favoring the
# left hand side. This is fine for our purposes but put it here to make
# it explicit.
self.assertTrue(iw2 < iw3)
self.assertTrue(iw3 < iw2)
def test_query_wrapper_success(self):
def test(ctx, data):
for thing in data:
yield thing
self.assertEqual([1, 2, 3],
list(multi_cell_list.query_wrapper(
None, test, [1, 2, 3])))
def test_query_wrapper_timeout(self):
def test(ctx):
raise exception.CellTimeout
self.assertEqual([context.did_not_respond_sentinel],
[x._db_record for x in
multi_cell_list.query_wrapper(
mock.MagicMock(), test)])
def test_query_wrapper_fail(self):
def test(ctx):
raise test.TestingException
self.assertEqual([context.raised_exception_sentinel],
[x._db_record for x in
multi_cell_list.query_wrapper(
mock.MagicMock(), test)])
class TestListContext(multi_cell_list.RecordSortContext):
def compare_records(self, rec1, rec2):
return -1
class TestLister(multi_cell_list.CrossCellLister):
CONTEXT_CLS = TestListContext
def __init__(self, data, sort_keys, sort_dirs,
cells=None, batch_size=None):
self._data = data
self._count_by_cell = {}
super(TestLister, self).__init__(self.CONTEXT_CLS(sort_keys,
sort_dirs),
cells=cells, batch_size=batch_size)
@property
def marker_identifier(self):
return 'id'
def _method_called(self, ctx, method, arg):
self._count_by_cell.setdefault(ctx.cell_uuid, {})
self._count_by_cell[ctx.cell_uuid].setdefault(method, [])
self._count_by_cell[ctx.cell_uuid][method].append(arg)
def call_summary(self, method):
results = {
'total': 0,
'count_by_cell': [],
'limit_by_cell': [],
'total_by_cell': [],
'called_in_cell': [],
}
for i, cell in enumerate(self._count_by_cell):
if method not in self._count_by_cell[cell]:
continue
results['total'] += len(self._count_by_cell[cell][method])
# List of number of calls in each cell
results['count_by_cell'].append(
len(self._count_by_cell[cell][method]))
# List of limits used in calls to each cell
results['limit_by_cell'].append(
self._count_by_cell[cell][method])
try:
# List of total results fetched from each cell
results['total_by_cell'].append(sum(
self._count_by_cell[cell][method]))
except TypeError:
# Don't do this for non-integer args
pass
results['called_in_cell'].append(cell)
results['count_by_cell'].sort()
results['limit_by_cell'].sort()
results['total_by_cell'].sort()
results['called_in_cell'].sort()
return results
def get_marker_record(self, ctx, marker):
self._method_called(ctx, 'get_marker_record', marker)
# Always assume this came from the second cell
cell = self.cells[1]
return cell.uuid, self._data[0]
def get_marker_by_values(self, ctx, values):
self._method_called(ctx, 'get_marker_by_values', values)
return self._data[0]
def get_by_filters(self, ctx, filters, limit, marker, **kwargs):
self._method_called(ctx, 'get_by_filters', limit)
if 'batch_size' in kwargs:
count = min(kwargs['batch_size'], limit)
else:
count = limit
batch = self._data[:count]
self._data = self._data[count:]
return batch
@contextmanager
def target_cell_cheater(context, target_cell):
# In order to help us do accounting, we need to mimic the real
# behavior where at least cell_uuid gets set on the context, which
# doesn't happen in the simple test fixture.
context = copy.deepcopy(context)
context.cell_uuid = target_cell.uuid
yield context
@mock.patch('nova.context.target_cell', new=target_cell_cheater)
class TestBatching(test.NoDBTestCase):
def setUp(self):
super(TestBatching, self).setUp()
self._data = [{'id': 'foo-%i' % i}
for i in range(0, 1000)]
self._cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
name='cell%i' % i)
for i in range(0, 10)]
def test_batches_not_needed(self):
lister = TestLister(self._data, [], [],
cells=self._cells, batch_size=10)
ctx = context.RequestContext()
res = list(lister.get_records_sorted(ctx, {}, 5, None))
self.assertEqual(5, len(res))
summary = lister.call_summary('get_by_filters')
# We only needed one batch per cell to hit the total,
# so we should have the same number of calls as cells
self.assertEqual(len(self._cells), summary['total'])
# One call per cell, hitting all cells
self.assertEqual(len(self._cells), len(summary['count_by_cell']))
self.assertTrue(all([
cell_count == 1 for cell_count in summary['count_by_cell']]))
def test_batches(self):
lister = TestLister(self._data, [], [],
cells=self._cells, batch_size=10)
ctx = context.RequestContext()
res = list(lister.get_records_sorted(ctx, {}, 500, None))
self.assertEqual(500, len(res))
summary = lister.call_summary('get_by_filters')
# Since we got everything from one cell (due to how things are sorting)
# we should have made 500 / 10 calls to one cell, and 1 call to
# the rest
calls_expected = [1 for cell in self._cells[1:]] + [500 / 10]
self.assertEqual(calls_expected, summary['count_by_cell'])
# Since we got everything from one cell (due to how things are sorting)
# we should have received 500 from one cell and 10 from the rest
count_expected = [10 for cell in self._cells[1:]] + [500]
self.assertEqual(count_expected, summary['total_by_cell'])
# Since we got everything from one cell (due to how things are sorting)
# we should have a bunch of calls for batches of 10, one each for
# every cell except the one that served the bulk of the requests which
# should have 500 / 10 batches of 10.
limit_expected = ([[10] for cell in self._cells[1:]] +
[[10 for i in range(0, 500 // 10)]])
self.assertEqual(limit_expected, summary['limit_by_cell'])
def test_no_batches(self):
lister = TestLister(self._data, [], [],
cells=self._cells)
ctx = context.RequestContext()
res = list(lister.get_records_sorted(ctx, {}, 50, None))
self.assertEqual(50, len(res))
summary = lister.call_summary('get_by_filters')
# Since we used no batches we should have one call per cell
calls_expected = [1 for cell in self._cells]
self.assertEqual(calls_expected, summary['count_by_cell'])
# Since we used no batches, each cell should have returned 50 results
count_expected = [50 for cell in self._cells]
self.assertEqual(count_expected, summary['total_by_cell'])
# Since we used no batches, each cell call should be for $limit
limit_expected = [[count] for count in count_expected]
self.assertEqual(limit_expected, summary['limit_by_cell'])
class FailureListContext(multi_cell_list.RecordSortContext):
def compare_records(self, rec1, rec2):
return 0
class FailureLister(TestLister):
CONTEXT_CLS = FailureListContext
def __init__(self, *a, **k):
super(FailureLister, self).__init__(*a, **k)
self._fails = {}
def set_fails(self, cell, fails):
self._fails[cell] = fails
def get_by_filters(self, ctx, *a, **k):
try:
action = self._fails[ctx.cell_uuid].pop(0)
except (IndexError, KeyError):
action = None
if action == context.did_not_respond_sentinel:
raise exception.CellTimeout
elif action == context.raised_exception_sentinel:
raise test.TestingException
else:
return super(FailureLister, self).get_by_filters(ctx, *a, **k)
@mock.patch('nova.context.target_cell', new=target_cell_cheater)
class TestBaseClass(test.NoDBTestCase):
def test_with_failing_cells(self):
data = [{'id': 'foo-%i' % i} for i in range(0, 100)]
cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
name='cell%i' % i)
for i in range(0, 3)]
lister = FailureLister(data, [], [], cells=cells)
# Two of the cells will fail, one with timeout and one
# with an error
lister.set_fails(uuids.cell0, [context.did_not_respond_sentinel])
lister.set_fails(uuids.cell1, [context.raised_exception_sentinel])
ctx = context.RequestContext()
result = lister.get_records_sorted(ctx, {}, 50, None, batch_size=10)
# We should still have 50 results since there are enough from the
# good cells to fill our limit.
self.assertEqual(50, len(list(result)))
# Make sure the counts line up
self.assertEqual(1, len(lister.cells_failed))
self.assertEqual(1, len(lister.cells_timed_out))
self.assertEqual(1, len(lister.cells_responded))
def test_with_failing_middle_cells(self):
data = [{'id': 'foo-%i' % i} for i in range(0, 100)]
cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
name='cell%i' % i)
for i in range(0, 3)]
lister = FailureLister(data, [], [], cells=cells)
# One cell will succeed and then time out, one will fail immediately,
# and the last will always work
lister.set_fails(uuids.cell0, [None, context.did_not_respond_sentinel])
lister.set_fails(uuids.cell1, [context.raised_exception_sentinel])
ctx = context.RequestContext()
result = lister.get_records_sorted(ctx, {}, 50, None,
batch_size=5)
# We should still have 50 results since there are enough from the
# good cells to fill our limit.
self.assertEqual(50, len(list(result)))
# Make sure the counts line up
self.assertEqual(1, len(lister.cells_responded))
self.assertEqual(1, len(lister.cells_failed))
self.assertEqual(1, len(lister.cells_timed_out))
def test_marker_cell_not_requeried(self):
data = [{'id': 'foo-%i' % i} for i in range(0, 100)]
cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
name='cell%i' % i)
for i in range(0, 3)]
lister = TestLister(data, [], [], cells=cells)
ctx = context.RequestContext()
result = list(lister.get_records_sorted(ctx, {}, 10, None))
result = list(lister.get_records_sorted(ctx, {}, 10, result[-1]['id']))
# get_marker_record() is called untargeted and its result defines which
# cell we skip.
gmr_summary = lister.call_summary('get_marker_record')
self.assertEqual([None], gmr_summary['called_in_cell'])
# All cells other than the second one should have been called for
# a local marker
gmbv_summary = lister.call_summary('get_marker_by_values')
self.assertEqual(sorted([cell.uuid for cell in cells
if cell.uuid != uuids.cell1]),
gmbv_summary['called_in_cell'])
|
|
import os
import errno
import fnmatch
import stat
try:
from . import msg, utils
from .exc_fmt import str_e
assert msg and str_e and utils
except ImportError:
    import msg
    import utils
    from exc_fmt import str_e
IGNORE_FILES = ['.gitignore', '.hgignore', '.flooignore']
HIDDEN_WHITELIST = ['.floo'] + IGNORE_FILES
BLACKLIST = [
'.DS_Store',
'.git',
'.svn',
'.hg',
]
# TODO: grab global git ignores:
# gitconfig_file = popen("git config -z --get core.excludesfile", "r");
DEFAULT_IGNORES = [
'#*',
'*.o',
'*.pyc',
'*~',
'extern/',
'node_modules/',
'tmp',
'vendor/',
]
MAX_FILE_SIZE = 1024 * 1024 * 5
IS_IG_IGNORED = 1
IS_IG_CHECK_CHILD = 2
def create_flooignore(path):
flooignore = os.path.join(path, '.flooignore')
# A very short race condition, but whatever.
if os.path.exists(flooignore):
return
try:
with open(flooignore, 'w') as fd:
fd.write('\n'.join(DEFAULT_IGNORES))
except Exception as e:
msg.error('Error creating default .flooignore: ', str_e(e))
def create_ignore_tree(path):
create_flooignore(path)
ig = Ignore(path)
ig.ignores['/DEFAULT/'] = BLACKLIST
ig.recurse(ig)
return ig
class Ignore(object):
def __init__(self, path, parent=None):
self.parent = parent
self.size = 0
self.total_size = 0
self.children = {}
self.files = []
self.ignores = {
'/TOO_BIG/': []
}
self.path = utils.unfuck_path(path)
def recurse(self, root):
try:
paths = os.listdir(self.path)
except OSError as e:
if e.errno != errno.ENOTDIR:
msg.error('Error listing path ', self.path, ': ', str_e(e))
return
except Exception as e:
msg.error('Error listing path ', self.path, ': ', str_e(e))
return
msg.debug('Initializing ignores for ', self.path)
for ignore_file in IGNORE_FILES:
try:
self.load(ignore_file)
except Exception:
pass
for p in paths:
if p == '.' or p == '..':
continue
if p in BLACKLIST:
msg.log('Ignoring blacklisted file ', p)
continue
p_path = os.path.join(self.path, p)
try:
s = os.stat(p_path)
except Exception as e:
msg.error('Error stat()ing path ', p_path, ': ', str_e(e))
continue
if stat.S_ISREG(s.st_mode) and p in HIDDEN_WHITELIST:
# Don't count these whitelisted files in size
self.files.append(p_path)
continue
is_dir = stat.S_ISDIR(s.st_mode)
if root.is_ignored(p_path, is_dir, True):
continue
if is_dir:
ig = Ignore(p_path, self)
self.children[p] = ig
ig.recurse(root)
self.total_size += ig.total_size
continue
if stat.S_ISREG(s.st_mode):
if s.st_size > (MAX_FILE_SIZE):
self.ignores['/TOO_BIG/'].append(p)
msg.log(self.is_ignored_message(p_path, p, '/TOO_BIG/', False))
else:
self.size += s.st_size
self.total_size += s.st_size
self.files.append(p_path)
def load(self, ignore_file):
with open(os.path.join(self.path, ignore_file), 'r') as fd:
ignores = fd.read()
rules = []
for ignore in ignores.split('\n'):
ignore = ignore.strip()
if len(ignore) == 0:
continue
if ignore[0] == '#':
continue
if ignore == '!':
continue
msg.debug('Adding ', ignore, ' to ignore patterns')
rules.insert(0, ignore)
self.ignores[ignore_file] = rules
def get_children(self):
children = list(self.children.values())
for c in self.children.values():
children += c.get_children()
return children
def list_paths(self):
for f in self.files:
yield os.path.join(self.path, f)
for c in self.children.values():
for p in c.list_paths():
yield p
def is_ignored_message(self, rel_path, pattern, ignore_file, exclude):
path = os.path.join(self.path, rel_path)
exclude_msg = ''
if exclude:
exclude_msg = '__NOT__ '
if ignore_file == '/TOO_BIG/':
return '%s %signored because it is too big (more than %s bytes)' % (path, exclude_msg, MAX_FILE_SIZE)
return '%s %signored by pattern %s in %s' % (path, exclude_msg, pattern, os.path.join(self.path, ignore_file))
def is_ignored(self, path, is_dir=None, log=False):
if is_dir is None:
try:
s = os.stat(path)
except Exception as e:
                msg.error('Error stat()ing path ', path, ': ', str_e(e))
return True
is_dir = stat.S_ISDIR(s.st_mode)
rel_path = os.path.relpath(path, self.path).replace(os.sep, '/')
return self._is_ignored(rel_path, is_dir, log)
def _is_ignored(self, rel_path, is_dir, log):
base_path, file_name = os.path.split(rel_path)
if not is_dir and file_name in HIDDEN_WHITELIST:
return False
for ignore_file, patterns in self.ignores.items():
for pattern in patterns:
orig_pattern = pattern
exclude = False
match = False
if pattern[0] == "!":
exclude = True
pattern = pattern[1:]
if not pattern:
continue
if pattern[0] == '/':
match = fnmatch.fnmatch(rel_path, pattern[1:])
else:
if len(pattern) > 0 and pattern[-1] == '/':
if is_dir:
pattern = pattern[:-1]
                    if fnmatch.fnmatch(file_name, pattern):
                        match = True
                    elif fnmatch.fnmatch(rel_path, pattern):
                        match = True
if match:
if log:
msg.log(self.is_ignored_message(rel_path, orig_pattern, ignore_file, exclude))
if exclude:
return False
return True
split = rel_path.split("/", 1)
if len(split) != 2:
return False
name, new_path = split
ig = self.children.get(name)
if ig:
return ig._is_ignored(new_path, is_dir, log)
return False
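# A short usage sketch, assuming this module's msg/utils helpers are importable
# (i.e. it runs inside the plugin package); the project path is hypothetical.
def _example_ignore_usage(project_root='/path/to/some/project'):
    # Build the ignore tree; this also writes a default .flooignore if missing.
    ig = create_ignore_tree(project_root)
    # Iterate every file that survived the ignore rules.
    for p in ig.list_paths():
        print(p)
    # Query a single path; is_dir is stat()ed automatically when omitted.
    return ig.is_ignored(os.path.join(project_root, 'node_modules', 'x.js'),
                         log=True)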
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/traffic-engineering/tlvs/tlv/link/sub-tlvs/sub-tlv/unreserved-bandwidths/unreserved-bandwidth/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters relating to the unreserved
bandwidth of the link being described
"""
__slots__ = ("_path_helper", "_extmethods", "__priority", "__unreserved_bandwidth")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__unreserved_bandwidth = YANGDynClass(
base=RestrictedClassType(
base_type=bitarray, restriction_dict={"length": ["32"]}
),
is_leaf=True,
yang_name="unreserved-bandwidth",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:ieeefloat32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"traffic-engineering",
"tlvs",
"tlv",
"link",
"sub-tlvs",
"sub-tlv",
"unreserved-bandwidths",
"unreserved-bandwidth",
"state",
]
def _get_priority(self):
"""
Getter method for priority, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths/unreserved_bandwidth/state/priority (uint8)
YANG Description: The priority level being described
"""
return self.__priority
def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths/unreserved_bandwidth/state/priority (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
YANG Description: The priority level being described
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """priority must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..7']}), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__priority = t
if hasattr(self, "_set"):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_unreserved_bandwidth(self):
"""
Getter method for unreserved_bandwidth, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths/unreserved_bandwidth/state/unreserved_bandwidth (oc-types:ieeefloat32)
YANG Description: The unreserved bandwidth for at priority level P, where P is
equal to the priority of the current list entry. The reservable
bandwidth at priority P is equal to the sum of the reservable
bandwidth at all levels 0..P.
"""
return self.__unreserved_bandwidth
def _set_unreserved_bandwidth(self, v, load=False):
"""
Setter method for unreserved_bandwidth, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths/unreserved_bandwidth/state/unreserved_bandwidth (oc-types:ieeefloat32)
If this variable is read-only (config: false) in the
source YANG file, then _set_unreserved_bandwidth is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_unreserved_bandwidth() directly.
YANG Description: The unreserved bandwidth for at priority level P, where P is
equal to the priority of the current list entry. The reservable
bandwidth at priority P is equal to the sum of the reservable
bandwidth at all levels 0..P.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=bitarray, restriction_dict={"length": ["32"]}
),
is_leaf=True,
yang_name="unreserved-bandwidth",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:ieeefloat32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """unreserved_bandwidth must be of a type compatible with oc-types:ieeefloat32""",
"defined-type": "oc-types:ieeefloat32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=bitarray, restriction_dict={'length': ['32']}), is_leaf=True, yang_name="unreserved-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:ieeefloat32', is_config=False)""",
}
)
self.__unreserved_bandwidth = t
if hasattr(self, "_set"):
self._set()
def _unset_unreserved_bandwidth(self):
self.__unreserved_bandwidth = YANGDynClass(
base=RestrictedClassType(
base_type=bitarray, restriction_dict={"length": ["32"]}
),
is_leaf=True,
yang_name="unreserved-bandwidth",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:ieeefloat32",
is_config=False,
)
priority = __builtin__.property(_get_priority)
unreserved_bandwidth = __builtin__.property(_get_unreserved_bandwidth)
_pyangbind_elements = OrderedDict(
[("priority", priority), ("unreserved_bandwidth", unreserved_bandwidth)]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/traffic-engineering/tlvs/tlv/link/sub-tlvs/sub-tlv/unreserved-bandwidths/unreserved-bandwidth/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters relating to the unreserved
bandwidth of the link being described
"""
__slots__ = ("_path_helper", "_extmethods", "__priority", "__unreserved_bandwidth")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__unreserved_bandwidth = YANGDynClass(
base=RestrictedClassType(
base_type=bitarray, restriction_dict={"length": ["32"]}
),
is_leaf=True,
yang_name="unreserved-bandwidth",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:ieeefloat32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"traffic-engineering",
"tlvs",
"tlv",
"link",
"sub-tlvs",
"sub-tlv",
"unreserved-bandwidths",
"unreserved-bandwidth",
"state",
]
def _get_priority(self):
"""
Getter method for priority, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths/unreserved_bandwidth/state/priority (uint8)
YANG Description: The priority level being described
"""
return self.__priority
def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths/unreserved_bandwidth/state/priority (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
YANG Description: The priority level being described
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """priority must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..7']}), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__priority = t
if hasattr(self, "_set"):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..7"]},
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_unreserved_bandwidth(self):
"""
Getter method for unreserved_bandwidth, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths/unreserved_bandwidth/state/unreserved_bandwidth (oc-types:ieeefloat32)
YANG Description: The unreserved bandwidth for at priority level P, where P is
equal to the priority of the current list entry. The reservable
bandwidth at priority P is equal to the sum of the reservable
bandwidth at all levels 0..P.
"""
return self.__unreserved_bandwidth
def _set_unreserved_bandwidth(self, v, load=False):
"""
Setter method for unreserved_bandwidth, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/unreserved_bandwidths/unreserved_bandwidth/state/unreserved_bandwidth (oc-types:ieeefloat32)
If this variable is read-only (config: false) in the
source YANG file, then _set_unreserved_bandwidth is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_unreserved_bandwidth() directly.
YANG Description: The unreserved bandwidth for at priority level P, where P is
equal to the priority of the current list entry. The reservable
bandwidth at priority P is equal to the sum of the reservable
bandwidth at all levels 0..P.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=bitarray, restriction_dict={"length": ["32"]}
),
is_leaf=True,
yang_name="unreserved-bandwidth",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:ieeefloat32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """unreserved_bandwidth must be of a type compatible with oc-types:ieeefloat32""",
"defined-type": "oc-types:ieeefloat32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=bitarray, restriction_dict={'length': ['32']}), is_leaf=True, yang_name="unreserved-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:ieeefloat32', is_config=False)""",
}
)
self.__unreserved_bandwidth = t
if hasattr(self, "_set"):
self._set()
def _unset_unreserved_bandwidth(self):
self.__unreserved_bandwidth = YANGDynClass(
base=RestrictedClassType(
base_type=bitarray, restriction_dict={"length": ["32"]}
),
is_leaf=True,
yang_name="unreserved-bandwidth",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:ieeefloat32",
is_config=False,
)
priority = __builtin__.property(_get_priority)
unreserved_bandwidth = __builtin__.property(_get_unreserved_bandwidth)
_pyangbind_elements = OrderedDict(
[("priority", priority), ("unreserved_bandwidth", unreserved_bandwidth)]
)
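# A small, hedged sketch of how one of these generated containers is typically
# inspected (assuming the pyangbind runtime imported above is installed). Both
# leaves are config false (read-only), so they are only read here, never set.
def _example_state_usage():
    s = state()
    # The generated class lists its leaves in _pyangbind_elements.
    print(list(s._pyangbind_elements))   # ['priority', 'unreserved_bandwidth']
    # Each leaf is exposed as a read-only property wrapping a YANGDynClass.
    print(s.priority)
    # The YANG path of this container within the openconfig model.
    print(s._path())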
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A simple filesystem-backed store
"""
import errno
import logging
import os
import stat
import jsonschema
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import units
from six.moves import urllib
import glance_store
from glance_store import capabilities
from glance_store.common import utils
import glance_store.driver
from glance_store import exceptions
from glance_store.i18n import _, _LE, _LW
import glance_store.location
LOG = logging.getLogger(__name__)
_FILESYSTEM_CONFIGS = [
cfg.StrOpt('filesystem_store_datadir',
default='/var/lib/glance/images',
help="""
Directory to which the filesystem backend store writes images.
Upon start up, Glance creates the directory if it doesn't already
exist and verifies write access to the user under which
``glance-api`` runs. If the write access isn't available, a
``BadStoreConfiguration`` exception is raised and the filesystem
store may not be available for adding new images.
NOTE: This directory is used only when filesystem store is used as a
storage backend. Either ``filesystem_store_datadir`` or
``filesystem_store_datadirs`` option must be specified in
``glance-api.conf``. If both options are specified, a
``BadStoreConfiguration`` will be raised and the filesystem store
may not be available for adding new images.
Possible values:
* A valid path to a directory
Related options:
* ``filesystem_store_datadirs``
* ``filesystem_store_file_perm``
"""),
cfg.MultiStrOpt('filesystem_store_datadirs',
help="""
List of directories and their priorities to which the filesystem
backend store writes images.
The filesystem store can be configured to store images in multiple
directories as opposed to using a single directory specified by the
``filesystem_store_datadir`` configuration option. When using
multiple directories, each directory can be given an optional
priority to specify the preference order in which they should
be used. Priority is an integer that is concatenated to the
directory path with a colon where a higher value indicates higher
priority. When two directories have the same priority, the directory
with most free space is used. When no priority is specified, it
defaults to zero.
More information on configuring filesystem store with multiple store
directories can be found at
https://docs.openstack.org/glance/latest/configuration/configuring.html
NOTE: This directory is used only when filesystem store is used as a
storage backend. Either ``filesystem_store_datadir`` or
``filesystem_store_datadirs`` option must be specified in
``glance-api.conf``. If both options are specified, a
``BadStoreConfiguration`` will be raised and the filesystem store
may not be available for adding new images.
Possible values:
* List of strings of the following form:
* ``<a valid directory path>:<optional integer priority>``
Related options:
* ``filesystem_store_datadir``
* ``filesystem_store_file_perm``
"""),
cfg.StrOpt('filesystem_store_metadata_file',
help="""
Filesystem store metadata file.
The path to a file which contains the metadata to be returned with any location
associated with the filesystem store. Once this option is set, it is used for
new images created afterward only - previously existing images are not
affected.
The file must contain a valid JSON object. The object should contain the keys
``id`` and ``mountpoint``. The value for both keys should be a string.
Possible values:
* A valid path to the store metadata file
Related options:
* None
"""),
cfg.IntOpt('filesystem_store_file_perm',
default=0,
help="""
File access permissions for the image files.
Set the intended file access permissions for image data. This provides
a way to enable other services, e.g. Nova, to consume images directly
from the filesystem store. The users running the services that are
intended to be given access to could be made a member of the group
that owns the files created. Assigning a value less than or equal to
zero for this configuration option signifies that no changes be made
to the default permissions. This value will be decoded as an octal
digit.
For more information, please refer the documentation at
https://docs.openstack.org/glance/latest/configuration/configuring.html
Possible values:
* A valid file access permission
* Zero
* Any negative integer
Related options:
* None
"""),
cfg.IntOpt('filesystem_store_chunk_size',
default=64 * units.Ki,
min=1,
help="""
Chunk size, in bytes.
The chunk size used when reading or writing image files. Raising this value
may improve the throughput but it may also slightly increase the memory usage
when handling a large number of requests.
Possible Values:
* Any positive integer value
Related options:
* None
"""),
cfg.BoolOpt('filesystem_thin_provisioning',
default=False,
help="""
Enable or disable thin provisioning in this backend.
This configuration option enables the feature of not actually writing null byte
sequences to the filesystem; the holes that would otherwise be written are
interpreted by the filesystem as null bytes and do not consume storage space.
Enabling this feature also speeds up image upload and saves network traffic,
in addition to saving space in the backend, because null byte sequences are
not sent over the network.
Possible Values:
* True
* False
Related options:
* None
"""),
]
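# A hedged illustration of how two of the options above are interpreted by the
# code further down in this module; the example values are made up.
def _example_option_parsing():
    # filesystem_store_datadirs entries have the form "<path>[:<priority>]",
    # with the priority defaulting to 0 when omitted
    # (see _get_datadir_path_and_priority below).
    for entry in ["/mnt/fast_ssd/images:200", "/var/lib/glance/images"]:
        parts = [part.strip() for part in entry.rsplit(":", 1)]
        priority = int(parts[1]) if len(parts) == 2 and parts[1] else 0
        print(parts[0], priority)
    # filesystem_store_file_perm is written as decimal digits but decoded as
    # octal (see configure_add below), e.g. 640 -> 0o640 (rw-r-----).
    print(oct(int(str(640), 8)))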
MULTI_FILESYSTEM_METADATA_SCHEMA = {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"mountpoint": {"type": "string"}
},
"required": ["id", "mountpoint"],
}
}
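# A hedged example of metadata that satisfies the schema above; the id and
# mountpoint values are illustrative. _validate_metadata() below also accepts
# a single JSON object and wraps it in a list before validating.
def _example_metadata_validation():
    example_metadata = [
        {"id": "store-1", "mountpoint": "/var/lib/glance/images"},
        {"id": "store-2", "mountpoint": "/mnt/nfs/images"},
    ]
    # Raises jsonschema.exceptions.ValidationError if the structure is wrong.
    jsonschema.validate(example_metadata, MULTI_FILESYSTEM_METADATA_SCHEMA)
    return example_metadata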
class StoreLocation(glance_store.location.StoreLocation):
"""Class describing a Filesystem URI."""
def process_specs(self):
self.scheme = self.specs.get('scheme', 'file')
self.path = self.specs.get('path')
def get_uri(self):
return "file://%s" % self.path
def parse_uri(self, uri):
"""
Parse URLs. This method fixes an issue where credentials specified
in the URL are interpreted differently in Python 2.6.1+ than prior
versions of Python.
"""
pieces = urllib.parse.urlparse(uri)
self.validate_schemas(uri, valid_schemas=('file://', 'filesystem://'))
self.scheme = pieces.scheme
path = (pieces.netloc + pieces.path).strip()
if path == '':
reason = _("No path specified in URI")
LOG.info(reason)
raise exceptions.BadStoreUri(message=reason)
self.path = path
class ChunkedFile(object):
"""
We send this back to the Glance API server as
something that can iterate over a large file
"""
def __init__(self, filepath, offset=0, chunk_size=4096,
partial_length=None):
self.filepath = filepath
self.chunk_size = chunk_size
self.partial_length = partial_length
self.partial = self.partial_length is not None
self.fp = open(self.filepath, 'rb')
if offset:
self.fp.seek(offset)
def __iter__(self):
"""Return an iterator over the image file."""
try:
if self.fp:
while True:
if self.partial:
size = min(self.chunk_size, self.partial_length)
else:
size = self.chunk_size
chunk = self.fp.read(size)
if chunk:
yield chunk
if self.partial:
self.partial_length -= len(chunk)
if self.partial_length <= 0:
break
else:
break
finally:
self.close()
def close(self):
"""Close the internal file pointer"""
if self.fp:
self.fp.close()
self.fp = None
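# A brief, hedged usage sketch for ChunkedFile; the image path is hypothetical.
def _example_chunked_read(path='/var/lib/glance/images/some-image-id'):
    total = 0
    # Stream the file in fixed-size chunks instead of loading it into memory;
    # the file handle is closed automatically when iteration finishes.
    for chunk in ChunkedFile(path, offset=0, chunk_size=64 * units.Ki):
        total += len(chunk)
    return total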
class Store(glance_store.driver.Store):
_CAPABILITIES = (capabilities.BitMasks.READ_RANDOM |
capabilities.BitMasks.WRITE_ACCESS |
capabilities.BitMasks.DRIVER_REUSABLE)
OPTIONS = _FILESYSTEM_CONFIGS
FILESYSTEM_STORE_METADATA = None
def get_schemes(self):
return ('file', 'filesystem')
def _check_write_permission(self, datadir):
"""
Checks if directory created to write image files has
write permission.
        :datadir is a directory path in which glance writes image files.
:raises: BadStoreConfiguration exception if datadir is read-only.
"""
if not os.access(datadir, os.W_OK):
msg = (_("Permission to write in %s denied") % datadir)
LOG.exception(msg)
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=msg)
def _set_exec_permission(self, datadir):
"""
        Set the execute permission for owner-group and/or other-users on the
        image directory if the image files it contains need the corresponding
        access permissions.
:datadir is a directory path in which glance writes image files.
"""
if self.backend_group:
fstore_perm = getattr(
self.conf, self.backend_group).filesystem_store_file_perm
else:
fstore_perm = self.conf.glance_store.filesystem_store_file_perm
if fstore_perm <= 0:
return
try:
mode = os.stat(datadir)[stat.ST_MODE]
perm = int(str(fstore_perm), 8)
if perm & stat.S_IRWXO > 0:
if not mode & stat.S_IXOTH:
# chmod o+x
mode |= stat.S_IXOTH
os.chmod(datadir, mode)
if perm & stat.S_IRWXG > 0:
if not mode & stat.S_IXGRP:
# chmod g+x
os.chmod(datadir, mode | stat.S_IXGRP)
except (IOError, OSError):
LOG.warning(_LW("Unable to set execution permission of "
"owner-group and/or other-users to datadir: %s")
% datadir)
def _create_image_directories(self, directory_paths):
"""
Create directories to write image files if
it does not exist.
:directory_paths is a list of directories belonging to glance store.
:raises: BadStoreConfiguration exception if creating a directory fails.
"""
for datadir in directory_paths:
if os.path.exists(datadir):
self._check_write_permission(datadir)
self._set_exec_permission(datadir)
else:
msg = _("Directory to write image files does not exist "
"(%s). Creating.") % datadir
LOG.info(msg)
try:
os.makedirs(datadir)
self._check_write_permission(datadir)
self._set_exec_permission(datadir)
except (IOError, OSError):
if os.path.exists(datadir):
# NOTE(markwash): If the path now exists, some other
# process must have beat us in the race condition.
# But it doesn't hurt, so we can safely ignore
# the error.
self._check_write_permission(datadir)
self._set_exec_permission(datadir)
continue
reason = _("Unable to create datadir: %s") % datadir
LOG.error(reason)
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=reason)
def _validate_metadata(self, metadata_file):
"""Validate metadata against json schema.
If metadata is valid then cache metadata and use it when
creating new image.
:param metadata_file: JSON metadata file path
:raises: BadStoreConfiguration exception if metadata is not valid.
"""
try:
with open(metadata_file, 'rb') as fptr:
metadata = jsonutils.load(fptr)
if isinstance(metadata, dict):
# If metadata is of type dictionary
# i.e. - it contains only one mountpoint
# then convert it to list of dictionary.
metadata = [metadata]
# Validate metadata against json schema
jsonschema.validate(metadata, MULTI_FILESYSTEM_METADATA_SCHEMA)
glance_store.check_location_metadata(metadata)
self.FILESYSTEM_STORE_METADATA = metadata
except (jsonschema.exceptions.ValidationError,
exceptions.BackendException, ValueError) as vee:
err_msg = encodeutils.exception_to_unicode(vee)
reason = _('The JSON in the metadata file %(file)s is '
'not valid and it can not be used: '
'%(vee)s.') % dict(file=metadata_file,
vee=err_msg)
LOG.error(reason)
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=reason)
except IOError as ioe:
err_msg = encodeutils.exception_to_unicode(ioe)
reason = _('The path for the metadata file %(file)s could '
'not be accessed: '
'%(ioe)s.') % dict(file=metadata_file,
ioe=err_msg)
LOG.error(reason)
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=reason)
def configure_add(self):
"""
Configure the Store to use the stored configuration options
Any store that needs special configuration should implement
this method. If the store was not able to successfully configure
itself, it should raise `exceptions.BadStoreConfiguration`
"""
if self.backend_group:
store_conf = getattr(self.conf, self.backend_group)
else:
store_conf = self.conf.glance_store
fdir = store_conf.filesystem_store_datadir
fdirs = store_conf.filesystem_store_datadirs
fstore_perm = store_conf.filesystem_store_file_perm
meta_file = store_conf.filesystem_store_metadata_file
self.thin_provisioning = store_conf.\
filesystem_thin_provisioning
self.chunk_size = store_conf.filesystem_store_chunk_size
self.READ_CHUNKSIZE = self.chunk_size
self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE
if not (fdir or fdirs):
reason = (_("Specify at least 'filesystem_store_datadir' or "
"'filesystem_store_datadirs' option"))
LOG.error(reason)
raise exceptions.BadStoreConfiguration(store_name="filesystem",
reason=reason)
if fdir and fdirs:
reason = (_("Specify either 'filesystem_store_datadir' or "
"'filesystem_store_datadirs' option"))
LOG.error(reason)
raise exceptions.BadStoreConfiguration(store_name="filesystem",
reason=reason)
if fstore_perm > 0:
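            # The configured value is interpreted as an octal mode, e.g. a
            # filesystem_store_file_perm of 644 becomes mode 0o644; the check
            # below only requires that the owner keeps read access.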
perm = int(str(fstore_perm), 8)
if not perm & stat.S_IRUSR:
reason = _LE("Specified an invalid "
"'filesystem_store_file_perm' option which "
"could make image file to be unaccessible by "
"glance service.")
LOG.error(reason)
reason = _("Invalid 'filesystem_store_file_perm' option.")
raise exceptions.BadStoreConfiguration(store_name="filesystem",
reason=reason)
self.multiple_datadirs = False
directory_paths = set()
if fdir:
self.datadir = fdir
directory_paths.add(self.datadir)
else:
self.multiple_datadirs = True
self.priority_data_map = {}
for datadir in fdirs:
(datadir_path,
priority) = self._get_datadir_path_and_priority(datadir)
priority_paths = self.priority_data_map.setdefault(
priority, [])
self._check_directory_paths(datadir_path, directory_paths,
priority_paths)
directory_paths.add(datadir_path)
priority_paths.append(datadir_path)
self.priority_list = sorted(self.priority_data_map,
reverse=True)
self._create_image_directories(directory_paths)
if self.backend_group:
self._set_url_prefix()
if meta_file:
self._validate_metadata(meta_file)
def _set_url_prefix(self):
path = self._find_best_datadir(0)
self._url_prefix = "%s://%s" % ('file', path)
def _check_directory_paths(self, datadir_path, directory_paths,
priority_paths):
"""
        Check whether datadir_path is already present in directory_paths.
        :param datadir_path: directory path to check
        :param directory_paths: set of all directory paths seen so far
        :param priority_paths: directory paths already registered for the
            same priority
        :raises: BadStoreConfiguration exception if the directory path is
            already present with a different priority; an exact duplicate
            (same priority) only triggers a warning.
"""
if datadir_path in directory_paths:
msg = (_("Directory %(datadir_path)s specified "
"multiple times in filesystem_store_datadirs "
"option of filesystem configuration") %
{'datadir_path': datadir_path})
# If present with different priority it's a bad configuration
if datadir_path not in priority_paths:
LOG.exception(msg)
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=msg)
            # Present with the same priority (an exact duplicate) only
            # deserves a warning
LOG.warning(msg)
def _get_datadir_path_and_priority(self, datadir):
"""
        Get a directory path and its priority from the
        filesystem_store_datadirs option in glance-api.conf.
        :param datadir: directory path, optionally suffixed with a priority
            ("<path>" or "<path>:<priority>")
        :returns: tuple of (datadir_path, priority); the priority defaults
            to 0 when it is not specified
:raises: BadStoreConfiguration exception if priority is invalid or
empty directory path is specified.
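        An illustrative, hypothetical sketch of the parsing rule only
        (the ``parse_datadir`` helper below is not part of this store):
        >>> def parse_datadir(entry):
        ...     parts = [p.strip() for p in entry.rsplit(":", 1)]
        ...     if len(parts) == 2 and parts[1]:
        ...         return parts[0], int(parts[1])
        ...     return parts[0], 0
        >>> parse_datadir("/var/lib/glance/images/:100")
        ('/var/lib/glance/images/', 100)
        >>> parse_datadir("/var/lib/glance/images/")
        ('/var/lib/glance/images/', 0)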
"""
priority = 0
parts = [part.strip() for part in datadir.rsplit(":", 1)]
datadir_path = parts[0]
if len(parts) == 2 and parts[1]:
try:
priority = int(parts[1])
except ValueError:
msg = (_("Invalid priority value %(priority)s in "
"filesystem configuration") % {'priority': priority})
LOG.exception(msg)
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=msg)
if not datadir_path:
msg = _("Invalid directory specified in filesystem configuration")
LOG.exception(msg)
raise exceptions.BadStoreConfiguration(
store_name="filesystem", reason=msg)
return datadir_path, priority
@staticmethod
def _resolve_location(location):
filepath = location.store_location.path
if not os.path.exists(filepath):
raise exceptions.NotFound(image=filepath)
filesize = os.path.getsize(filepath)
return filepath, filesize
def _get_metadata(self, filepath):
"""Return metadata dictionary.
        If metadata is provided as a list of dictionaries, return the single
        dictionary (containing 'id' and 'mountpoint') that matches the image.
        When multiple NFS directories (mountpoints) are configured for
        glance, the metadata JSON file is written as a list of dictionaries,
        each holding a mountpoint and a unique id. Nova cannot tell which
        directory (mountpoint) an image lives in if that whole list is stored
        in the glance image metadata, so with multiple mountpoints only the
        dictionary for the mountpoint that actually contains the image is
        returned.
        If the image path does not start with any 'mountpoint' listed in the
        metadata JSON file, an error is logged and an empty dictionary is
        returned.
:param filepath: Path of image on store
:returns: metadata dictionary
"""
if self.FILESYSTEM_STORE_METADATA:
for image_meta in self.FILESYSTEM_STORE_METADATA:
if filepath.startswith(image_meta['mountpoint']):
return image_meta
reason = (_LE("The image path %(path)s does not match with "
"any of the mountpoint defined in "
"metadata: %(metadata)s. An empty dictionary "
"will be returned to the client.")
% dict(path=filepath,
metadata=self.FILESYSTEM_STORE_METADATA))
LOG.error(reason)
return {}
@capabilities.check
def get(self, location, offset=0, chunk_size=None, context=None):
"""
Takes a `glance_store.location.Location` object that indicates
where to find the image file, and returns a tuple of generator
(for reading the image file) and image_size
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises: `glance_store.exceptions.NotFound` if image does not exist
"""
filepath, filesize = self._resolve_location(location)
msg = _("Found image at %s. Returning in ChunkedFile.") % filepath
LOG.debug(msg)
return (ChunkedFile(filepath,
offset=offset,
chunk_size=self.READ_CHUNKSIZE,
partial_length=chunk_size),
chunk_size or filesize)
def get_size(self, location, context=None):
"""
Takes a `glance_store.location.Location` object that indicates
where to find the image file and returns the image size
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises: `glance_store.exceptions.NotFound` if image does not exist
:rtype: int
"""
filepath, filesize = self._resolve_location(location)
msg = _("Found image at %s.") % filepath
LOG.debug(msg)
return filesize
@capabilities.check
def delete(self, location, context=None):
"""
Takes a `glance_store.location.Location` object that indicates
where to find the image file to delete
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises: NotFound if image does not exist
:raises: Forbidden if cannot delete because of permissions
"""
loc = location.store_location
fn = loc.path
if os.path.exists(fn):
try:
LOG.debug(_("Deleting image at %(fn)s"), {'fn': fn})
os.unlink(fn)
except OSError:
raise exceptions.Forbidden(
message=(_("You cannot delete file %s") % fn))
else:
raise exceptions.NotFound(image=fn)
def _get_capacity_info(self, mount_point):
"""Calculates total available space for given mount point.
:mount_point is path of glance data directory
"""
# Calculate total available space
stvfs_result = os.statvfs(mount_point)
total_available_space = stvfs_result.f_bavail * stvfs_result.f_bsize
return max(0, total_available_space)
def _find_best_datadir(self, image_size):
"""Finds the best datadir by priority and free space.
Traverse directories returning the first one that has sufficient
free space, in priority order. If two suitable directories have
the same priority, choose the one with the most free space
available.
:param image_size: size of image being uploaded.
:returns: best_datadir as directory path of the best priority datadir.
:raises: exceptions.StorageFull if there is no datadir in
self.priority_data_map that can accommodate the image.
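        A simplified, illustrative sketch of the selection rule over
        hypothetical (priority -> [(path, free_bytes)]) data; the real
        method queries the filesystem via os.statvfs instead:
        >>> def pick(priority_map, image_size):
        ...     for priority in sorted(priority_map, reverse=True):
        ...         fits = [(free, path) for path, free in
        ...                 priority_map[priority] if free >= image_size]
        ...         if fits:
        ...             return max(fits)[1]
        >>> data = {200: [('/a', 10), ('/b', 50)], 100: [('/c', 500)]}
        >>> pick(data, 30)
        '/b'
        >>> pick(data, 100)
        '/c'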
"""
if not self.multiple_datadirs:
return self.datadir
best_datadir = None
max_free_space = 0
for priority in self.priority_list:
for datadir in self.priority_data_map.get(priority):
free_space = self._get_capacity_info(datadir)
if free_space >= image_size and free_space > max_free_space:
max_free_space = free_space
best_datadir = datadir
# If datadir is found which can accommodate image and has maximum
# free space for the given priority then break the loop,
# else continue to lookup further.
if best_datadir:
break
else:
msg = (_("There is no enough disk space left on the image "
"storage media. requested=%s") % image_size)
LOG.exception(msg)
raise exceptions.StorageFull(message=msg)
return best_datadir
@glance_store.driver.back_compat_add
@capabilities.check
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
verifier=None):
"""
Stores an image file with supplied identifier to the backend
storage system and returns a tuple containing information
about the stored image.
:param image_id: The opaque image identifier
:param image_file: The image data to write, as a file-like object
:param image_size: The size of the image data to write, in bytes
:param hashing_algo: A hashlib algorithm identifier (string)
:param context: The request context
:param verifier: An object used to verify signatures for images
:returns: tuple of: (1) URL in backing store, (2) bytes written,
(3) checksum, (4) multihash value, and (5) a dictionary
with storage system specific information
:raises: `glance_store.exceptions.Duplicate` if the image already
exists
:note:: By default, the backend writes the image data to a file
`/<DATADIR>/<ID>`, where <DATADIR> is the value of
the filesystem_store_datadir configuration option and <ID>
is the supplied image ID.
"""
datadir = self._find_best_datadir(image_size)
filepath = os.path.join(datadir, str(image_id))
if os.path.exists(filepath):
raise exceptions.Duplicate(image=filepath)
os_hash_value = utils.get_hasher(hashing_algo, False)
checksum = utils.get_hasher('md5', False)
bytes_written = 0
try:
with open(filepath, 'wb') as f:
for buf in utils.chunkreadable(image_file,
self.WRITE_CHUNKSIZE):
bytes_written += len(buf)
os_hash_value.update(buf)
checksum.update(buf)
if verifier:
verifier.update(buf)
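                    # With thin provisioning enabled, chunks that are entirely
                    # zero are not written out; the file is instead grown
                    # sparsely by truncating to the new length and seeking to
                    # its end before the next chunk.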
if self.thin_provisioning and not any(buf):
f.truncate(bytes_written)
f.seek(0, os.SEEK_END)
else:
f.write(buf)
except IOError as e:
if e.errno != errno.EACCES:
self._delete_partial(filepath, image_id)
errors = {errno.EFBIG: exceptions.StorageFull(),
errno.ENOSPC: exceptions.StorageFull(),
errno.EACCES: exceptions.StorageWriteDenied()}
raise errors.get(e.errno, e)
except Exception:
with excutils.save_and_reraise_exception():
self._delete_partial(filepath, image_id)
hash_hex = os_hash_value.hexdigest()
checksum_hex = checksum.hexdigest()
metadata = self._get_metadata(filepath)
LOG.debug(("Wrote %(bytes_written)d bytes to %(filepath)s with "
"checksum %(checksum_hex)s and multihash %(hash_hex)s"),
{'bytes_written': bytes_written,
'filepath': filepath,
'checksum_hex': checksum_hex,
'hash_hex': hash_hex})
if self.backend_group:
fstore_perm = getattr(
self.conf, self.backend_group).filesystem_store_file_perm
else:
fstore_perm = self.conf.glance_store.filesystem_store_file_perm
if fstore_perm > 0:
perm = int(str(fstore_perm), 8)
try:
os.chmod(filepath, perm)
except (IOError, OSError):
LOG.warning(_LW("Unable to set permission to image: %s") %
filepath)
# Add store backend information to location metadata
if self.backend_group:
metadata['store'] = u"%s" % self.backend_group
return ('file://%s' % filepath,
bytes_written,
checksum_hex,
hash_hex,
metadata)
@staticmethod
def _delete_partial(filepath, iid):
try:
os.unlink(filepath)
except Exception as e:
msg = _('Unable to remove partial image '
'data for image %(iid)s: %(e)s')
LOG.error(msg % dict(iid=iid,
e=encodeutils.exception_to_unicode(e)))
|
|
"""
Functions
---------
.. autosummary::
:toctree: generated/
fmin_l_bfgs_b
"""
## License for the Python wrapper
## ==============================
## Copyright (c) 2004 David M. Cooke <[email protected]>
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, asarray, float64, int32, zeros
from . import _lbfgsb
from .optimize import (approx_fprime, MemoizeJac, OptimizeResult,
_check_unknown_options, wrap_function,
_approx_fprime_helper)
from scipy.sparse.linalg import LinearOperator
__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
approx_grad=0,
bounds=None, m=10, factr=1e7, pgtol=1e-5,
epsilon=1e-8,
iprint=-1, maxfun=15000, maxiter=15000, disp=None,
callback=None, maxls=20):
"""
Minimize a function func using the L-BFGS-B algorithm.
Parameters
----------
func : callable f(x,*args)
Function to minimise.
x0 : ndarray
Initial guess.
fprime : callable fprime(x,*args), optional
The gradient of `func`. If None, then `func` returns the function
value and the gradient (``f, g = func(x, *args)``), unless
`approx_grad` is True in which case `func` returns only ``f``.
args : sequence, optional
Arguments to pass to `func` and `fprime`.
approx_grad : bool, optional
Whether to approximate the gradient numerically (in which case
`func` returns only the function value).
bounds : list, optional
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None or +-inf for one of ``min`` or
``max`` when there is no bound in that direction.
m : int, optional
The maximum number of variable metric corrections
used to define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many terms in an
approximation to it.)
factr : float, optional
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where ``eps`` is the machine precision, which is automatically
generated by the code. Typical values for `factr` are: 1e12 for
low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
high accuracy.
pgtol : float, optional
The iteration will stop when
``max{|proj g_i | i = 1, ..., n} <= pgtol``
where ``pg_i`` is the i-th component of the projected gradient.
epsilon : float, optional
Step size used when `approx_grad` is True, for numerically
calculating the gradient
iprint : int, optional
Controls the frequency of output. ``iprint < 0`` means no output;
``iprint == 0`` means write messages to stdout; ``iprint > 1`` in
addition means write logging information to a file named
``iterate.dat`` in the current working directory.
disp : int, optional
If zero, then no output. If a positive number, then this over-rides
`iprint` (i.e., `iprint` gets the value of `disp`).
maxfun : int, optional
Maximum number of function evaluations.
maxiter : int, optional
Maximum number of iterations.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
current parameter vector.
maxls : int, optional
Maximum number of line search steps (per iteration). Default is 20.
Returns
-------
x : array_like
Estimated position of the minimum.
f : float
Value of `func` at the minimum.
d : dict
Information dictionary.
* d['warnflag'] is
- 0 if converged,
- 1 if too many function evaluations or too many iterations,
- 2 if stopped for another reason, given in d['task']
* d['grad'] is the gradient at the minimum (should be 0 ish)
* d['funcalls'] is the number of function calls made.
* d['nit'] is the number of iterations.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'L-BFGS-B' `method` in particular.
Notes
-----
License of L-BFGS-B (FORTRAN code):
The version included here (in fortran code) is 3.0
(released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd,
and Jorge Nocedal <[email protected]>. It carries the following
condition for use:
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below. This software is released
under the BSD License.
References
----------
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
Constrained Optimization, (1995), SIAM Journal on Scientific and
Statistical Computing, 16, 5, pp. 1190-1208.
* C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization (1997),
ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
* J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization (2011),
ACM Transactions on Mathematical Software, 38, 1.
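    Examples
    --------
    A minimal usage sketch (for illustration), minimizing the Rosenbrock
    function with its analytic gradient from `scipy.optimize`:
    >>> import numpy as np
    >>> from scipy.optimize import fmin_l_bfgs_b, rosen, rosen_der
    >>> x0 = np.array([2.0, 2.0])
    >>> x, f, d = fmin_l_bfgs_b(rosen, x0, fprime=rosen_der)
    >>> bool(np.allclose(x, [1.0, 1.0], atol=1e-4))
    True
    >>> d['warnflag']
    0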
"""
# handle fprime/approx_grad
if approx_grad:
fun = func
jac = None
elif fprime is None:
fun = MemoizeJac(func)
jac = fun.derivative
else:
fun = func
jac = fprime
# build options
if disp is None:
disp = iprint
opts = {'disp': disp,
'iprint': iprint,
'maxcor': m,
'ftol': factr * np.finfo(float).eps,
'gtol': pgtol,
'eps': epsilon,
'maxfun': maxfun,
'maxiter': maxiter,
'callback': callback,
'maxls': maxls}
res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
**opts)
d = {'grad': res['jac'],
'task': res['message'],
'funcalls': res['nfev'],
'nit': res['nit'],
'warnflag': res['status']}
f = res['fun']
x = res['x']
return x, f, d
def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
disp=None, maxcor=10, ftol=2.2204460492503131e-09,
gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
iprint=-1, callback=None, maxls=20, **unknown_options):
"""
Minimize a scalar function of one or more variables using the L-BFGS-B
algorithm.
Options
-------
    disp : None or int
        If positive, print convergence messages; ``disp == 0`` suppresses
        all output. When set, this value overrides `iprint`.
maxcor : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many terms
in an approximation to it.)
    ftol : float
        The iteration stops when ``(f^k -
        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``. Note that
        `fmin_l_bfgs_b` exposes this tolerance as `factr`, with
        ``ftol = factr * eps`` where ``eps`` is the machine precision;
        typical `factr` values are 1e12 for low accuracy, 1e7 for
        moderate accuracy and 10.0 for extremely high accuracy.
gtol : float
The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
<= gtol`` where ``pg_i`` is the i-th component of the
projected gradient.
eps : float
Step size used for numerical approximation of the jacobian.
maxfun : int
Maximum number of function evaluations.
maxiter : int
Maximum number of iterations.
maxls : int, optional
Maximum number of line search steps (per iteration). Default is 20.
"""
_check_unknown_options(unknown_options)
m = maxcor
epsilon = eps
pgtol = gtol
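    # Recover the Fortran routine's factr from ftol; this is the inverse of
    # the mapping used by fmin_l_bfgs_b (ftol = factr * machine epsilon).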
factr = ftol / np.finfo(float).eps
x0 = asarray(x0).ravel()
n, = x0.shape
if bounds is None:
bounds = [(None, None)] * n
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
# unbounded variables must use None, not +-inf, for optimizer to work properly
    bounds = [(None if l == -np.inf else l, None if u == np.inf else u)
              for l, u in bounds]
if disp is not None:
if disp == 0:
iprint = -1
else:
iprint = disp
n_function_evals, fun = wrap_function(fun, ())
if jac is None:
def func_and_grad(x):
f = fun(x, *args)
g = _approx_fprime_helper(x, fun, epsilon, args=args, f0=f)
return f, g
else:
def func_and_grad(x):
f = fun(x, *args)
g = jac(x, *args)
return f, g
nbd = zeros(n, int32)
low_bnd = zeros(n, float64)
upper_bnd = zeros(n, float64)
bounds_map = {(None, None): 0,
(1, None): 1,
(1, 1): 2,
(None, 1): 3}
for i in range(0, n):
l, u = bounds[i]
if l is not None:
low_bnd[i] = l
l = 1
if u is not None:
upper_bnd[i] = u
u = 1
nbd[i] = bounds_map[l, u]
if not maxls > 0:
raise ValueError('maxls must be positive.')
x = array(x0, float64)
f = array(0.0, float64)
g = zeros((n,), float64)
wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
iwa = zeros(3*n, int32)
task = zeros(1, 'S60')
csave = zeros(1, 'S60')
lsave = zeros(4, int32)
isave = zeros(44, int32)
dsave = zeros(29, float64)
task[:] = 'START'
n_iterations = 0
while 1:
# x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
_lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
pgtol, wa, iwa, task, iprint, csave, lsave,
isave, dsave, maxls)
task_str = task.tostring()
if task_str.startswith(b'FG'):
if n_function_evals[0] > maxfun:
task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
'EXCEEDS LIMIT')
else:
# minimization routine wants f and g at the current x
# Overwrite f and g:
f, g = func_and_grad(x)
elif task_str.startswith(b'NEW_X'):
# new iteration
if n_iterations > maxiter:
task[:] = 'STOP: TOTAL NO. of ITERATIONS EXCEEDS LIMIT'
else:
n_iterations += 1
if callback is not None:
callback(x)
else:
break
task_str = task.tostring().strip(b'\x00').strip()
if task_str.startswith(b'CONV'):
warnflag = 0
elif n_function_evals[0] > maxfun:
warnflag = 1
elif n_iterations > maxiter:
warnflag = 1
else:
warnflag = 2
# These two portions of the workspace are described in the mainlb
# subroutine in lbfgsb.f. See line 363.
s = wa[0: m*n].reshape(m, n)
y = wa[m*n: 2*m*n].reshape(m, n)
# See lbfgsb.f line 160 for this portion of the workspace.
# isave(31) = the total number of BFGS updates prior the current iteration;
n_bfgs_updates = isave[30]
n_corrs = min(n_bfgs_updates, maxcor)
hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
return OptimizeResult(fun=f, jac=g, nfev=n_function_evals[0],
nit=n_iterations, status=warnflag, message=task_str,
x=x, success=(warnflag == 0), hess_inv=hess_inv)
class LbfgsInvHessProduct(LinearOperator):
"""Linear operator for the L-BFGS approximate inverse Hessian.
This operator computes the product of a vector with the approximate inverse
of the Hessian of the objective function, using the L-BFGS limited
memory approximation to the inverse Hessian, accumulated during the
optimization.
Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
interface.
Parameters
----------
sk : array_like, shape=(n_corr, n)
Array of `n_corr` most recent updates to the solution vector.
(See [1]).
yk : array_like, shape=(n_corr, n)
Array of `n_corr` most recent updates to the gradient. (See [1]).
References
----------
.. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
storage." Mathematics of computation 35.151 (1980): 773-782.
"""
def __init__(self, sk, yk):
"""Construct the operator."""
if sk.shape != yk.shape or sk.ndim != 2:
raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
n_corrs, n = sk.shape
super(LbfgsInvHessProduct, self).__init__(
dtype=np.float64, shape=(n, n))
self.sk = sk
self.yk = yk
self.n_corrs = n_corrs
self.rho = 1 / np.einsum('ij,ij->i', sk, yk)
def _matvec(self, x):
"""Efficient matrix-vector multiply with the BFGS matrices.
This calculation is described in Section (4) of [1].
Parameters
----------
x : ndarray
An array with shape (n,) or (n,1).
Returns
-------
y : ndarray
The matrix-vector product
"""
s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
q = np.array(x, dtype=self.dtype, copy=True)
if q.ndim == 2 and q.shape[1] == 1:
q = q.reshape(-1)
alpha = np.zeros(n_corrs)
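        # Standard two-loop recursion: the backward pass below strips the
        # stored curvature-pair components out of q, and the forward pass
        # reassembles r = H_k @ x from the (s_i, y_i) corrections, with
        # H_0 taken to be the identity.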
for i in range(n_corrs-1, -1, -1):
alpha[i] = rho[i] * np.dot(s[i], q)
q = q - alpha[i]*y[i]
r = q
for i in range(n_corrs):
beta = rho[i] * np.dot(y[i], r)
r = r + s[i] * (alpha[i] - beta)
return r
def todense(self):
"""Return a dense array representation of this operator.
Returns
-------
arr : ndarray, shape=(n, n)
An array with the same shape and containing
the same data represented by this `LinearOperator`.
"""
s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
I = np.eye(*self.shape, dtype=self.dtype)
Hk = I
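        # Apply the BFGS inverse-Hessian update
        #   H_{i+1} = (I - rho_i s_i y_i^T) H_i (I - rho_i y_i s_i^T)
        #             + rho_i s_i s_i^T
        # once per stored correction pair, starting from H_0 = I.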
for i in range(n_corrs):
A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]
Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
s[i][np.newaxis, :])
return Hk
if __name__ == '__main__':
def func(x):
f = 0.25 * (x[0] - 1) ** 2
for i in range(1, x.shape[0]):
f += (x[i] - x[i-1] ** 2) ** 2
f *= 4
return f
def grad(x):
g = zeros(x.shape, float64)
t1 = x[1] - x[0] ** 2
g[0] = 2 * (x[0] - 1) - 16 * x[0] * t1
for i in range(1, g.shape[0] - 1):
t2 = t1
t1 = x[i + 1] - x[i] ** 2
g[i] = 8 * t2 - 16*x[i] * t1
g[-1] = 8 * t1
return g
def func_and_grad(x):
return func(x), grad(x)
class Problem(object):
def fun(self, x):
return func_and_grad(x)
factr = 1e7
pgtol = 1e-5
n = 25
m = 10
bounds = [(None, None)] * n
for i in range(0, n, 2):
bounds[i] = (1.0, 100)
for i in range(1, n, 2):
bounds[i] = (-100, 100)
x0 = zeros((n,), float64)
x0[:] = 3
x, f, d = fmin_l_bfgs_b(func, x0, fprime=grad, m=m,
factr=factr, pgtol=pgtol)
print(x)
print(f)
print(d)
x, f, d = fmin_l_bfgs_b(func, x0, approx_grad=1,
m=m, factr=factr, pgtol=pgtol)
print(x)
print(f)
print(d)
x, f, d = fmin_l_bfgs_b(func_and_grad, x0, approx_grad=0,
m=m, factr=factr, pgtol=pgtol)
print(x)
print(f)
print(d)
p = Problem()
x, f, d = fmin_l_bfgs_b(p.fun, x0, approx_grad=0,
m=m, factr=factr, pgtol=pgtol)
print(x)
print(f)
print(d)
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import uuid
from django.conf import settings
from django.contrib import auth
from django import test
from django.test.utils import override_settings
from django.urls import reverse
from keystoneauth1 import exceptions as keystone_exceptions
from keystoneauth1.identity import v3 as v3_auth
from keystoneauth1 import session
from keystoneclient.v3 import client as client_v3
from keystoneclient.v3 import projects
from openstack_auth.plugin import password
from openstack_auth.tests import data_v3
from openstack_auth import utils
DEFAULT_DOMAIN = settings.OPENSTACK_KEYSTONE_DEFAULT_DOMAIN
# NOTE(e0ne): it's copy-pasted from horizon.test.helpers module until we
# figure out how to avoid this.
class IsA(object):
"""Class to compare param is a specified class."""
def __init__(self, cls):
self.cls = cls
def __eq__(self, other):
return isinstance(other, self.cls)
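# Illustrative note: mock compares recorded call arguments with ``==``, so an
# assertion such as ``m.assert_called_once_with(IsA(session.Session))`` passes
# for any ``session.Session`` instance supplied by the code under test.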
class SwitchProviderTests(test.TestCase):
interface = None
def setUp(self):
super().setUp()
params = {
'OPENSTACK_API_VERSIONS': {'identity': 3},
'OPENSTACK_KEYSTONE_URL': "http://localhost/identity/v3",
}
if self.interface:
params['OPENSTACK_ENDPOINT_TYPE'] = self.interface
override = self.settings(**params)
override.enable()
self.addCleanup(override.disable)
self.data = data_v3.generate_test_data()
self.ks_client_module = client_v3
def get_form_data(self, user):
return {'region': "default",
'domain': DEFAULT_DOMAIN,
'password': user.password,
'username': user.name}
@mock.patch.object(v3_auth, 'Keystone2Keystone')
@mock.patch.object(client_v3, 'Client')
@mock.patch.object(v3_auth, 'Token')
@mock.patch.object(v3_auth, 'Password')
def test_switch_keystone_provider_remote_fail(
self, mock_password, mock_token, mock_client, mock_k2k,
):
target_provider = 'k2kserviceprovider'
self.data = data_v3.generate_test_data(service_providers=True)
self.sp_data = data_v3.generate_test_data(endpoint='http://sp2')
projects = [self.data.project_one, self.data.project_two]
user = self.data.user
form_data = self.get_form_data(user)
auth_password = mock.Mock(
auth_url=settings.OPENSTACK_KEYSTONE_URL)
auth_password.get_access.side_effect = [
self.data.unscoped_access_info
]
mock_password.return_value = auth_password
auth_token_domain = mock.Mock(
auth_url=settings.OPENSTACK_KEYSTONE_URL)
auth_token_project1 = mock.Mock(
auth_url=settings.OPENSTACK_KEYSTONE_URL)
auth_token_unscoped = mock.Mock(
auth_url=settings.OPENSTACK_KEYSTONE_URL)
auth_token_project2 = mock.Mock(
auth_url=settings.OPENSTACK_KEYSTONE_URL)
mock_token.side_effect = [auth_token_domain,
auth_token_project1,
auth_token_unscoped,
auth_token_project2]
auth_token_domain.get_access.return_value = \
self.data.domain_scoped_access_info
auth_token_project1.get_access.return_value = \
self.data.unscoped_access_info
auth_token_unscoped.get_access.return_value = \
self.data.unscoped_access_info
auth_token_project2.get_access.return_value = \
self.data.unscoped_access_info
auth_token_project2.get_sp_auth_url.return_value = \
'https://k2kserviceprovider/sp_url'
client_domain = mock.Mock()
client_project1 = mock.Mock()
client_unscoped = mock.Mock()
mock_client.side_effect = [client_domain,
client_project1,
client_unscoped]
client_domain.projects.list.return_value = projects
client_unscoped.projects.list.return_value = projects
# let the K2K plugin fail when logging in
auth_k2k = mock.Mock()
auth_k2k.get_access.side_effect = \
keystone_exceptions.AuthorizationFailure
mock_k2k.return_value = auth_k2k
# Log in
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
# Switch
url = reverse('switch_keystone_provider', args=[target_provider])
form_data['keystone_provider'] = target_provider
response = self.client.get(url, form_data, follow=True)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
# Assert that provider has not changed because of failure
self.assertEqual(self.client.session['keystone_provider_id'],
'localkeystone')
# These should never change
self.assertEqual(self.client.session['k2k_base_unscoped_token'],
self.data.unscoped_access_info.auth_token)
self.assertEqual(self.client.session['k2k_auth_url'],
settings.OPENSTACK_KEYSTONE_URL)
mock_password.assert_called_once_with(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
password=self.data.user.password,
username=self.data.user.name,
user_domain_name=DEFAULT_DOMAIN,
unscoped=True,
)
auth_password.get_access.assert_called_once_with(IsA(session.Session))
mock_client.assert_has_calls([
mock.call(
session=IsA(session.Session),
auth=auth_password,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_project1,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_unscoped,
),
])
self.assertEqual(3, mock_client.call_count)
client_domain.projects.list.assert_called_once_with(user=user.id)
client_unscoped.projects.list.assert_called_once_with(user=user.id)
mock_token.assert_has_calls([
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
domain_name=DEFAULT_DOMAIN,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=self.data.project_one.id,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=None,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=self.data.project_one.id,
reauthenticate=False,
),
])
self.assertEqual(4, mock_token.call_count)
auth_token_domain.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_project1.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_unscoped.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_project2.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_project2.get_sp_auth_url.assert_called_once_with(
IsA(session.Session), target_provider)
mock_k2k.assert_called_once_with(
base_plugin=auth_token_project2,
service_provider=target_provider,
)
auth_k2k.get_access.assert_called_once_with(IsA(session.Session))
@mock.patch.object(v3_auth, 'Keystone2Keystone')
@mock.patch.object(client_v3, 'Client')
@mock.patch.object(v3_auth, 'Token')
@mock.patch.object(v3_auth, 'Password')
def test_switch_keystone_provider_remote(
self, mock_password, mock_token, mock_client, mock_k2k,
):
keystone_url = settings.OPENSTACK_KEYSTONE_URL
target_provider = 'k2kserviceprovider'
self.data = data_v3.generate_test_data(service_providers=True)
self.sp_data = data_v3.generate_test_data(endpoint='http://sp2')
projects = [self.data.project_one, self.data.project_two]
sp_projects = [self.sp_data.project_one, self.sp_data.project_two]
domains = []
user = self.data.user
form_data = self.get_form_data(user)
auth_password = mock.Mock(auth_url=keystone_url)
mock_password.return_value = auth_password
auth_password.get_access.return_value = self.data.unscoped_access_info
auth_token_domain = mock.Mock()
auth_token_scoped_1 = mock.Mock()
auth_token_unscoped = mock.Mock(auth_url=keystone_url)
auth_token_scoped_2 = mock.Mock()
auth_token_sp_unscoped = mock.Mock(auth_url=keystone_url)
auth_token_sp_scoped = mock.Mock()
mock_token.side_effect = [
auth_token_domain,
auth_token_scoped_1,
auth_token_unscoped,
auth_token_scoped_2,
auth_token_sp_unscoped,
auth_token_sp_scoped,
]
auth_token_domain.get_access.return_value = \
self.data.domain_scoped_access_info
auth_token_scoped_1.get_access.return_value = \
self.data.unscoped_access_info
auth_token_unscoped.get_access.return_value = \
self.data.unscoped_access_info
auth_token_scoped_2.get_access.return_value = \
            self.data.unscoped_access_info
auth_token_scoped_2.get_sp_auth_url.return_value = \
'https://k2kserviceprovider/sp_url'
auth_token_sp_unscoped.get_access.return_value = \
self.sp_data.federated_unscoped_access_info
auth_token_sp_scoped.get_access.return_value = \
self.sp_data.federated_unscoped_access_info
client_domain = mock.Mock()
client_scoped = mock.Mock()
client_unscoped = mock.Mock()
client_sp_unscoped_1 = mock.Mock()
client_sp_unscoped_2 = mock.Mock()
client_sp_scoped = mock.Mock()
mock_client.side_effect = [
client_domain,
client_scoped,
client_unscoped,
client_sp_unscoped_1,
client_sp_unscoped_2,
client_sp_scoped,
]
client_domain.projects.list.return_value = projects
client_unscoped.projects.list.return_value = projects
client_sp_unscoped_1.auth.domains.return_value = domains
client_sp_unscoped_2.federation.projects.list.return_value = \
sp_projects
auth_k2k = mock.Mock(
auth_url='http://service_provider_endp/identity/v3')
mock_k2k.return_value = auth_k2k
auth_k2k.get_access.return_value = self.sp_data.unscoped_access_info
# Log in
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
# Switch
url = reverse('switch_keystone_provider', args=[target_provider])
form_data['keystone_provider'] = target_provider
response = self.client.get(url, form_data, follow=True)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
# Assert keystone provider has changed
self.assertEqual(self.client.session['keystone_provider_id'],
target_provider)
# These should not change
self.assertEqual(self.client.session['k2k_base_unscoped_token'],
self.data.unscoped_access_info.auth_token)
self.assertEqual(self.client.session['k2k_auth_url'],
settings.OPENSTACK_KEYSTONE_URL)
mock_password.assert_called_once_with(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
password=self.data.user.password,
username=self.data.user.name,
user_domain_name=DEFAULT_DOMAIN,
unscoped=True,
)
auth_password.get_access.assert_called_once_with(IsA(session.Session))
mock_client.assert_has_calls([
mock.call(
session=IsA(session.Session),
auth=auth_password,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_scoped_1,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_unscoped,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_sp_unscoped,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_sp_unscoped,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_sp_scoped,
),
])
self.assertEqual(6, mock_client.call_count)
client_domain.projects.list.assert_called_once_with(user=user.id)
client_unscoped.projects.list.assert_called_once_with(user=user.id)
client_sp_unscoped_1.auth.domains.assert_called_once_with()
client_sp_unscoped_2.federation.projects.list.assert_called_once_with()
client_scoped.assert_not_called()
client_sp_scoped.assert_not_called()
mock_token.assert_has_calls([
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
domain_name=DEFAULT_DOMAIN,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=self.data.project_one.id,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=None,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=self.data.project_one.id,
reauthenticate=False,
),
mock.call(
auth_url='http://service_provider_endp/identity/v3',
token=self.sp_data.federated_unscoped_access_info.auth_token,
project_id=None,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.sp_data.federated_unscoped_access_info.auth_token,
project_id=self.sp_data.project_one.id,
reauthenticate=False,
),
])
self.assertEqual(6, mock_token.call_count)
auth_token_domain.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_scoped_1.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_unscoped.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_scoped_2.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_scoped_2.get_sp_auth_url.assert_called_once_with(
IsA(session.Session), target_provider)
auth_token_sp_unscoped.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_sp_scoped.get_access.assert_called_once_with(
IsA(session.Session))
mock_k2k.assert_called_once_with(
base_plugin=auth_token_scoped_2,
service_provider=target_provider,
)
auth_k2k.get_access.assert_called_once_with(IsA(session.Session))
@mock.patch.object(client_v3, 'Client')
@mock.patch.object(v3_auth, 'Token')
@mock.patch.object(v3_auth, 'Password')
def test_switch_keystone_provider_local(
self, mock_password, mock_token, mock_client
):
self.data = data_v3.generate_test_data(service_providers=True)
keystone_url = settings.OPENSTACK_KEYSTONE_URL
keystone_provider = 'localkeystone'
projects = [self.data.project_one, self.data.project_two]
domains = []
user = self.data.user
form_data = self.get_form_data(user)
auth_password = mock.Mock(
auth_url=settings.OPENSTACK_KEYSTONE_URL)
mock_password.return_value = auth_password
auth_password.get_access.return_value = self.data.unscoped_access_info
auth_token_domain = mock.Mock(auth_url=keystone_url)
auth_token_scoped_1 = mock.Mock(auth_url=keystone_url)
auth_token_unscoped_1 = mock.Mock(auth_url=keystone_url)
auth_token_scoped_2 = mock.Mock(auth_url=keystone_url)
auth_token_unscoped_2 = mock.Mock(auth_url=keystone_url)
mock_token.side_effect = [
auth_token_domain,
auth_token_scoped_1,
auth_token_unscoped_1,
auth_token_unscoped_2,
auth_token_scoped_2,
]
auth_token_domain.get_access.return_value = \
self.data.domain_scoped_access_info
for _auth in [auth_token_scoped_1, auth_token_unscoped_1,
auth_token_unscoped_2, auth_token_scoped_2]:
_auth.get_access.return_value = self.data.unscoped_access_info
client_domain = mock.Mock()
client_scoped_1 = mock.Mock()
client_unscoped_1 = mock.Mock()
client_unscoped_2 = mock.Mock()
client_scoped_2 = mock.Mock()
mock_client.side_effect = [
client_domain,
client_scoped_1,
client_unscoped_1,
client_unscoped_2,
client_scoped_2,
]
client_domain.projects.list.return_value = projects
client_unscoped_1.auth.domains.return_value = domains
client_unscoped_2.projects.list.return_value = projects
# Log in
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
# Switch
url = reverse('switch_keystone_provider', args=[keystone_provider])
form_data['keystone_provider'] = keystone_provider
response = self.client.get(url, form_data, follow=True)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
# Assert nothing has changed since we are going from local to local
self.assertEqual(self.client.session['keystone_provider_id'],
keystone_provider)
self.assertEqual(self.client.session['k2k_base_unscoped_token'],
self.data.unscoped_access_info.auth_token)
self.assertEqual(self.client.session['k2k_auth_url'],
settings.OPENSTACK_KEYSTONE_URL)
mock_password.assert_called_once_with(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
password=self.data.user.password,
username=self.data.user.name,
user_domain_name=DEFAULT_DOMAIN,
unscoped=True,
)
auth_password.get_access.assert_called_once_with(IsA(session.Session))
mock_client.assert_has_calls([
mock.call(
session=IsA(session.Session),
auth=auth_password,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_scoped_1,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_unscoped_2,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_unscoped_2,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_scoped_2,
)
])
self.assertEqual(5, mock_client.call_count)
client_domain.projects.list.assert_called_once_with(user=user.id)
client_scoped_1.assert_not_called()
client_unscoped_1.auth.domains.assert_called_once_with()
client_unscoped_2.projects.list.assert_called_once_with(user=user.id)
client_scoped_2.assert_not_called()
mock_token.assert_has_calls([
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
domain_name=DEFAULT_DOMAIN,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=self.data.project_one.id,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=None,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=None,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=self.data.project_one.id,
reauthenticate=False,
),
])
self.assertEqual(5, mock_token.call_count)
auth_token_domain.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_scoped_1.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_unscoped_1.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_unscoped_2.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_scoped_2.get_access.assert_called_once_with(
IsA(session.Session))
@mock.patch.object(client_v3, 'Client')
@mock.patch.object(v3_auth, 'Token')
@mock.patch.object(v3_auth, 'Password')
def test_switch_keystone_provider_local_fail(
self, mock_password, mock_token, mock_client
):
self.data = data_v3.generate_test_data(service_providers=True)
keystone_provider = 'localkeystone'
user = self.data.user
form_data = self.get_form_data(user)
# mock authenticate
auth_password = mock.Mock(
auth_url=settings.OPENSTACK_KEYSTONE_URL)
mock_password.return_value = auth_password
auth_password.get_access.return_value = self.data.unscoped_access_info
auth_token_domain = mock.Mock()
auth_token_project = mock.Mock()
auth_token_unscoped = mock.Mock()
mock_token.side_effect = [
auth_token_domain,
auth_token_project,
auth_token_unscoped,
]
auth_token_domain.get_access.return_value = \
self.data.domain_scoped_access_info
auth_token_project.get_access.return_value = \
self.data.unscoped_access_info
auth_token_unscoped.get_access.side_effect = \
keystone_exceptions.AuthorizationFailure
client_domain = mock.Mock()
client_project = mock.Mock()
mock_client.side_effect = [
client_domain,
client_project,
]
client_domain.projects.list.return_value = [
self.data.project_one, self.data.project_two
]
# Log in
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
# Switch
url = reverse('switch_keystone_provider', args=[keystone_provider])
form_data['keystone_provider'] = keystone_provider
response = self.client.get(url, form_data, follow=True)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
# Assert
self.assertEqual(self.client.session['keystone_provider_id'],
keystone_provider)
self.assertEqual(self.client.session['k2k_base_unscoped_token'],
self.data.unscoped_access_info.auth_token)
self.assertEqual(self.client.session['k2k_auth_url'],
settings.OPENSTACK_KEYSTONE_URL)
mock_password.assert_called_once_with(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
password=self.data.user.password,
username=self.data.user.name,
user_domain_name=DEFAULT_DOMAIN,
unscoped=True,
)
auth_password.get_access.assert_called_once_with(IsA(session.Session))
mock_client.assert_has_calls([
mock.call(
auth=auth_password,
session=IsA(session.Session),
),
mock.call(
auth=auth_token_project,
session=IsA(session.Session),
),
])
self.assertEqual(2, mock_client.call_count)
client_domain.projects.list.assert_called_once_with(user=user.id)
client_project.assert_not_called()
mock_token.assert_has_calls([
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
domain_name=DEFAULT_DOMAIN,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=self.data.project_one.id,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=None,
reauthenticate=False,
),
])
self.assertEqual(3, mock_token.call_count)
auth_token_domain.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_project.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_unscoped.get_access.assert_called_once_with(
IsA(session.Session))
class SwitchProviderTestsPublicURL(SwitchProviderTests):
interface = 'publicURL'
class SwitchProviderTestsInternalURL(SwitchProviderTests):
interface = 'internalURL'
class SwitchProviderTestsAdminURL(SwitchProviderTests):
interface = 'adminURL'
class OpenStackAuthTestsWebSSO(test.TestCase):
def setUp(self):
super().setUp()
self.data = data_v3.generate_test_data()
self.ks_client_module = client_v3
self.idp_id = uuid.uuid4().hex
self.idp_oidc_id = uuid.uuid4().hex
self.idp_saml2_id = uuid.uuid4().hex
settings.OPENSTACK_API_VERSIONS['identity'] = 3
settings.OPENSTACK_KEYSTONE_URL = 'http://localhost/identity/v3'
settings.WEBSSO_ENABLED = True
settings.WEBSSO_CHOICES = (
('credentials', 'Keystone Credentials'),
('oidc', 'OpenID Connect'),
('saml2', 'Security Assertion Markup Language'),
(self.idp_oidc_id, 'IDP OIDC'),
(self.idp_saml2_id, 'IDP SAML2')
)
settings.WEBSSO_IDP_MAPPING = {
self.idp_oidc_id: (self.idp_id, 'oidc'),
self.idp_saml2_id: (self.idp_id, 'saml2')
}
def test_login_form(self):
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'credentials')
self.assertContains(response, 'oidc')
self.assertContains(response, 'saml2')
self.assertContains(response, self.idp_oidc_id)
self.assertContains(response, self.idp_saml2_id)
def test_websso_redirect_by_protocol(self):
origin = 'http://testserver/auth/websso/'
protocol = 'oidc'
redirect_url = ('%s/auth/OS-FEDERATION/websso/%s?origin=%s' %
(settings.OPENSTACK_KEYSTONE_URL, protocol, origin))
form_data = {'auth_type': protocol,
'region': 'default'}
url = reverse('login')
# POST to the page and redirect to keystone.
response = self.client.post(url, form_data)
self.assertRedirects(response, redirect_url, status_code=302,
target_status_code=404)
def test_websso_redirect_by_idp(self):
origin = 'http://testserver/auth/websso/'
protocol = 'oidc'
redirect_url = ('%s/auth/OS-FEDERATION/identity_providers/%s'
'/protocols/%s/websso?origin=%s' %
(settings.OPENSTACK_KEYSTONE_URL, self.idp_id,
protocol, origin))
form_data = {'auth_type': self.idp_oidc_id,
'region': 'default'}
url = reverse('login')
# POST to the page and redirect to keystone.
response = self.client.post(url, form_data)
self.assertRedirects(response, redirect_url, status_code=302,
target_status_code=404)
@override_settings(WEBSSO_KEYSTONE_URL='http://keystone-public/identity/v3')
def test_websso_redirect_using_websso_keystone_url(self):
origin = 'http://testserver/auth/websso/'
protocol = 'oidc'
redirect_url = ('%s/auth/OS-FEDERATION/identity_providers/%s'
'/protocols/%s/websso?origin=%s' %
(settings.WEBSSO_KEYSTONE_URL, self.idp_id,
protocol, origin))
form_data = {'auth_type': self.idp_oidc_id,
'region': 'default'}
url = reverse('login')
# POST to the page and redirect to keystone.
response = self.client.post(url, form_data)
# verify that the request was sent back to WEBSSO_KEYSTONE_URL
self.assertRedirects(response, redirect_url, status_code=302,
target_status_code=404)
@mock.patch.object(client_v3, 'Client')
@mock.patch.object(v3_auth, 'Token')
def test_websso_login(self, mock_token, mock_client):
keystone_url = settings.OPENSTACK_KEYSTONE_URL
form_data = {
'token': self.data.federated_unscoped_access_info.auth_token,
}
auth_token_unscoped = mock.Mock(auth_url=keystone_url)
auth_token_scoped = mock.Mock(auth_url=keystone_url)
mock_token.side_effect = [
auth_token_unscoped,
auth_token_scoped,
]
auth_token_unscoped.get_access.return_value = \
self.data.federated_unscoped_access_info
auth_token_scoped.get_access.return_value = \
self.data.unscoped_access_info
client_unscoped_1 = mock.Mock()
client_unscoped_2 = mock.Mock()
client_scoped = mock.Mock()
mock_client.side_effect = [
client_unscoped_1,
client_unscoped_2,
client_scoped,
]
client_unscoped_1.auth.domains.return_value = []
client_unscoped_2.federation.projects.list.return_value = [
self.data.project_one, self.data.project_two
]
url = reverse('websso')
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
mock_token.assert_has_calls([
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.federated_unscoped_access_info.auth_token,
project_id=None,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token,
project_id=self.data.project_one.id,
reauthenticate=False,
),
])
self.assertEqual(2, mock_token.call_count)
auth_token_unscoped.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_scoped.get_access.assert_called_once_with(
IsA(session.Session))
mock_client.assert_has_calls([
mock.call(
auth=auth_token_unscoped,
session=IsA(session.Session),
),
mock.call(
auth=auth_token_unscoped,
session=IsA(session.Session),
),
mock.call(
auth=auth_token_scoped,
session=IsA(session.Session),
),
])
self.assertEqual(3, mock_client.call_count)
client_unscoped_1.auth.domains.assert_called_once_with()
client_unscoped_2.federation.projects.list.assert_called_once_with()
client_scoped.assert_not_called()
@mock.patch.object(client_v3, 'Client')
@mock.patch.object(v3_auth, 'Token')
@override_settings(
OPENSTACK_KEYSTONE_URL='http://auth.openstack.org/identity/v3')
def test_websso_login_with_auth_in_url(self, mock_token, mock_client):
keystone_url = settings.OPENSTACK_KEYSTONE_URL
form_data = {
'token': self.data.federated_unscoped_access_info.auth_token,
}
auth_token_unscoped = mock.Mock(auth_url=keystone_url)
auth_token_scoped = mock.Mock(auth_url=keystone_url)
mock_token.side_effect = [
auth_token_unscoped,
auth_token_scoped,
]
auth_token_unscoped.get_access.return_value = \
self.data.federated_unscoped_access_info
auth_token_scoped.get_access.return_value = \
self.data.unscoped_access_info
client_unscoped_1 = mock.Mock()
client_unscoped_2 = mock.Mock()
client_scoped = mock.Mock()
mock_client.side_effect = [
client_unscoped_1,
client_unscoped_2,
client_scoped,
]
client_unscoped_1.auth.domains.return_value = []
client_unscoped_2.federation.projects.list.return_value = [
self.data.project_one, self.data.project_two
]
url = reverse('websso')
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
# validate token flow
mock_token.assert_has_calls([
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.federated_unscoped_access_info.auth_token,
project_id=None,
reauthenticate=False,
),
mock.call(
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.federated_unscoped_access_info.auth_token,
project_id=self.data.project_one.id,
reauthenticate=False,
),
])
self.assertEqual(2, mock_token.call_count)
auth_token_unscoped.get_access.assert_called_once_with(
IsA(session.Session))
auth_token_scoped.get_access.assert_called_once_with(
IsA(session.Session))
mock_client.assert_has_calls([
mock.call(
session=IsA(session.Session),
auth=auth_token_unscoped,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_unscoped,
),
mock.call(
session=IsA(session.Session),
auth=auth_token_scoped,
),
])
self.assertEqual(3, mock_client.call_count)
client_unscoped_1.auth.domains.assert_called_once_with()
client_unscoped_2.federation.projects.list.assert_called_once_with()
client_scoped.assert_not_called()
@override_settings(WEBSSO_DEFAULT_REDIRECT=True)
@override_settings(WEBSSO_DEFAULT_REDIRECT_PROTOCOL='oidc')
@override_settings(
WEBSSO_DEFAULT_REDIRECT_REGION=settings.OPENSTACK_KEYSTONE_URL)
def test_websso_login_default_redirect(self):
origin = 'http://testserver/auth/websso/'
protocol = 'oidc'
redirect_url = ('%s/auth/OS-FEDERATION/websso/%s?origin=%s' %
(settings.OPENSTACK_KEYSTONE_URL, protocol, origin))
url = reverse('login')
# POST to the page and redirect to keystone.
response = self.client.get(url)
self.assertRedirects(response, redirect_url, status_code=302,
target_status_code=404)
@override_settings(WEBSSO_DEFAULT_REDIRECT=True)
@override_settings(WEBSSO_DEFAULT_REDIRECT_LOGOUT='http://idptest/logout')
def test_websso_logout_default_redirect(self):
settings.WEBSSO_DEFAULT_REDIRECT = True
settings.WEBSSO_DEFAULT_REDIRECT_LOGOUT = 'http://idptest/logout'
url = reverse('logout')
# POST to the page and redirect to logout method from idp.
response = self.client.get(url)
self.assertRedirects(response, settings.WEBSSO_DEFAULT_REDIRECT_LOGOUT,
status_code=302, target_status_code=301)
class OpenStackAuthTests(test.TestCase):
interface = None
def setUp(self):
super().setUp()
params = {
'OPENSTACK_API_VERSIONS': {'identity': 3},
'OPENSTACK_KEYSTONE_URL': "http://localhost/identity/v3",
}
if self.interface:
params['OPENSTACK_ENDPOINT_TYPE'] = self.interface
override = self.settings(**params)
override.enable()
self.addCleanup(override.disable)
self.data = data_v3.generate_test_data()
def get_form_data(self, user):
return {'region': "default",
'domain': DEFAULT_DOMAIN,
'password': user.password,
'username': user.name}
@mock.patch('keystoneauth1.identity.v3.Token.get_access')
@mock.patch('keystoneauth1.identity.v3.Password.get_access')
@mock.patch('keystoneclient.v3.client.Client')
def test_login(self, mock_client, mock_get_access, mock_get_access_token):
projects = [self.data.project_one, self.data.project_two]
user = self.data.user
form_data = self.get_form_data(user)
url = reverse('login')
mock_get_access.return_value = self.data.unscoped_access_info
mock_client.return_value.projects.list.return_value = projects
# TODO(stephenfin): What is the return type of this method?
mock_get_access_token.return_value = self.data.unscoped_access_info
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
@mock.patch('keystoneauth1.identity.v3.Password.get_access')
def test_invalid_credentials(self, mock_get_access):
user = self.data.user
form_data = self.get_form_data(user)
form_data['password'] = "invalid"
url = reverse('login')
mock_get_access.side_effect = keystone_exceptions.Unauthorized(401)
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response, "Invalid credentials.")
mock_get_access.assert_called_once_with(IsA(session.Session))
@mock.patch('keystoneauth1.identity.v3.Password.get_access')
def test_exception(self, mock_get_access):
user = self.data.user
form_data = self.get_form_data(user)
url = reverse('login')
mock_get_access.side_effect = \
keystone_exceptions.ClientException('error 500')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response,
("An error occurred authenticating. Please try "
"again later."))
mock_get_access.assert_called_once_with(IsA(session.Session))
@mock.patch('keystoneauth1.identity.v3.Password.get_access')
def test_password_expired(self, mock_get_access):
user = self.data.user
form_data = self.get_form_data(user)
url = reverse('login')
class ExpiredException(keystone_exceptions.Unauthorized):
http_status = 401
message = ("The password is expired and needs to be changed"
" for user: %s." % user.id)
mock_get_access.side_effect = ExpiredException()
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
# This fails with TemplateDoesNotExist for some reason.
# self.assertRedirects(response, reverse('password', args=[user.id]))
# so instead we check for the redirect manually:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/password/%s/" % user.id)
mock_get_access.assert_called_once_with(IsA(session.Session))
def test_login_form_multidomain(self):
override = self.settings(OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT=True)
override.enable()
self.addCleanup(override.disable)
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'id="id_domain"')
self.assertContains(response, 'name="domain"')
@override_settings(
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT=True,
OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN=True,
OPENSTACK_KEYSTONE_DOMAIN_CHOICES=(('Default', 'Default'),)
)
def test_login_form_multidomain_dropdown(self):
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'id="id_domain"')
self.assertContains(response, 'name="domain"')
self.assertContains(response, 'option value="Default"')
@mock.patch.object(projects.ProjectManager, 'list')
def test_tenant_sorting(self, mock_project_list):
projects = [self.data.project_two, self.data.project_one]
expected_projects = [self.data.project_one, self.data.project_two]
user = self.data.user
mock_project_list.return_value = projects
project_list = utils.get_project_list(
user_id=user.id,
auth_url=settings.OPENSTACK_KEYSTONE_URL,
token=self.data.unscoped_access_info.auth_token)
self.assertEqual(project_list, expected_projects)
mock_project_list.assert_called_once()
@mock.patch.object(v3_auth.Token, 'get_access')
@mock.patch.object(password.PasswordPlugin, 'list_projects')
@mock.patch.object(v3_auth.Password, 'get_access')
def test_login_with_disabled_project(self, mock_get_access,
mock_project_list,
mock_get_access_token):
# Test to validate that authentication will not try to get
# scoped token for disabled project.
projects = [self.data.project_two, self.data.project_one]
user = self.data.user
mock_get_access.return_value = self.data.unscoped_access_info
mock_project_list.return_value = projects
mock_get_access_token.return_value = self.data.unscoped_access_info
form_data = self.get_form_data(user)
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
mock_get_access.assert_called_once_with(IsA(session.Session))
mock_get_access_token.assert_called_with(IsA(session.Session))
mock_project_list.assert_called_once_with(
IsA(session.Session),
IsA(v3_auth.Password),
self.data.unscoped_access_info)
@mock.patch.object(v3_auth.Token, 'get_access')
@mock.patch.object(password.PasswordPlugin, 'list_projects')
@mock.patch.object(v3_auth.Password, 'get_access')
def test_no_enabled_projects(self, mock_get_access, mock_project_list,
mock_get_access_token):
projects = [self.data.project_two]
user = self.data.user
mock_get_access.return_value = self.data.unscoped_access_info
mock_project_list.return_value = projects
mock_get_access_token.return_value = self.data.unscoped_access_info
form_data = self.get_form_data(user)
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
mock_get_access.assert_called_once_with(IsA(session.Session))
mock_get_access_token.assert_called_with(IsA(session.Session))
mock_project_list.assert_called_once_with(
IsA(session.Session),
IsA(v3_auth.Password),
self.data.unscoped_access_info)
@mock.patch.object(v3_auth.Token, 'get_access')
@mock.patch.object(password.PasswordPlugin, 'list_projects')
@mock.patch.object(v3_auth.Password, 'get_access')
def test_no_projects(self, mock_get_access, mock_project_list,
mock_get_access_token):
user = self.data.user
form_data = self.get_form_data(user)
mock_get_access.return_value = self.data.unscoped_access_info
mock_get_access_token.return_value = self.data.unscoped_access_info
mock_project_list.return_value = []
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
mock_get_access.assert_called_once_with(IsA(session.Session))
mock_get_access_token.assert_called_with(IsA(session.Session))
mock_project_list.assert_called_once_with(
IsA(session.Session),
IsA(v3_auth.Password),
self.data.unscoped_access_info)
@mock.patch.object(v3_auth.Token, 'get_access')
@mock.patch.object(projects.ProjectManager, 'list')
@mock.patch.object(v3_auth.Password, 'get_access')
def test_fail_projects(self, mock_get_access, mock_project_list,
mock_get_access_token):
user = self.data.user
form_data = self.get_form_data(user)
mock_get_access.return_value = self.data.unscoped_access_info
mock_get_access_token.return_value = self.data.unscoped_access_info
mock_project_list.side_effect = keystone_exceptions.AuthorizationFailure
url = reverse('login')
# GET the page to set the test cookie.
response = self.client.get(url, form_data)
self.assertEqual(response.status_code, 200)
# POST to the page to log in.
response = self.client.post(url, form_data)
self.assertTemplateUsed(response, 'auth/login.html')
self.assertContains(response,
'Unable to retrieve authorized projects.')
mock_get_access.assert_called_once_with(IsA(session.Session))
mock_get_access_token.assert_called_with(IsA(session.Session))
mock_project_list.assert_called_once_with(user=user.id)
@mock.patch.object(v3_auth.Token, 'get_access')
@mock.patch.object(password.PasswordPlugin, 'list_projects')
@mock.patch.object(v3_auth.Password, 'get_access')
def test_switch(self, mock_get_access, mock_project_list,
mock_get_access_token,
next=None):
project = self.data.project_two
projects = [self.data.project_one, self.data.project_two]
user = self.data.user
scoped = self.data.scoped_access_info
form_data = self.get_form_data(user)
mock_get_access.return_value = self.data.unscoped_access_info
mock_get_access_token.return_value = scoped
mock_project_list.return_value = projects
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
url = reverse('switch_tenants', args=[project.id])
scoped._project['id'] = self.data.project_two.id
if next:
form_data.update({auth.REDIRECT_FIELD_NAME: next})
response = self.client.get(url, form_data)
if next:
expected_url = next
self.assertEqual(response['location'], expected_url)
else:
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
self.assertEqual(self.client.session['token'].project['id'],
scoped.project_id)
mock_get_access.assert_called_once_with(IsA(session.Session))
mock_get_access_token.assert_called_with(IsA(session.Session))
mock_project_list.assert_called_once_with(
IsA(session.Session),
IsA(v3_auth.Password),
self.data.unscoped_access_info)
def test_switch_with_next(self):
self.test_switch(next='/next_url')
@mock.patch.object(v3_auth.Token, 'get_access')
@mock.patch.object(password.PasswordPlugin, 'list_projects')
@mock.patch.object(v3_auth.Password, 'get_access')
def test_switch_region(self, mock_get_access, mock_project_list,
mock_get_access_token,
next=None):
projects = [self.data.project_one, self.data.project_two]
user = self.data.user
scoped = self.data.unscoped_access_info
sc = self.data.service_catalog
form_data = self.get_form_data(user)
mock_get_access.return_value = self.data.unscoped_access_info
mock_get_access_token.return_value = scoped
mock_project_list.return_value = projects
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
old_region = sc.get_endpoints()['compute'][0]['region']
self.assertEqual(self.client.session['services_region'], old_region)
region = sc.get_endpoints()['compute'][1]['region']
url = reverse('switch_services_region', args=[region])
form_data['region_name'] = region
if next:
form_data.update({auth.REDIRECT_FIELD_NAME: next})
response = self.client.get(url, form_data)
if next:
expected_url = next
self.assertEqual(response['location'], expected_url)
else:
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
self.assertEqual(self.client.session['services_region'], region)
mock_get_access.assert_called_once_with(IsA(session.Session))
mock_get_access_token.assert_called_with(IsA(session.Session))
mock_project_list.assert_called_once_with(
IsA(session.Session),
IsA(v3_auth.Password),
self.data.unscoped_access_info)
def test_switch_region_with_next(self, next=None):
self.test_switch_region(next='/next_url')
@mock.patch.object(v3_auth.Token, 'get_access')
@mock.patch.object(password.PasswordPlugin, 'list_projects')
@mock.patch.object(v3_auth.Password, 'get_access')
def test_switch_system_scope(self, mock_get_access, mock_project_list,
mock_get_access_token,
next=None):
projects = []
user = self.data.user
scoped = self.data.unscoped_access_info
form_data = self.get_form_data(user)
mock_get_access.return_value = self.data.unscoped_access_info
mock_get_access_token.return_value = scoped
mock_project_list.return_value = projects
url = reverse('login')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, form_data)
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
self.assertFalse(self.client.session['token'].system_scoped)
url = reverse('switch_system_scope')
if next:
form_data.update({auth.REDIRECT_FIELD_NAME: next})
response = self.client.get(url, form_data)
if next:
expected_url = next
self.assertEqual(response['location'], expected_url)
else:
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)
self.assertNotEqual(False, self.client.session['token'].system_scoped)
mock_get_access.assert_called_once_with(IsA(session.Session))
mock_get_access_token.assert_called_with(IsA(session.Session))
mock_project_list.assert_called_once_with(
IsA(session.Session),
IsA(v3_auth.Password),
self.data.unscoped_access_info)
class OpenStackAuthTestsPublicURL(OpenStackAuthTests):
interface = 'publicURL'
class OpenStackAuthTestsInternalURL(OpenStackAuthTests):
interface = 'internalURL'
class OpenStackAuthTestsAdminURL(OpenStackAuthTests):
interface = 'adminURL'
|
|
import math
import random
import pyglet
from pyglet import gl
WIDTH = 800
HEIGHT = 600
ROTATION_SPEED = 200
ACCELERATION = 300
SPACESHIP_RADIUS = 40
START_LIVES = 3
SHOOT_DELAY = 0.3
LASER_SPEED = 500
LASER_RADIUS = 5
COLLISION_SPEED_FACTOR = 0.2
ASTEROID_SPEED = 100
ASTEROID_ROTATION_SPEED = 3
ASTEROID_RADIUSES = {
1: 8,
2: 15,
3: 20,
4: 42,
}
def load_image(filename):
image = pyglet.image.load(filename)
image.anchor_x = image.width // 2
image.anchor_y = image.height // 2
return image
# Images from www.kenney.nl, thank you!
# Public Domain (Creative Commons CC-0)
spaceship_img = load_image('assets/PNG/playerShip2_red.png')
laser_img = load_image('assets/PNG/Lasers/laserBlue06.png')
space_img = load_image('assets/Backgrounds/blue.png')
life_img = load_image('assets/PNG/UI/playerLife2_red.png')
asteroid_images = {
1: [load_image('assets/PNG/Meteors/meteorGrey_tiny1.png'),
load_image('assets/PNG/Meteors/meteorGrey_tiny2.png'),
],
2: [load_image('assets/PNG/Meteors/meteorGrey_small1.png'),
load_image('assets/PNG/Meteors/meteorGrey_small2.png'),
],
3: [load_image('assets/PNG/Meteors/meteorGrey_med1.png'),
load_image('assets/PNG/Meteors/meteorGrey_med2.png'),
],
4: [load_image('assets/PNG/Meteors/meteorGrey_big1.png'),
load_image('assets/PNG/Meteors/meteorGrey_big3.png'),
load_image('assets/PNG/Meteors/meteorGrey_big4.png'),
],
}
life_sprite = pyglet.sprite.Sprite(life_img)
exhaust_images = []
for i in range(25):
name = 'assets2/PNG/White puff/whitePuff{:02}.png'.format(i)
exhaust_images.append(load_image(name))
def circle(x, y, radius):
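    # Approximates the circle as a 20-segment line strip centred on (x, y):
    # start at the offset (radius, 0) and rotate it by 2*pi/iterations each
    # step via the incremental rotation (dx, dy) -> (dx*c - dy*s, dy*c + dx*s).
    # Not called elsewhere in this file; handy e.g. for visualising radii.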
iterations = 20
s = math.sin(2*math.pi / iterations)
c = math.cos(2*math.pi / iterations)
dx, dy = radius, 0
gl.glBegin(gl.GL_LINE_STRIP)
for i in range(iterations+1):
gl.glVertex2f(x+dx, y+dy)
dx, dy = (dx*c - dy*s), (dy*c + dx*s)
gl.glEnd()
pressed_keys = set()
objects = []
main_batch = pyglet.graphics.Batch()
exhaust_batch = pyglet.graphics.Batch()
pyglet.font.add_file('assets/Bonus/kenvector_future_thin.ttf')
level_label = pyglet.text.Label('Loading...', x=10, y=10,
font_name='Kenvector Future Thin',
)
class SpaceObject:
kind = None
def __init__(self, window, image, x, y, radius, rotation=0,
x_speed=0, y_speed=0, batch=main_batch):
self.x = x
self.y = y
self.x_speed = x_speed
self.y_speed = y_speed
self.rotation = rotation
self.radius = radius
self.sprite = pyglet.sprite.Sprite(image, batch=batch)
self.window = window
def tick(self, dt):
self.x += dt * self.x_speed
self.y += dt * self.y_speed
if self.x < 0:
self.x += self.window.width
if self.y < 0:
self.y += self.window.height
if self.x > self.window.width:
self.x -= self.window.width
if self.y > self.window.height:
self.y -= self.window.height
self.sprite.rotation = 90 - self.rotation
self.sprite.x = self.x
self.sprite.y = self.y
def hit_by_spaceship(self, spaceship):
return
def hit_by_laser(self, laser):
return
def delete(self):
if self in objects:
objects.remove(self)
self.sprite.delete()
class Spaceship(SpaceObject):
def __init__(self, window, rotation=0):
super().__init__(window=window,
image=spaceship_img,
x=window.width / 2,
y=window.height / 2,
rotation=rotation,
radius=SPACESHIP_RADIUS,
)
self.shoot_timer = 0
self.lives = START_LIVES - 1
self.invincibility_timer = 0
self.size = 5
self.level = 0
def tick(self, dt):
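        # Keyboard controls: LEFT/RIGHT rotate the ship (spawning small side
        # exhaust puffs), UP accelerates along the current heading by splitting
        # ACCELERATION into cos/sin components, and SPACE fires at most one
        # laser every SHOOT_DELAY seconds.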
if pyglet.window.key.LEFT in pressed_keys:
self.rotation += dt * ROTATION_SPEED
objects.append(make_exhaust(self, -1))
if pyglet.window.key.RIGHT in pressed_keys:
self.rotation -= dt * ROTATION_SPEED
objects.append(make_exhaust(self, 1))
if pyglet.window.key.UP in pressed_keys:
rotation_radians = math.radians(self.rotation)
self.x_speed += dt * ACCELERATION * math.cos(rotation_radians)
self.y_speed += dt * ACCELERATION * math.sin(rotation_radians)
objects.append(make_exhaust(self))
if pyglet.window.key.SPACE in pressed_keys:
if self.shoot_timer < 0:
self.shoot()
self.shoot_timer = SHOOT_DELAY
super().tick(dt)
self.shoot_timer -= dt
self.invincibility_timer -= dt
if self.invincibility_timer < 0:
self.sprite.opacity = 255
for obj in list(objects):
if overlaps(self, obj):
obj.hit_by_spaceship(self)
else:
self.sprite.opacity = 255 * (
abs(math.sin(self.invincibility_timer * 5)) *
max(0, min(2.5 - self.invincibility_timer, 1))
)
def shoot(self):
laser = Laser(self.window, rotation=self.rotation,
x=self.x, y=self.y,
ship_x_speed=self.x_speed, ship_y_speed=self.y_speed,
)
objects.append(laser)
def destroy(self, asteroid):
for i in range(50):
t = random.uniform(0, math.pi*2)
rx = random.uniform(0, 100) * math.cos(t)
ry = random.uniform(0, 100) * math.sin(t)
debris = Debris(self.window, x=self.x, y=self.y,
x_speed=self.x_speed + rx * 3,
y_speed=self.y_speed + ry * 3,
time_to_live=4,
)
objects.append(debris)
explosion = Exhaust(self.window, x=self.x, y=self.y,
x_speed=self.x_speed + rx * 2,
y_speed=self.y_speed + ry * 2,
die_speed=1,
)
objects.append(explosion)
self.lives -= 1
if self.lives < 0:
self.delete()
pyglet.text.Label('Game over', anchor_x='center',
x=window.width/2, y=window.height/2,
font_name='Kenvector Future Thin',
font_size=50,
batch=main_batch,
)
else:
self.invincibility_timer = 3
self.x = self.window.width / 2
self.y = self.window.height / 2
self.x_speed = 0
self.y_speed = 0
pressed_keys.clear()
class Asteroid(SpaceObject):
kind = 'asteroid'
def __init__(self, window, size):
# Asteroids start at edges of the screen, so they don't
# initially collide with the ship
edge = random.choice(['horizontal', 'vertical'])
if edge == 'vertical':
x = 0
y = random.randrange(window.height)
else:
x = random.randrange(window.width)
y = 0
super().__init__(window=window,
image=random.choice(asteroid_images[size]),
x=x, y=y,
x_speed=random.uniform(-ASTEROID_SPEED, ASTEROID_SPEED),
y_speed=random.uniform(-ASTEROID_SPEED, ASTEROID_SPEED),
rotation=random.uniform(0, 360),
radius=ASTEROID_RADIUSES[size],
)
self.size = size
self.rotation_speed = random.uniform(-ASTEROID_ROTATION_SPEED,
ASTEROID_ROTATION_SPEED)
def tick(self, dt):
self.rotation += self.rotation_speed
super().tick(dt)
def hit_by_spaceship(self, spaceship):
spaceship.destroy(self)
def hit_by_laser(self, laser):
split_x_speed = -laser.y_speed * COLLISION_SPEED_FACTOR
split_y_speed = laser.x_speed * COLLISION_SPEED_FACTOR
for i in range(2):
if self.size > 1:
asteroid = Asteroid(self.window, self.size-1)
asteroid.x = self.x
asteroid.y = self.y
asteroid.x_speed = self.x_speed + split_x_speed
asteroid.y_speed = self.y_speed + split_y_speed
objects.append(asteroid)
for j in range(self.size):
x_rand_speed = random.uniform(-40, 40)
y_rand_speed = random.uniform(-40, 40)
debris = Debris(self.window, x=self.x, y=self.y,
x_speed=self.x_speed - split_y_speed + x_rand_speed,
y_speed=self.y_speed + split_x_speed + y_rand_speed,
time_to_live=self.size,
)
objects.append(debris)
split_x_speed = -split_x_speed
split_y_speed = -split_y_speed
laser.delete()
self.delete()
for obj in objects:
if obj.kind == 'asteroid':
return
# No asteroids!
start_level()
class Laser(SpaceObject):
def __init__(self, window, x, y, ship_x_speed, ship_y_speed, rotation):
rotation_radians = math.radians(rotation)
x_speed = ship_x_speed + LASER_SPEED * math.cos(rotation_radians)
y_speed = ship_y_speed + LASER_SPEED * math.sin(rotation_radians)
super().__init__(window,
x=x + math.cos(rotation_radians),
y=y + math.sin(rotation_radians),
x_speed=x_speed,
y_speed=y_speed,
rotation=rotation,
image=laser_img,
radius=LASER_RADIUS,
)
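        # Lifetime: roughly long enough to cross the longer screen dimension once.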
self.time_to_live = max(window.width, window.height) / LASER_SPEED
def tick(self, dt):
super().tick(dt)
for obj in list(objects):
if overlaps(self, obj):
obj.hit_by_laser(self)
self.time_to_live -= dt
if self.time_to_live < 0:
self.delete()
def make_exhaust(spaceship, side=0):
rotation_radians = math.radians(spaceship.rotation + side * 15)
x_unit = -math.cos(rotation_radians)
y_unit = -math.sin(rotation_radians)
start_radians = math.radians(spaceship.rotation - side * 48)
x_start = -math.cos(start_radians)
y_start = -math.sin(start_radians)
if side:
speed = 100
die_speed = 4
else:
speed = 200
die_speed = 3
x_rand_speed = random.uniform(-20,20)
y_rand_speed = random.uniform(-20,20)
return Exhaust(spaceship.window,
x=spaceship.x + x_start * 43,
y=spaceship.y + y_start * 43,
x_speed=spaceship.x_speed + x_unit*speed + x_rand_speed,
y_speed=spaceship.y_speed + y_unit*speed + y_rand_speed,
die_speed=die_speed,
small=side,
)
class Exhaust(SpaceObject):
def __init__(self, window, x, y, x_speed, y_speed, die_speed, small=False):
super().__init__(window,
x=x, y=y, x_speed=x_speed, y_speed=y_speed,
image=random.choice(exhaust_images),
radius=0,
batch=exhaust_batch,
)
self.time_to_live = 2
self.die_speed = die_speed
if small:
self.sprite.scale = 1/30
else:
self.sprite.scale = 1/10
def tick(self, dt):
super().tick(dt)
self.time_to_live -= dt * self.die_speed
if self.time_to_live > 1:
t = self.time_to_live - 1
self.sprite.color = (255, 255 * t, 0)
elif self.time_to_live > 0:
t = self.time_to_live
self.sprite.color = (255 * t, 0, 0)
self.sprite.opacity = 255 * t
else:
self.delete()
class Debris(SpaceObject):
def __init__(self, window, x, y, x_speed, y_speed, time_to_live):
super().__init__(window, x=x, y=y, x_speed=x_speed, y_speed=y_speed,
image=random.choice(asteroid_images[1]),
radius=0,
batch=exhaust_batch,
)
self.time_to_live = time_to_live
self.die_speed = 3
self.sprite.color = 0, 0, 0
self.sprite.scale = 1/2
self.rotation_speed = random.uniform(-20, 20)
def tick(self, dt):
self.rotation += self.rotation_speed * dt
super().tick(dt)
self.time_to_live -= dt * self.die_speed
if self.time_to_live > 1:
self.sprite.opacity = 255
elif self.time_to_live > 0:
self.sprite.opacity = 255 * self.time_to_live
else:
self.delete()
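# The playfield wraps around at the edges, so the per-axis distance is the
# shorter of the direct separation and the separation "through" the edge;
# overlaps() then compares squared distances against the combined radii.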
def distance(a, b, wrap_size):
result = abs(a - b)
if result > wrap_size / 2:
result = wrap_size - result
return result
def overlaps(a, b):
distance_squared = (distance(a.x, b.x, window.width) ** 2 +
distance(a.y, b.y, window.height) ** 2)
max_distance_squared = (a.radius + b.radius) ** 2
return distance_squared < max_distance_squared
def tick(dt):
for obj in objects:
obj.tick(dt)
def draw():
window.clear()
for x in range(0, window.width+space_img.width, space_img.width):
for y in range(0, window.height+space_img.height, space_img.height):
space_img.blit(x=x, y=y)
exhaust_batch.draw()
for x_offset in (-window.width, 0, window.width):
for y_offset in (-window.height, 0, window.height):
gl.glPushMatrix()
gl.glTranslatef(x_offset, y_offset, 0)
main_batch.draw()
gl.glPopMatrix()
for i in range(ship.lives):
life_sprite.y = 40
life_sprite.x = 30 + 40 * i
life_sprite.draw()
level_label.draw()
def key_pressed(key, mod):
pressed_keys.add(key)
def key_released(key, mod):
pressed_keys.discard(key)
def start_level():
ship.level += 1
ship.lives += 1
for i in range(ship.level):
objects.append(Asteroid(window, 4))
level_label.text = 'Level {}'.format(ship.level)
window = pyglet.window.Window(width=WIDTH, height=HEIGHT)
window.push_handlers(
on_draw=draw,
on_key_press=key_pressed,
on_key_release=key_released,
)
pyglet.clock.schedule(tick)
ship = Spaceship(window)
objects.append(ship)
start_level()
pyglet.app.run()
|
|
import os
from .routing_actions import *
from .routing_modifiers import *
from .routing_routers import *
from .routing_subjects import *
from .routing_vars import *
from ..base import OptionsGroup
from ..exceptions import ConfigurationError
from ..utils import listify
class RouteRule:
"""Represents a routing rule."""
class vars:
"""Routing variables."""
cookie = VarCookie
geoip = VarGeoip
httptime = VarHttptime
metric = VarMetric
query = VarQuery
request = VarRequest
time = VarTime
uwsgi = VarUwsgi
class var_functions:
"""Functions that can be applied to variables."""
base64 = FuncBase64
hex = FuncHex
lower = FuncLower
math = FuncMath
mime = FuncMime
upper = FuncUpper
class stages:
"""During the request cycle, various stages (aka chains) are processed.
Chains can be "recursive". A recursive chain can be called multiple times
in a request cycle.
"""
REQUEST = ''
"""Applied before the request is passed to the plugin."""
ERROR = 'error'
"""Applied as soon as an HTTP status code is generate. **Recursive chain**."""
RESPONSE = 'response'
"""Applied after the last response header has been generated (just before sending the body)."""
FINAL = 'final'
"""Applied after the response has been sent to the client."""
class subjects:
"""Routing subjects. These can be request's variables or other entities.
.. note:: Non-custom subjects can be pre-optimized (during startup)
and should be used for performance reasons.
"""
custom = SubjectCustom
http_host = SubjectHttpHost
http_referer = SubjectHttpReferer
http_user_agent = SubjectHttpUserAgent
path_info = SubjectPathInfo
query_string = SubjectQueryString
remote_addr = SubjectRemoteAddr
remote_user = SubjectRemoteUser
request_uri = SubjectRequestUri
status = SubjectStatus
class transforms:
"""A transformation is like a filter applied to the response
generated by your application.
Transformations can be chained (the output of a transformation will be the input of the following one)
and can completely overwrite response headers.
* http://uwsgi.readthedocs.io/en/latest/Transformations.html
"""
chunked = ActionChunked
fix_content_len = ActionFixContentLen
flush = ActionFlush
gzip = ActionGzip
template = ActionTemplate
to_file = ActionToFile
upper = ActionUpper
# todo Consider adding the following and some others from sources (incl. plugins):
# xslt, cachestore, memcachedstore, redisstore, rpc, lua
class actions:
"""Actions available for routing rules.
Values returned by actions:
* ``NEXT`` - continue to the next rule
* ``CONTINUE`` - stop scanning the internal routing table and run the request
* ``BREAK`` - stop scanning the internal routing table and close the request
* ``GOTO x`` - go to rule ``x``
"""
add_var_cgi = ActionAddVarCgi
add_var_log = ActionAddVarLog
alarm = ActionAlarm
auth_basic = ActionAuthBasic
auth_ldap = AuthLdap
dir_change = ActionDirChange
do_break = ActionDoBreak
do_continue = ActionDoContinue
do_goto = ActionDoGoto
fix_var_path_info = ActionFixVarPathInfo
header_add = ActionHeaderAdd
header_remove = ActionHeaderRemove
headers_off = ActionHeadersOff
headers_reset = ActionHeadersReset
log = ActionLog
offload_off = ActionOffloadOff
redirect = ActionRedirect
rewrite = ActionRewrite
route_external = ActionRouteExternal
route_uwsgi = ActionRouteUwsgi
send = ActionSend
serve_static = ActionServeStatic
set_harakiri = ActionSetHarakiri
set_script_file = ActionSetScriptFile
set_uwsgi_process_name = ActionSetUwsgiProcessName
set_var_document_root = ActionSetVarDocumentRoot
set_var_path_info = ActionSetVarPathInfo
set_var_remote_addr = ActionSetVarRemoteAddr
set_var_remote_user = ActionSetVarRemoteUser
set_var_request_method = ActionSetVarRequestMethod
set_var_request_uri = ActionSetVarRequestUri
set_var_script_name = ActionSetVarScriptName
set_var_uwsgi_appid = ActionSetVarUwsgiAppid
set_var_uwsgi_home = ActionSetVarUwsgiHome
set_var_uwsgi_scheme = ActionSetVarUwsgiScheme
signal = ActionSignal
# todo Consider adding the following and some others from sources (incl. plugins):
# cachestore, cacheset, memcached,
# router_cache: cache, cache-continue, cachevar, cacheinc, cachedec, cachemul, cachediv
# rpc,
# rpc: call, rpcret, rpcnext, rpcraw, rpcvar,
# access, spnego, radius
# xslt, ssi, gridfs
# cgi: cgi, cgihelper
# router_access: access,
# proxyhttp -router_http, proxyuwsgi -router_uwsgi, xattr -xattr
# router_memcached: memcached, memcached-continue, memcachedstore
# router_redis: redis, redis-continue, redisstore
def __init__(self, action, subject=None, stage=stages.REQUEST):
"""
        :param RouteAction action: Action (or transformation) to perform.
See ``.actions`` and ``.transforms``.
:param SubjectCustom|SubjectBuiltin|str subject: Subject to verify before action is performed.
See ``.subjects``.
* String values are automatically transformed into ``subjects.path_info``.
            * If ``None``, the action is always performed without a subject check.
:param str stage: Stage on which the action needs to be performed.
See ``.stages``.
"""
if subject is None:
subject = 'run' # always run the specified route action
elif isinstance(subject, str):
subject = self.subjects.path_info(subject)
subject_rule = ''
self._custom_subject = isinstance(subject, SubjectCustom)
if self._custom_subject:
subject_rule = subject
subject = 'if-not' if subject.negate else 'if'
elif isinstance(subject, SubjectBuiltin):
subject_rule = subject.regexp
subject = subject.name
self.command_label = f'{stage}-route-label'.strip('-')
self.command = f'{stage}-route-{subject}'.strip('-')
self.value = subject_rule, action
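# Illustrative usage sketch (an assumption, not part of the original module --
# the exact Action*/Subject* constructor signatures live in the routing_*
# modules imported above): a plain string subject is wrapped into
# ``subjects.path_info`` automatically, so something like
#
#     rule = RouteRule(
#         action=RouteRule.actions.redirect('https://example.com/'),
#         subject='^/legacy/',
#     )
#
# yields a ``*-route-*`` command name and a ``(regexp, action)`` value pair
# that ``Routing.register_route()`` below turns into uWSGI config options.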
class Routing(OptionsGroup):
"""Routing subsystem.
You can use the internal routing subsystem to dynamically alter the way requests are handled.
.. note:: Since 1.9
* http://uwsgi.readthedocs.io/en/latest/InternalRouting.html
* http://uwsgi.readthedocs.io/en/latest/Transformations.html
"""
route_rule = RouteRule
class routers:
"""Dedicated routers, which can be used with `register_router()`."""
http = RouterHttp
https = RouterHttps
ssl = RouterSsl
fast = RouterFast
raw = RouterRaw
forkpty = RouterForkPty
tuntap = RouterTunTap
class modifiers:
"""Routing modifiers.
* http://uwsgi.readthedocs.io/en/latest/Protocol.html
"""
cache = ModifierCache
cgi = ModifierCgi
cluster_node = ModifierClusterNode
config_from_node = ModifierConfigFromNode
corerouter_signal = ModifierCorerouterSignal
echo = ModifierEcho
eval = ModifierEval
example = ModifierExample
fastfunc = ModifierFastfunc
gccgo = ModifierGccgo
glusterfs = ModifierGlusterfs
gridfs = ModifierGridfs
jvm = ModifierJvm
legion_msg = ModifierLegionMsg
lua = ModifierLua
manage = ModifierManage
manage_path_info = ModifierManagePathInfo
message = ModifierMessage
message_array = ModifierMessageArray
message_marshal = ModifierMessageMarshal
mono = ModifierMono
multicast = ModifierMulticast
multicast_announce = ModifierMulticastAnnounce
persistent_close = ModifierPersistentClose
php = ModifierPhp
ping = ModifierPing
psgi = ModifierPsgi
rack = ModifierRack
rados = ModifierRados
raw = ModifierRaw
reload = ModifierReload
reload_brutal = ModifierReloadBrutal
remote_logging = ModifierRemoteLogging
response = ModifierResponse
rpc = ModifierRpc
signal = ModifierSignal
snmp = ModifierSnmp
spooler = ModifierSpooler
ssi = ModifierSsi
subscription = ModifierSubscription
symcall = ModifierSymcall
v8 = ModifierV8
webdav = ModifierWebdav
wsgi = ModifierWsgi
xslt = ModifierXslt
def use_router(self, router, *, force=None):
"""
:param RouterBase router: Dedicated router object. See `.routers`.
        :param bool force: All of the gateways (routers) have to be run under the
            master process; by supplying this flag you can try to bypass this limit.
"""
self._set('force-gateway', force, cast=bool)
router._contribute_to_opts(self)
return self._section
def register_route(self, route_rules, *, label=None):
"""Registers a routing rule.
:param RouteRule|list[RouteRule] route_rules:
:param str label: Label to mark the given set of rules.
This can be used in conjunction with ``do_goto`` rule action.
* http://uwsgi.readthedocs.io/en/latest/InternalRouting.html#goto
"""
route_rules = listify(route_rules)
if route_rules and label:
self._set(route_rules[0].command_label, label, multi=True)
        for route_rule in route_rules:
            self._set(route_rule.command, route_rule.value, multi=True)
return self._section
def print_routing_rules(self):
"""Print out supported routing rules (actions, transforms, etc.)."""
self._set('routers-list', True, cast=bool)
return self._section
def set_error_page(self, status: int, html_fpath: str):
"""Add an error page (html) for managed 403, 404, 500 response.
:param status: HTTP status code.
:param html_fpath: HTML page file path.
"""
statuses = [403, 404, 500]
status = int(status)
if status not in statuses:
raise ConfigurationError(
f"Code `{status}` for `routing.set_error_page()` is unsupported. "
f"Supported: {', '.join(map(str, statuses))}")
self._set(f'error-page-{status}', html_fpath, multi=True)
return self._section
def set_error_pages(self, codes_map: dict = None, *, common_prefix: str = None):
"""Add an error pages for managed 403, 404, 500 responses.
Shortcut for ``.set_error_page()``.
:param codes_map: Status code mapped into an html filepath or
just a filename if common_prefix is used.
            If not set, a filename containing the status code is presumed: 403.html, 404.html, 500.html.
:param common_prefix: Common path (prefix) for all files.
"""
statuses = [403, 404, 500]
if common_prefix:
if not codes_map:
codes_map = {code: f'{code}.html' for code in statuses}
for code, filename in codes_map.items():
codes_map[code] = os.path.join(common_prefix, filename)
for code, filepath in codes_map.items():
self.set_error_page(code, filepath)
return self._section
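    # Illustrative call (an assumption about typical usage):
    # ``.set_error_pages(common_prefix='/var/www/errors')`` registers
    # /var/www/errors/403.html, 404.html and 500.html via ``.set_error_page()``.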
def set_geoip_params(self, *, db_country=None, db_city=None):
"""Sets GeoIP parameters.
* http://uwsgi.readthedocs.io/en/latest/GeoIP.html
:param str db_country: Country database file path.
:param str db_city: City database file path. Example: ``GeoLiteCity.dat``.
"""
self._set('geoip-country', db_country, plugin='geoip')
self._set('geoip-city', db_city, plugin='geoip')
return self._section
def header_add(self, name, value):
"""Automatically add HTTP headers to response.
:param str name:
:param str value:
"""
self._set('add-header', f'{name}: {value}', multi=True)
return self._section
def header_remove(self, value):
"""Automatically remove specified HTTP header from the response.
:param str value:
"""
self._set('del-header', value, multi=True)
return self._section
def header_collect(self, name, target_var, *, pull=False):
"""Store the specified response header in a request var
(optionally removing it from the response).
:param str name:
:param str target_var:
:param bool pull: Whether to remove header from response.
"""
self._set(
'pull-header' if pull else 'collect-header',
f'{name} {target_var}',
multi=True
)
return self._section
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os.path
from oslo_config import cfg
from oslo_log import log
import six
from keystone.catalog import core
from keystone import exception
from keystone.i18n import _LC
LOG = log.getLogger(__name__)
CONF = cfg.CONF
def parse_templates(template_lines):
o = {}
for line in template_lines:
if ' = ' not in line:
continue
k, v = line.strip().split(' = ')
if not k.startswith('catalog.'):
continue
parts = k.split('.')
region = parts[1]
# NOTE(termie): object-store insists on having a dash
service = parts[2].replace('_', '-')
key = parts[3]
region_ref = o.get(region, {})
service_ref = region_ref.get(service, {})
service_ref[key] = v
region_ref[service] = service_ref
o[region] = region_ref
return o
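# Illustrative example (not part of the original module): a template line such as
#     catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/
# is parsed into the nested structure
#     {'RegionOne': {'identity': {'publicURL': 'http://localhost:$(public_port)s/'}}}
# i.e. region -> service -> key -> raw (still uninterpolated) value.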
class Catalog(core.Driver):
"""A backend that generates endpoints for the Catalog based on templates.
It is usually configured via config entries that look like:
catalog.$REGION.$SERVICE.$key = $value
    and is stored in a similar looking hierarchy, where a value can contain
    values to be interpolated by standard Python string interpolation that look
    like (the % is replaced by a $ due to paste attempting to interpolate on
    its own):
http://localhost:$(public_port)s/
When expanding the template it will pass in a dict made up of the conf
instance plus a few additional key-values, notably tenant_id and user_id.
It does not care what the keys and values are but it is worth noting that
keystone_compat will expect certain keys to be there so that it can munge
them into the output format keystone expects. These keys are:
name - the name of the service, most likely repeated for all services of
the same type, across regions.
adminURL - the url of the admin endpoint
publicURL - the url of the public endpoint
internalURL - the url of the internal endpoint
"""
def __init__(self, templates=None):
super(Catalog, self).__init__()
if templates:
self.templates = templates
else:
template_file = CONF.catalog.template_file
if not os.path.exists(template_file):
template_file = CONF.find_file(template_file)
self._load_templates(template_file)
def _load_templates(self, template_file):
try:
with open(template_file) as f:
self.templates = parse_templates(f)
except IOError:
LOG.critical(_LC('Unable to open template file %s'), template_file)
raise
# region crud
def create_region(self, region_ref):
raise exception.NotImplemented()
def list_regions(self, hints):
return [{'id': region_id, 'description': '', 'parent_region_id': ''}
for region_id in self.templates]
def get_region(self, region_id):
if region_id in self.templates:
return {'id': region_id, 'description': '', 'parent_region_id': ''}
raise exception.RegionNotFound(region_id=region_id)
def update_region(self, region_id, region_ref):
raise exception.NotImplemented()
def delete_region(self, region_id):
raise exception.NotImplemented()
# service crud
def create_service(self, service_id, service_ref):
raise exception.NotImplemented()
def _list_services(self, hints):
for region_ref in six.itervalues(self.templates):
for service_type, service_ref in six.iteritems(region_ref):
yield {
'id': service_type,
'enabled': True,
'name': service_ref.get('name', ''),
'description': service_ref.get('description', ''),
'type': service_type,
}
def list_services(self, hints):
return list(self._list_services(hints=None))
def get_service(self, service_id):
for service in self._list_services(hints=None):
if service['id'] == service_id:
return service
raise exception.ServiceNotFound(service_id=service_id)
def update_service(self, service_id, service_ref):
raise exception.NotImplemented()
def delete_service(self, service_id):
raise exception.NotImplemented()
# endpoint crud
def create_endpoint(self, endpoint_id, endpoint_ref):
raise exception.NotImplemented()
def _list_endpoints(self):
for region_id, region_ref in six.iteritems(self.templates):
for service_type, service_ref in six.iteritems(region_ref):
for key in service_ref:
if key.endswith('URL'):
interface = key[:-3]
endpoint_id = ('%s-%s-%s' %
(region_id, service_type, interface))
yield {
'id': endpoint_id,
'service_id': service_type,
'interface': interface,
'url': service_ref[key],
'legacy_endpoint_id': None,
'region_id': region_id,
'enabled': True,
}
def list_endpoints(self, hints):
return list(self._list_endpoints())
def get_endpoint(self, endpoint_id):
for endpoint in self._list_endpoints():
if endpoint['id'] == endpoint_id:
return endpoint
raise exception.EndpointNotFound(endpoint_id=endpoint_id)
def update_endpoint(self, endpoint_id, endpoint_ref):
raise exception.NotImplemented()
def delete_endpoint(self, endpoint_id):
raise exception.NotImplemented()
def get_catalog(self, user_id, tenant_id):
"""Retrieve and format the V2 service catalog.
:param user_id: The id of the user who has been authenticated for
creating service catalog.
        :param tenant_id: The id of the project. 'tenant_id' will be None when
            this is called to create a catalog to go in a domain-scoped token.
            In that case, any endpoint that requires a tenant_id as part of its
            URL will be skipped.
:returns: A nested dict representing the service catalog or an
empty dict.
"""
substitutions = dict(
itertools.chain(CONF.items(), CONF.eventlet_server.items()))
substitutions.update({'user_id': user_id})
silent_keyerror_failures = []
if tenant_id:
substitutions.update({
'tenant_id': tenant_id,
'project_id': tenant_id,
})
else:
silent_keyerror_failures = ['tenant_id', 'project_id', ]
catalog = {}
# TODO(davechen): If there is service with no endpoints, we should
# skip the service instead of keeping it in the catalog.
# see bug #1436704.
for region, region_ref in self.templates.items():
catalog[region] = {}
for service, service_ref in region_ref.items():
service_data = {}
try:
for k, v in service_ref.items():
formatted_value = core.format_url(
v, substitutions,
silent_keyerror_failures=silent_keyerror_failures)
if formatted_value:
service_data[k] = formatted_value
except exception.MalformedEndpoint:
continue # this failure is already logged in format_url()
catalog[region][service] = service_data
return catalog
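    # Summary of the expansion above: template values written with $(...)s
    # placeholders (see the class docstring) are expanded by core.format_url()
    # against CONF-derived substitutions plus user_id and, when scoped,
    # tenant_id/project_id; as the docstring notes, endpoints whose URL needs a
    # missing tenant_id are skipped for domain-scoped requests.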
def add_endpoint_to_project(self, endpoint_id, project_id):
raise exception.NotImplemented()
def remove_endpoint_from_project(self, endpoint_id, project_id):
raise exception.NotImplemented()
def check_endpoint_in_project(self, endpoint_id, project_id):
raise exception.NotImplemented()
def list_endpoints_for_project(self, project_id):
raise exception.NotImplemented()
def list_projects_for_endpoint(self, endpoint_id):
raise exception.NotImplemented()
def delete_association_by_endpoint(self, endpoint_id):
raise exception.NotImplemented()
def delete_association_by_project(self, project_id):
raise exception.NotImplemented()
def create_endpoint_group(self, endpoint_group):
raise exception.NotImplemented()
def get_endpoint_group(self, endpoint_group_id):
raise exception.NotImplemented()
def update_endpoint_group(self, endpoint_group_id, endpoint_group):
raise exception.NotImplemented()
def delete_endpoint_group(self, endpoint_group_id):
raise exception.NotImplemented()
def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
raise exception.NotImplemented()
def get_endpoint_group_in_project(self, endpoint_group_id, project_id):
raise exception.NotImplemented()
def list_endpoint_groups(self):
raise exception.NotImplemented()
def list_endpoint_groups_for_project(self, project_id):
raise exception.NotImplemented()
def list_projects_associated_with_endpoint_group(self, endpoint_group_id):
raise exception.NotImplemented()
def remove_endpoint_group_from_project(self, endpoint_group_id,
project_id):
raise exception.NotImplemented()
def delete_endpoint_group_association_by_project(self, project_id):
raise exception.NotImplemented()
|
|
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
check_parameters_default_constructible,
check_regressors_classifiers_sparse_data,
check_transformer,
check_clustering,
check_clusterer_compute_labels_predict,
check_regressors_int,
check_regressors_train,
check_regressors_pickle,
check_transformer_sparse_data,
check_transformer_pickle,
check_estimators_nan_inf,
check_classifiers_one_label,
check_classifiers_train,
check_classifiers_classes,
check_classifiers_input_shapes,
check_classifiers_pickle,
check_class_weight_classifiers,
check_class_weight_auto_classifiers,
check_class_weight_auto_linear_classifier,
check_estimators_overwrite_params,
check_estimators_partial_fit_n_features,
check_cluster_overwrite_params,
check_sparsify_binary_classifier,
check_sparsify_multiclass_classifier,
check_classifier_data_not_an_array,
check_regressor_data_not_an_array,
check_transformer_data_not_an_array,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
CROSS_DECOMPOSITION)
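# The tests below are yield-based generator tests (as used by nose): each
# function yields (check_function, name, Estimator) tuples that the runner
# executes as individual cases, one per estimator.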
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_estimators_sparse_data():
# All estimators should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
estimators = all_estimators(type_filter=['classifier', 'regressor'])
for name, Estimator in estimators:
yield check_regressors_classifiers_sparse_data, name, Estimator
def test_transformers():
# test if transformers do something sensible on training set
# also test all shapes / shape errors
transformers = all_estimators(type_filter='transformer')
for name, Transformer in transformers:
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
yield check_transformer_sparse_data, name, Transformer
yield check_transformer_pickle, name, Transformer
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array, name, Transformer
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer, name, Transformer
def test_estimators_nan_inf():
# Test that all estimators check their input for NaN's and infs
estimators = all_estimators(type_filter=['classifier', 'regressor',
'transformer', 'cluster'])
for name, Estimator in estimators:
if name not in CROSS_DECOMPOSITION + ['Imputer']:
yield check_estimators_nan_inf, name, Estimator
def test_clustering():
# test if clustering algorithms do something sensible
# also test all shapes / shape errors
clustering = all_estimators(type_filter='cluster')
for name, Alg in clustering:
        # test whether any clustering algorithm overwrites its init parameters
        # during fit
yield check_cluster_overwrite_params, name, Alg
yield check_clusterer_compute_labels_predict, name, Alg
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering, name, Alg
yield check_estimators_partial_fit_n_features, name, Alg
def test_classifiers():
# test if classifiers can cope with non-consecutive classes
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
        # test classifiers can handle non-array data
yield check_classifier_data_not_an_array, name, Classifier
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label, name, Classifier
yield check_classifiers_classes, name, Classifier
yield check_classifiers_pickle, name, Classifier
yield check_estimators_partial_fit_n_features, name, Classifier
# basic consistency testing
yield check_classifiers_train, name, Classifier
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes, name, Classifier
def test_regressors():
regressors = all_estimators(type_filter='regressor')
# TODO: test with intercept
# TODO: test with multiple responses
for name, Regressor in regressors:
# basic testing
yield check_regressors_train, name, Regressor
yield check_regressor_data_not_an_array, name, Regressor
yield check_estimators_partial_fit_n_features, name, Regressor
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_regressors_pickle, name, Regressor
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int, name, Regressor
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_classifiers():
# test that class_weight works and that the semantics are consistent
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield check_class_weight_classifiers, name, Classifier
def test_class_weight_auto_classifiers():
"""Test that class_weight="auto" improves f1-score"""
# This test is broken; its success depends on:
# * a rare fortuitous RNG seed for make_classification; and
# * the use of binary F1 over a seemingly arbitrary positive class for two
# datasets, and weighted average F1 for the third.
# Its expectations need to be clarified and reimplemented.
raise SkipTest('This test requires redefinition')
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
# create unbalanced dataset
X, y = make_classification(n_classes=n_classes, n_samples=200,
n_features=10, weights=weights,
random_state=0, n_informative=n_classes)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if (name != "NuSVC"
# the sparse version has a parameter that doesn't do anything
and not name.startswith("RidgeClassifier")
                    # RidgeClassifier behaves unexpectedly
# FIXME!
and not name.endswith("NB")):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
yield (check_class_weight_auto_classifiers, name, Classifier,
X_train, y_train, X_test, y_test, weights)
def test_class_weight_auto_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_auto_linear_classifier, name, Classifier
def test_estimators_overwrite_params():
    # test whether any estimator overwrites its init parameters during fit
for est_type in ["classifier", "regressor", "transformer"]:
estimators = all_estimators(type_filter=est_type)
for name, Estimator in estimators:
if (name not in ['CCA', '_CCA', 'PLSCanonical', 'PLSRegression',
'PLSSVD', 'GaussianProcess']):
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params, name, Estimator
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_sparsify_estimators():
    # Test if predict with sparsified estimators works.
    # Tests regression, binary classification, and multi-class classification.
estimators = all_estimators()
# test regression and binary classification
for name, Estimator in estimators:
try:
Estimator.sparsify
yield check_sparsify_binary_classifier, name, Estimator
except:
pass
# test multiclass classification
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
try:
Classifier.sparsify
yield check_sparsify_multiclass_classifier, name, Classifier
except:
pass
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that have a max_iter attribute
    # report an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif name in CROSS_DECOMPOSITION or (
name in ['LinearSVC', 'LogisticRegression']
):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Glance
"""
import logging
import logging.config
import logging.handlers
import os
from oslo.config import cfg
from paste import deploy
from glance.version import version_info as version
paste_deploy_opts = [
cfg.StrOpt('flavor',
help=_('Partial name of a pipeline in your paste configuration '
'file with the service name removed. For example, if '
'your paste section name is '
'[pipeline:glance-api-keystone] use the value '
'"keystone"')),
cfg.StrOpt('config_file',
help=_('Name of the paste configuration file.')),
]
image_format_opts = [
cfg.ListOpt('container_formats',
default=['ami', 'ari', 'aki', 'bare', 'ovf'],
help=_("Supported values for the 'container_format' "
"image attribute"),
deprecated_opts=[cfg.DeprecatedOpt('container_formats',
group='DEFAULT')]),
cfg.ListOpt('disk_formats',
default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2',
'vdi', 'iso'],
help=_("Supported values for the 'disk_format' "
"image attribute"),
deprecated_opts=[cfg.DeprecatedOpt('disk_formats',
group='DEFAULT')]),
]
task_opts = [
cfg.IntOpt('task_time_to_live',
default=48,
help=_("Time in hours for which a task lives after, either "
"succeeding or failing"),
deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live',
group='DEFAULT')]),
]
common_opts = [
cfg.BoolOpt('allow_additional_image_properties', default=True,
help=_('Whether to allow users to specify image properties '
'beyond what the image schema provides')),
cfg.IntOpt('image_member_quota', default=128,
help=_('Maximum number of image members per image. '
'Negative values evaluate to unlimited.')),
cfg.IntOpt('image_property_quota', default=128,
help=_('Maximum number of properties allowed on an image. '
'Negative values evaluate to unlimited.')),
cfg.IntOpt('image_tag_quota', default=128,
help=_('Maximum number of tags allowed on an image. '
'Negative values evaluate to unlimited.')),
cfg.IntOpt('image_location_quota', default=10,
help=_('Maximum number of locations allowed on an image. '
'Negative values evaluate to unlimited.')),
cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api',
help=_('Python module path of data access API')),
cfg.IntOpt('limit_param_default', default=25,
help=_('Default value for the number of items returned by a '
'request if not specified explicitly in the request')),
cfg.IntOpt('api_limit_max', default=1000,
help=_('Maximum permissible number of items that could be '
'returned by a request')),
cfg.BoolOpt('show_image_direct_url', default=False,
help=_('Whether to include the backend image storage location '
'in image properties. Revealing storage location can '
'be a security risk, so use this setting with '
'caution!')),
cfg.BoolOpt('show_multiple_locations', default=False,
help=_('Whether to include the backend image locations '
'in image properties. Revealing storage location can '
'be a security risk, so use this setting with '
                       'caution! This overrides show_image_direct_url.')),
cfg.IntOpt('image_size_cap', default=1099511627776,
help=_("Maximum size of image a user can upload in bytes. "
"Defaults to 1099511627776 bytes (1 TB).")),
cfg.IntOpt('user_storage_quota', default=0,
help=_("Set a system wide quota for every user. This value is "
"the total number of bytes that a user can use across "
"all storage systems. A value of 0 means unlimited.")),
cfg.BoolOpt('enable_v1_api', default=True,
help=_("Deploy the v1 OpenStack Images API.")),
cfg.BoolOpt('enable_v2_api', default=True,
help=_("Deploy the v2 OpenStack Images API.")),
cfg.StrOpt('pydev_worker_debug_host', default=None,
help=_('The hostname/IP of the pydev process listening for '
'debug connections')),
cfg.IntOpt('pydev_worker_debug_port', default=5678,
help=_('The port on which a pydev process is listening for '
'connections.')),
cfg.StrOpt('metadata_encryption_key', secret=True,
help=_('Key used for encrypting sensitive metadata while '
'talking to the registry or database.')),
]
CONF = cfg.CONF
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
CONF.register_opts(image_format_opts, group='image_format')
CONF.register_opts(task_opts, group='task')
CONF.register_opts(common_opts)
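# Illustrative configuration snippet exercising the option groups registered
# above; the values are assumptions, not recommended defaults.
#
#   [DEFAULT]
#   image_member_quota = 256
#   show_image_direct_url = False
#
#   [paste_deploy]
#   flavor = keystone
#
#   [image_format]
#   disk_formats = qcow2,raw,iso
#
#   [task]
#   task_time_to_live = 24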
def parse_args(args=None, usage=None, default_config_files=None):
CONF(args=args,
project='glance',
version=version.cached_version_string(),
usage=usage,
default_config_files=default_config_files)
def parse_cache_args(args=None):
config_files = cfg.find_config_files(project='glance', prog='glance-cache')
parse_args(args=args, default_config_files=config_files)
def _get_deployment_flavor(flavor=None):
"""
Retrieve the paste_deploy.flavor config item, formatted appropriately
for appending to the application name.
:param flavor: if specified, use this setting rather than the
paste_deploy.flavor configuration setting
"""
if not flavor:
flavor = CONF.paste_deploy.flavor
return '' if not flavor else ('-' + flavor)
def _get_paste_config_path():
paste_suffix = '-paste.ini'
conf_suffix = '.conf'
if CONF.config_file:
# Assume paste config is in a paste.ini file corresponding
# to the last config file
path = CONF.config_file[-1].replace(conf_suffix, paste_suffix)
else:
path = CONF.prog + paste_suffix
return CONF.find_file(os.path.basename(path))
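# Illustration of the derivation above (the file names are assumptions): with
# a last config file of '/etc/glance/glance-api.conf', the paste config is
# looked up as 'glance-api-paste.ini' via CONF.find_file().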
def _get_deployment_config_file():
"""
Retrieve the deployment_config_file config item, formatted as an
absolute pathname.
"""
path = CONF.paste_deploy.config_file
if not path:
path = _get_paste_config_path()
if not path:
msg = "Unable to locate paste config file for %s." % CONF.prog
raise RuntimeError(msg)
return os.path.abspath(path)
def load_paste_app(app_name, flavor=None, conf_file=None):
"""
Builds and returns a WSGI app from a paste config file.
We assume the last config file specified in the supplied ConfigOpts
object is the paste config file, if conf_file is None.
:param app_name: name of the application to load
:param flavor: name of the variant of the application to load
:param conf_file: path to the paste config file
:raises RuntimeError when config file cannot be located or application
cannot be loaded from config file
"""
# append the deployment flavor to the application name,
# in order to identify the appropriate paste pipeline
app_name += _get_deployment_flavor(flavor)
if not conf_file:
conf_file = _get_deployment_config_file()
try:
logger = logging.getLogger(__name__)
logger.debug(_("Loading %(app_name)s from %(conf_file)s"),
{'conf_file': conf_file, 'app_name': app_name})
app = deploy.loadapp("config:%s" % conf_file, name=app_name)
# Log the options used when starting if we're in debug mode...
if CONF.debug:
CONF.log_opt_values(logger, logging.DEBUG)
return app
except (LookupError, ImportError) as e:
msg = (_("Unable to load %(app_name)s from "
"configuration file %(conf_file)s."
"\nGot: %(e)r") % {'app_name': app_name,
'conf_file': conf_file,
'e': e})
logger.error(msg)
raise RuntimeError(msg)
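# A minimal usage sketch, not part of Glance itself: parse configuration and
# then build the WSGI app from the paste pipeline named '<app_name>-<flavor>'.
# The application name and flavor below are assumptions for illustration.
def _example_load_api_app():
    parse_args()
    return load_paste_app('glance-api', flavor='keystone')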
|
|
#!/usr/bin/env python
"""
python %prog [options] <in_schema.xsd> <out_schema.xsd>
Synopsis:
Prepare schema document. Replace include and import elements.
Examples:
python %prog myschema.xsd
python %prog myschema.xsd newschema.xsd
python %prog -f myschema.xsd newschema.xsd
cat infile.xsd | python %prog > outfile.xsd
"""
#
# Imports
import sys
import os
import urllib.request, urllib.error, urllib.parse
import copy
from optparse import OptionParser, Values
import itertools
from copy import deepcopy
from lxml import etree
#
# Globals and constants
#
# Do not modify the following VERSION comments.
# Used by updateversion.py.
##VERSION##
VERSION = '2.12a'
##VERSION##
Namespaces = {'xs': 'http://www.w3.org/2001/XMLSchema'}
Xsd_namespace_uri = 'http://www.w3.org/2001/XMLSchema'
CatalogDict = {}
# the base url to use for all relative paths in the catalog
CatalogBaseUrl = None
def load_catalog(catalogpath):
global CatalogBaseUrl
if catalogpath:
CatalogBaseUrl = os.path.split(catalogpath)[0]
catalog = etree.parse(open(catalogpath))
for elements in catalog.getroot().findall(
"{urn:oasis:names:tc:entity:xmlns:xml:catalog}public"):
CatalogDict[elements.get("publicId")] = elements.get("uri")
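# Illustrative shape of a catalog file accepted by load_catalog() above; the
# entry itself is an assumption. Each <public> element maps a publicId to a
# (possibly relative) uri, which is consulted before any schemaLocation
# attribute when resolving includes and imports.
#
#   <catalog xmlns="urn:oasis:names:tc:entity:xmlns:xml:catalog">
#       <public publicId="urn:example:types" uri="schemas/types.xsd"/>
#   </catalog>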
#
# Functions for external use
def process_include_files(
infile, outfile, inpath='', catalogpath=None,
fixtypenames=None):
load_catalog(catalogpath)
options = Values({
'force': False,
'fixtypenames': fixtypenames,
})
prep_schema_doc(infile, outfile, inpath, options)
def get_all_root_file_paths(infile, inpath='', catalogpath=None):
load_catalog(catalogpath)
doc1 = etree.parse(infile)
root1 = doc1.getroot()
rootPaths = []
params = Params()
params.parent_url = infile
params.base_url = os.path.split(inpath)[0]
get_root_file_paths(root1, params, rootPaths)
rootPaths.append(inpath)
return rootPaths
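# A minimal usage sketch of the two entry points above; the file names are
# assumptions used only for illustration.
def _example_flatten_schema(inpath='myschema.xsd', outpath='myschema_flat.xsd'):
    # Write a copy of the schema with its includes/imports folded in, then
    # report every file that was pulled in while resolving them.
    process_include_files(inpath, outpath, inpath=inpath)
    return get_all_root_file_paths(inpath, inpath=inpath)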
#
# Classes
class Params(object):
members = ('base_url', 'already_processed', 'parent_url', )
def __init__(self):
self.base_url = None
self.already_processed = set()
self.parent_url = None
def __setattr__(self, name, value):
if name not in self.members:
            raise AttributeError('Class %s has no settable attribute "%s"' % (
self.__class__.__name__, name, ))
self.__dict__[name] = value
class SchemaIOError(IOError):
pass
class RaiseComplexTypesError(Exception):
pass
#
# Functions for internal use and testing
def clear_includes_and_imports(node):
namespace = node.nsmap[node.prefix]
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
repl = etree.Comment(etree.tostring(child))
repl.tail = '\n'
node.replace(child, repl)
def get_ref_info(node, params):
# first look for the schema location in the catalog, if not
# there, then see if it's specified in the node
namespace = node.get('namespace')
url = None
baseUrl = None
if namespace in CatalogDict:
url = CatalogDict[namespace]
# setup the base url in case the path
# in the catalog was a relative path
baseUrl = CatalogBaseUrl
if not url:
url = node.get('schemaLocation')
if not url:
msg = '*** Warning: missing "schemaLocation" attribute in %s\n' % (
params.parent_url, )
sys.stderr.write(msg)
return (None, None)
# Uncomment the next lines to help track down missing schemaLocation etc.
# print '(resolve_ref) url: %s\n parent-url: %s' % (
# url, params.parent_url, )
if not baseUrl:
baseUrl = params.base_url
if baseUrl and not (
url.startswith('/') or
url.startswith('http:') or
url.startswith('ftp:')):
locn = '%s/%s' % (baseUrl, url, )
schema_name = locn
else:
locn = url
schema_name = url
return locn, schema_name
def resolve_ref(node, params, options):
content = None
locn, schema_name = get_ref_info(node, params)
if locn is not None and not (
locn.startswith('/') or
locn.startswith('http:') or
locn.startswith('ftp:')):
schema_name = os.path.abspath(locn)
if locn is not None:
if schema_name not in params.already_processed:
params.already_processed.add(schema_name)
## print 'trace --'
## print ' url: : %s' % (url, )
## print ' base : %s' % (params.base_url, )
## print ' parent : %s' % (params.parent_url, )
## print ' locn : %s' % (locn, )
## print ' schema_name : %s\n' % (schema_name, )
if locn.startswith('http:') or locn.startswith('ftp:'):
try:
urlfile = urllib.request.urlopen(locn)
content = urlfile.read()
urlfile.close()
params.parent_url = locn
params.base_url = os.path.split(locn)[0]
except urllib.error.HTTPError:
msg = "Can't find file %s referenced in %s." % (
locn, params.parent_url, )
raise SchemaIOError(msg)
else:
if os.path.exists(locn):
infile = open(locn, 'rb')
content = infile.read()
infile.close()
params.parent_url = locn
params.base_url = os.path.split(locn)[0]
if content is None:
msg = "Can't find file %s referenced in %s." % (
locn, params.parent_url, )
raise SchemaIOError(msg)
## if content is None:
## msg = "Can't find file %s referenced in %s." % (
## locn, params.parent_url, )
## raise SchemaIOError(msg)
return content
def collect_inserts(node, params, inserts, options):
namespace = node.nsmap[node.prefix]
roots = []
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
aux_roots = collect_inserts_aux(child, params, inserts, options)
roots.extend(aux_roots)
return roots
def collect_inserts_aux(child, params, inserts, options):
roots = []
save_base_url = params.base_url
string_content = resolve_ref(child, params, options)
if string_content is not None:
root = etree.fromstring(string_content, base_url=params.base_url)
roots.append(root)
for child1 in root:
if not isinstance(child1, etree._Comment):
namespace = child1.nsmap[child1.prefix]
if (child1.tag != '{%s}include' % (namespace, ) and
                    child1.tag != '{%s}import' % (namespace, )):
comment = etree.Comment(etree.tostring(child))
comment.tail = '\n'
inserts.append(comment)
inserts.append(child1)
insert_roots = collect_inserts(root, params, inserts, options)
roots.extend(insert_roots)
params.base_url = save_base_url
return roots
def get_root_file_paths(node, params, rootPaths):
namespace = node.nsmap[node.prefix]
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
get_root_file_paths_aux(child, params, rootPaths)
def get_root_file_paths_aux(child, params, rootPaths):
save_base_url = params.base_url
path, _ = get_ref_info(child, params)
string_content = resolve_ref(child, params, None)
if string_content is not None:
root = etree.fromstring(string_content, base_url=params.base_url)
get_root_file_paths(root, params, rootPaths)
if path is not None and path not in rootPaths:
rootPaths.append(path)
params.base_url = save_base_url
def make_file(outFileName, options):
outFile = None
if (not options.force) and os.path.exists(outFileName):
reply = input('File %s exists. Overwrite? (y/n): ' % outFileName)
if reply == 'y':
outFile = open(outFileName, 'w')
else:
outFile = open(outFileName, 'w')
return outFile
def prep_schema_doc(infile, outfile, inpath, options):
doc1 = etree.parse(infile)
root1 = doc1.getroot()
params = Params()
params.parent_url = infile
params.base_url = os.path.split(inpath)[0]
inserts = []
collect_inserts(root1, params, inserts, options)
root2 = copy.copy(root1)
clear_includes_and_imports(root2)
for insert_node in inserts:
root2.append(insert_node)
process_groups(root2)
raise_anon_complextypes(root2)
fix_type_names(root2, options)
doc2 = etree.ElementTree(root2)
doc2.write(outfile)
return doc2
def prep_schema(inpath, outpath, options):
if inpath:
infile = open(inpath, 'r')
else:
infile = sys.stdin
if outpath:
outfile = make_file(outpath, options)
else:
outfile = sys.stdout
if outfile is None:
return
prep_schema_doc(infile, outfile, inpath, options)
if inpath:
infile.close()
if outpath:
outfile.close()
def process_groups(root):
# Get all the xs:group definitions at top level.
defs = root.xpath('./xs:group', namespaces=Namespaces)
defs = [node for node in defs if node.get('name') is not None]
# Get all the xs:group references (below top level).
refs = root.xpath('./*//xs:group', namespaces=Namespaces)
refs = [node for node in refs if node.get('ref') is not None]
# Create a dictionary of the named model groups (definitions).
def_dict = {}
for node in defs:
def_dict[trim_prefix(node.get('name'))] = node
replace_group_defs(def_dict, refs)
def fix_type_names(root, options):
fixnamespec = options.fixtypenames
if fixnamespec:
namespecs = fixnamespec.split(';')
else:
namespecs = []
for namespec in namespecs:
names = namespec.split(':')
if len(names) == 2:
oldname = names[0]
newname = names[1]
elif len(names) == 1:
oldname = names[0]
newname = '%sxx' % (oldname, )
else:
continue
# Change the name (name attribute) of the complexType.
pat = './/%s:complexType[@name="%s"]' % (
root.prefix, oldname)
elements = xpath_find(root, pat)
if len(elements) < 1:
sys.stderr.write(
"\nWarning: fix-type-names can't find complexType '%s'. "
"Exiting.\n\n" % (oldname, ))
sys.exit(1)
        if len(elements) > 1:
            sys.stderr.write(
                "\nWarning: fix-type-names found more than "
                "one complexType '%s'. "
                "Changing the first.\n\n" % (oldname, ))
element = elements[0]
element.set('name', newname)
# Change the reference (type attribute) of child elements.
pat = './/%s:element' % (root.prefix, )
elements = xpath_find(root, pat)
for element in elements:
typename = element.get('type')
if not typename:
continue
names = typename.split(':')
if len(names) == 2:
typename = names[1]
elif len(names) == 1:
typename = names[0]
else:
continue
if typename != oldname:
continue
if not element.getchildren():
element.set('type', newname)
# Change the extensions ('base' attribute) that refer to the old type.
pat = './/%s:extension' % (root.prefix, )
elements = xpath_find(root, pat)
for element in elements:
typename = element.get('base')
if not typename:
continue
names = typename.split(':')
if len(names) == 2:
typename = names[1]
elif len(names) == 1:
typename = names[0]
else:
continue
if typename != oldname:
continue
element.set('base', newname)
def xpath_find(node, pat):
namespaces = {node.prefix: node.nsmap[node.prefix]}
elements = node.xpath(pat, namespaces=namespaces)
return elements
def replace_group_defs(def_dict, refs):
for ref_node in refs:
name = trim_prefix(ref_node.get('ref'))
if name is None:
continue
def_node = def_dict.get(name)
if def_node is not None:
content = def_node.xpath(
'./xs:sequence|./xs:choice|./xs:all',
namespaces=Namespaces)
if content:
content = content[0]
parent = ref_node.getparent()
for node in content:
new_node = deepcopy(node)
# Copy minOccurs and maxOccurs attributes to new node.
value = ref_node.get('minOccurs')
if value is not None:
new_node.set('minOccurs', value)
value = ref_node.get('maxOccurs')
if value is not None:
new_node.set('maxOccurs', value)
ref_node.addprevious(new_node)
parent.remove(ref_node)
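# Illustrative effect of replace_group_defs(); the fragment is an assumption.
# A reference such as
#   <xs:group ref="nameGroup" minOccurs="0"/>
# is replaced in place by copies of the children of the matching definition's
# <xs:sequence>/<xs:choice>/<xs:all>, with the reference's minOccurs/maxOccurs
# carried over onto each copied child.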
def raise_anon_complextypes(root):
""" Raise each anonymous complexType to top level and give it a name.
Rename if necessary to prevent duplicates.
"""
def_names = collect_type_names(root)
def_count = 0
# Find all complexTypes below top level.
# Raise them to top level and name them.
# Re-name if there is a duplicate (simpleType, complexType, or
# previous renamed type).
# Change the parent (xs:element) so the "type" attribute refers to
# the raised and renamed type.
# Collect the new types.
el = etree.Comment(text="Raised anonymous complexType definitions")
el.tail = "\n\n"
root.append(el)
prefix = root.prefix
if prefix:
pattern = './*/*//%s:complexType|./*/*//%s:simpleType' % (
prefix, prefix, )
element_tag = '{%s}element' % (root.nsmap[prefix], )
else:
pattern = './*/*//complexType|./*/*//simpleType'
element_tag = 'element'
defs = root.xpath(pattern, namespaces=Namespaces)
for node in defs:
parent = node.getparent()
if parent.tag != element_tag:
continue
name = parent.get('name')
if not name:
continue
type_name = '%sType' % (name, )
type_name, def_count = unique_name(type_name, def_names, def_count)
def_names.add(type_name)
parent.set('type', type_name)
node.set('name', type_name)
# Move the complexType node to top level.
root.append(node)
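# Illustrative effect of the routine above on a nested element with an
# anonymous type; the fragment is an assumption made for this example.
#
#   <xs:element name="item">
#       <xs:complexType>...</xs:complexType>
#   </xs:element>
#
# is rewritten to reference a raised, named, top-level definition:
#
#   <xs:element name="item" type="itemType"/>
#   ...
#   <xs:complexType name="itemType">...</xs:complexType>
#
# with the new name uniquified (itemType1, itemType2, ...) if already taken.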
#
# Collect the names of all currently defined types (complexType,
# simpleType, element).
def collect_type_names(node):
prefix = node.prefix
if prefix is not None and prefix.strip():
pattern = './/%s:complexType|.//%s:simpleType|.//%s:element' % (
prefix, prefix, prefix)
# Must make sure that we have a namespace dictionary that does *not*
# have a key None.
namespaces = {prefix: node.nsmap[prefix]}
elements = node.xpath(pattern, namespaces=namespaces)
else:
pattern = './/complexType|.//simpleType|.//element'
elements = node.xpath(pattern)
names = [
el.attrib['name'] for el in elements if
'name' in el.attrib and el.getchildren()
]
names = set(names)
return names
def unique_name(type_name, def_names, def_count):
orig_type_name = type_name
while True:
if type_name not in def_names:
return type_name, def_count
def_count += 1
type_name = '%s%d' % (orig_type_name, def_count, )
def trim_prefix(name):
names = name.split(':')
if len(names) == 1:
return names[0]
elif len(names) == 2:
return names[1]
else:
return None
USAGE_TEXT = __doc__
def usage(parser):
parser.print_help()
sys.exit(1)
def main():
parser = OptionParser(USAGE_TEXT)
parser.add_option(
"-f", "--force", action="store_true",
dest="force", default=False,
help="force overwrite without asking")
(options, args) = parser.parse_args()
if len(args) == 2:
inpath = args[0]
outpath = args[1]
elif len(args) == 1:
inpath = args[0]
outpath = None
elif len(args) == 0:
inpath = None
outpath = None
else:
usage(parser)
prep_schema(inpath, outpath, options)
if __name__ == "__main__":
#import pdb; pdb.set_trace()
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dataflow_v1beta3.services.flex_templates_service import (
FlexTemplatesServiceAsyncClient,
)
from google.cloud.dataflow_v1beta3.services.flex_templates_service import (
FlexTemplatesServiceClient,
)
from google.cloud.dataflow_v1beta3.services.flex_templates_service import transports
from google.cloud.dataflow_v1beta3.types import environment
from google.cloud.dataflow_v1beta3.types import jobs
from google.cloud.dataflow_v1beta3.types import templates
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert FlexTemplatesServiceClient._get_default_mtls_endpoint(None) is None
assert (
FlexTemplatesServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
FlexTemplatesServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
FlexTemplatesServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
FlexTemplatesServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
FlexTemplatesServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class", [FlexTemplatesServiceClient, FlexTemplatesServiceAsyncClient,]
)
def test_flex_templates_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dataflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.FlexTemplatesServiceGrpcTransport, "grpc"),
(transports.FlexTemplatesServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_flex_templates_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [FlexTemplatesServiceClient, FlexTemplatesServiceAsyncClient,]
)
def test_flex_templates_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dataflow.googleapis.com:443"
def test_flex_templates_service_client_get_transport_class():
transport = FlexTemplatesServiceClient.get_transport_class()
available_transports = [
transports.FlexTemplatesServiceGrpcTransport,
]
assert transport in available_transports
transport = FlexTemplatesServiceClient.get_transport_class("grpc")
assert transport == transports.FlexTemplatesServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
FlexTemplatesServiceClient,
transports.FlexTemplatesServiceGrpcTransport,
"grpc",
),
(
FlexTemplatesServiceAsyncClient,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
FlexTemplatesServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FlexTemplatesServiceClient),
)
@mock.patch.object(
FlexTemplatesServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FlexTemplatesServiceAsyncClient),
)
def test_flex_templates_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(FlexTemplatesServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(FlexTemplatesServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
FlexTemplatesServiceClient,
transports.FlexTemplatesServiceGrpcTransport,
"grpc",
"true",
),
(
FlexTemplatesServiceAsyncClient,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
FlexTemplatesServiceClient,
transports.FlexTemplatesServiceGrpcTransport,
"grpc",
"false",
),
(
FlexTemplatesServiceAsyncClient,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
FlexTemplatesServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FlexTemplatesServiceClient),
)
@mock.patch.object(
FlexTemplatesServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FlexTemplatesServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_flex_templates_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [FlexTemplatesServiceClient, FlexTemplatesServiceAsyncClient]
)
@mock.patch.object(
FlexTemplatesServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FlexTemplatesServiceClient),
)
@mock.patch.object(
FlexTemplatesServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FlexTemplatesServiceAsyncClient),
)
def test_flex_templates_service_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
FlexTemplatesServiceClient,
transports.FlexTemplatesServiceGrpcTransport,
"grpc",
),
(
FlexTemplatesServiceAsyncClient,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_flex_templates_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
FlexTemplatesServiceClient,
transports.FlexTemplatesServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
FlexTemplatesServiceAsyncClient,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_flex_templates_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_flex_templates_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.dataflow_v1beta3.services.flex_templates_service.transports.FlexTemplatesServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = FlexTemplatesServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
FlexTemplatesServiceClient,
transports.FlexTemplatesServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
FlexTemplatesServiceAsyncClient,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_flex_templates_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dataflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/userinfo.email",
),
scopes=None,
default_host="dataflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [templates.LaunchFlexTemplateRequest, dict,])
def test_launch_flex_template(request_type, transport: str = "grpc"):
client = FlexTemplatesServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.launch_flex_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = templates.LaunchFlexTemplateResponse()
response = client.launch_flex_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == templates.LaunchFlexTemplateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, templates.LaunchFlexTemplateResponse)
def test_launch_flex_template_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FlexTemplatesServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.launch_flex_template), "__call__"
) as call:
client.launch_flex_template()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == templates.LaunchFlexTemplateRequest()
@pytest.mark.asyncio
async def test_launch_flex_template_async(
transport: str = "grpc_asyncio", request_type=templates.LaunchFlexTemplateRequest
):
client = FlexTemplatesServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.launch_flex_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
templates.LaunchFlexTemplateResponse()
)
response = await client.launch_flex_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == templates.LaunchFlexTemplateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, templates.LaunchFlexTemplateResponse)
@pytest.mark.asyncio
async def test_launch_flex_template_async_from_dict():
await test_launch_flex_template_async(request_type=dict)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.FlexTemplatesServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FlexTemplatesServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.FlexTemplatesServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FlexTemplatesServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.FlexTemplatesServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = FlexTemplatesServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = FlexTemplatesServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.FlexTemplatesServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FlexTemplatesServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.FlexTemplatesServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = FlexTemplatesServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.FlexTemplatesServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.FlexTemplatesServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.FlexTemplatesServiceGrpcTransport,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = FlexTemplatesServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(client.transport, transports.FlexTemplatesServiceGrpcTransport,)
def test_flex_templates_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.FlexTemplatesServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_flex_templates_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dataflow_v1beta3.services.flex_templates_service.transports.FlexTemplatesServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.FlexTemplatesServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = ("launch_flex_template",)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_flex_templates_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dataflow_v1beta3.services.flex_templates_service.transports.FlexTemplatesServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FlexTemplatesServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/userinfo.email",
),
quota_project_id="octopus",
)
def test_flex_templates_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dataflow_v1beta3.services.flex_templates_service.transports.FlexTemplatesServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FlexTemplatesServiceTransport()
adc.assert_called_once()
def test_flex_templates_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FlexTemplatesServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/userinfo.email",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FlexTemplatesServiceGrpcTransport,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
],
)
def test_flex_templates_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/userinfo.email",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.FlexTemplatesServiceGrpcTransport, grpc_helpers),
(transports.FlexTemplatesServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_flex_templates_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dataflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/userinfo.email",
),
scopes=["1", "2"],
default_host="dataflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FlexTemplatesServiceGrpcTransport,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
],
)
def test_flex_templates_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_flex_templates_service_host_no_port():
client = FlexTemplatesServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dataflow.googleapis.com"
),
)
assert client.transport._host == "dataflow.googleapis.com:443"
def test_flex_templates_service_host_with_port():
client = FlexTemplatesServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dataflow.googleapis.com:8000"
),
)
assert client.transport._host == "dataflow.googleapis.com:8000"
def test_flex_templates_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FlexTemplatesServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_flex_templates_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FlexTemplatesServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.FlexTemplatesServiceGrpcTransport,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
],
)
def test_flex_templates_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.FlexTemplatesServiceGrpcTransport,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
],
)
def test_flex_templates_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = FlexTemplatesServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = FlexTemplatesServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = FlexTemplatesServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = FlexTemplatesServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = FlexTemplatesServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = FlexTemplatesServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = FlexTemplatesServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = FlexTemplatesServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = FlexTemplatesServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = FlexTemplatesServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = FlexTemplatesServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = FlexTemplatesServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = FlexTemplatesServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = FlexTemplatesServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = FlexTemplatesServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.FlexTemplatesServiceTransport, "_prep_wrapped_messages"
) as prep:
client = FlexTemplatesServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.FlexTemplatesServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = FlexTemplatesServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = FlexTemplatesServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = FlexTemplatesServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = FlexTemplatesServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(FlexTemplatesServiceClient, transports.FlexTemplatesServiceGrpcTransport),
(
FlexTemplatesServiceAsyncClient,
transports.FlexTemplatesServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
|
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result = frequencies.to_offset('Q')
expected = frequencies.to_offset('Q-DEC')
assert(result == expected)
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_week_of_month_fake(self):
        # All of these dates are on the same day of the week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-DEC')
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-NOV')
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-OCT')
def test_infer_freq_tz(self):
freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
print(idx)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
self.assertEqual(idx.inferred_freq, 'H')
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00',
'2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00', '2014-07-07 16:00',
'2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
'2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
'2014-07-08 15:00', '2014-07-08 16:00'])
self.assertEqual(idx.inferred_freq, 'BH')
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
self.assertIsNone(rng.inferred_freq)
    def test_non_datetimeindex2(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
self.assertEqual(result, rng.inferred_freq)
def test_invalid_index_types(self):
# test all index types
for i in [ tm.makeIntIndex(10),
tm.makeFloatIndex(10),
tm.makePeriodIndex(10) ]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(i))
for i in [ tm.makeStringIndex(10),
tm.makeUnicodeIndex(10) ]:
self.assertRaises(ValueError, lambda : frequencies.infer_freq(i))
def test_string_datetimelike_compat(self):
# GH 6463
expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03', '2004-04'])
result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03', '2004-04']))
self.assertEqual(result,expected)
def test_series(self):
# GH6407
# inferring series
# invalid type of Series
for s in [ Series(np.arange(10)),
Series(np.arange(10.))]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# a non-convertible string
self.assertRaises(ValueError, lambda : frequencies.infer_freq(Series(['foo','bar'])))
# cannot infer on PeriodIndex
for freq in [None, 'L', 'Y']:
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
s = Series(date_range('20130101',periods=10,freq=freq))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,freq)
s = Series(date_range('20130101','20130110'))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,'D')
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
def test_is_superperiod_subperiod():
assert(frequencies.is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
assert(frequencies.is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))
assert(frequencies.is_superperiod(offsets.Hour(), offsets.Minute()))
assert(frequencies.is_subperiod(offsets.Minute(), offsets.Hour()))
assert(frequencies.is_superperiod(offsets.Second(), offsets.Milli()))
assert(frequencies.is_subperiod(offsets.Milli(), offsets.Second()))
assert(frequencies.is_superperiod(offsets.Milli(), offsets.Micro()))
assert(frequencies.is_subperiod(offsets.Micro(), offsets.Milli()))
assert(frequencies.is_superperiod(offsets.Micro(), offsets.Nano()))
assert(frequencies.is_subperiod(offsets.Nano(), offsets.Micro()))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
|
#!/usr/bin/env python
"""sunfinder.py: Queries the SolarCoin wallet, pulls solar production data and loads to database"""
__author__ = "Steven Campbell AKA Scalextrix"
__copyright__ = "Copyright 2017, Steven Campbell"
__license__ = "The Unlicense"
__version__ = "3.3"
from datetime import datetime
import getpass
import json
import os.path
import requests
import sqlite3
import sys
import time
def databasecreate():
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS GENDETAILS (unixdatetime INTEGER PRIMARY KEY, dataloggerid BLOB, txhash TEXT, block INTEGER,
time TEXT, period TEXT, totalmwh REAL, incrementmwh REAL)''')
c.execute('''CREATE TABLE IF NOT EXISTS SYSDETAILS (dataloggerid BLOB UNIQUE, panelid TEXT, inverterid TEXT, pkwatt TEXT, tilt TEXT,
azimuth TEXT, lat TEXT, lon TEXT, msg TEXT, datalogger TEXT, slrsigaddr TEXT, block_number INTEGER)''')
c.execute('''CREATE TABLE IF NOT EXISTS BLOCKTRACK (block_number INTEGER UNIQUE, block_hash TEXT)''')
conn.commit()
conn.close()
def databaseupdategen():
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
c.execute("INSERT OR IGNORE INTO GENDETAILS VALUES (?,?,?,?,?,?,?,?);", (enddatetime, datalogger_id, tx_hash, block_number, block_time, period, total_mwh, increment_mwh,))
c.execute('INSERT OR REPLACE INTO BLOCKTRACK VALUES (?,?);', (block_number, block_hash,))
conn.commit()
conn.close()
def databaseupdatesys():
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
c.execute("INSERT OR REPLACE INTO SYSDETAILS VALUES (?,?,?,?,?,?,?,?,?,?,?,?);", (datalogger_id, solar_panel, solar_inverter, peak_watt, tilt, azimuth, latitude, longitude, message, datalogger, solarcoin_sig_address, block_number,))
c.execute('INSERT OR REPLACE INTO BLOCKTRACK VALUES (?,?);', (block_number, block_hash,))
conn.commit()
conn.close()
def forkfinder(start_block_number):
    # checks if the block hash in the database matches the blockchain; if not, recursively looks back 10 blocks until a match is found, then reloads
chain_block_hash = str(instruct_wallet('getblockhash', [start_block_number])['result'])
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
dbase_block_hash = str(c.execute('select block_hash from BLOCKTRACK where block_number = {}'.format(start_block_number)).fetchone()[0])
conn.close()
if chain_block_hash == dbase_block_hash:
return start_block_number
else:
start_block_number = start_block_number -10
print '******** CHAIN FORK DETECTED, LOOKING BACK TO BLOCK {} AND ATTEMPTING RELOAD ********'.format(start_block_number)
return forkfinder(start_block_number)
def hashcheckergen():
# check if generation datalogs were signed with the correct sigaddr from the first system log
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
solarcoin_sig_address = str(c.execute("select slrsigaddr FROM SYSDETAILS where dataloggerid ='{}'".format(datalogger_id)).fetchone()[0])
conn.close()
counter=0
while True:
checksum_tx_message = tx_message+checksums[counter]
validate_sig = instruct_wallet('verifymessage', [solarcoin_sig_address, hash_present, checksum_tx_message])['result']
if validate_sig == True:
return validate_sig
else:
counter = counter+1
if counter == len(checksums):
break
def hashcheckersys(solarcoin_sig_address):
    # check subsequent system datalogs against the first sigaddr; if this is the first datalog there is nothing to check against
try:
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
solarcoin_sig_address = str(c.execute("select slrsigaddr FROM SYSDETAILS where dataloggerid ='{}'".format(datalogger_id)).fetchone()[0])
conn.close()
except:
print 'NOTE: Signing Address not found for System: {}'.format(datalogger_id)
validate_sig = True
return validate_sig
counter=0
while True:
checksum_tx_message = tx_message+checksums[counter]
validate_sig = instruct_wallet('verifymessage', [solarcoin_sig_address, hash_present, checksum_tx_message])['result']
if validate_sig == True:
return validate_sig
else:
counter = counter+1
if counter == len(checksums):
break
def instruct_wallet(method, params):
url = "http://127.0.0.1:18181/"
payload = json.dumps({"method": method, "params": params})
headers = {'content-type': "application/json", 'cache-control': "no-cache"}
try:
response = s.request("POST", url, data=payload, headers=headers, auth=(rpc_user, rpc_pass))
return json.loads(response.text)
except requests.exceptions.RequestException as e:
print e
print 'Backing off for 10 seconds'
time.sleep(10)
return instruct_wallet(method, params)
def incrementmwhs():
    # calculate an incremental MWh amount based on each user's last Total MWh reading
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
datalogger_id = c.execute('select DISTINCT dataloggerid FROM GENDETAILS').fetchall()
datalogger_id = [str(f[0]) for f in datalogger_id]
id_length = len(datalogger_id)
counter1=0
while True:
counter2=0
while True:
max_rows = c.execute("select count(*) FROM GENDETAILS where dataloggerid ='{}'".format(datalogger_id[counter1])).fetchone()[0]
if max_rows <= 1:
break
tot_energy0 = float(c.execute("select totalmwh FROM GENDETAILS where dataloggerid ='{}' limit {},1".format(datalogger_id[counter1], counter2)).fetchone()[0])
counter2 = counter2+1
tot_energy1 = float(c.execute("select totalmwh FROM GENDETAILS where dataloggerid ='{}' limit {},1".format(datalogger_id[counter1], counter2)).fetchone()[0])
increment_mwh = float("{0:.6f}".format(tot_energy1 - tot_energy0))
c.execute("update GENDETAILS SET incrementmwh = {} WHERE totalmwh = {}".format(increment_mwh, tot_energy1))
conn.commit()
print ('Updating Incremental Energy reading row {} for UserID {}').format(counter2, datalogger_id[counter1])
if counter2 == max_rows -1:
break
counter1=counter1+1
if counter1 == id_length:
conn.close()
print 'Incremental Energy Update Completed'
break
def searchstarter():
if os.path.isfile("solardetails.db"):
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
start_block_number = int(c.execute('select max(block_number) from BLOCKTRACK').fetchone()[0])
conn.commit()
conn.close()
start_block_number = forkfinder(start_block_number)
return start_block_number
else:
start_block_number = raw_input('Start search at which block?: ')
try:
start_block_number = int(start_block_number)
except:
print 'You must enter a whole number, please try again'
return searchstarter()
return start_block_number
def periodtounixtime():
#take the end time from the 'period' parameter and convert to unix time for use as primary key
timestamp = period
utc_dt = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')
return (utc_dt - datetime(1970, 1, 1)).total_seconds()
if os.name == 'nt':
user_account = getpass.getuser()
    conf_location = r'C:\Users\{}\AppData\Roaming\SolarCoin\SolarCoin.conf'.format(user_account)
elif os.name == 'posix':
homedir = os.environ['HOME']
conf_location = '{}/.solarcoin/solarcoin.conf'.format(homedir)
else:
conf_location = ''
while True:
try:
solarcoin_conf = open(conf_location, 'rb')
break
    except IOError:
print 'solarcoin.conf not found'
conf_location = raw_input('Please enter the FULL path to solarcoin.conf: ')
rpc_user = ''
rpc_pass = ''
for line in solarcoin_conf:
line = line.rstrip()
if line[0:7] == 'rpcuser':
rpc_user = line[line.find('=')+1:]
if line[0:11] == 'rpcpassword':
rpc_pass = line[line.find('=')+1:]
solarcoin_conf.close()
if rpc_user == '' or rpc_pass == '':
print 'solarcoin.conf found but "rpcuser=" or "rpcpassword=" missing'
    print 'Please add rpcuser=<username_here> and rpcpassword=<password_here> to solarcoin.conf'
print 'Exit in 10 seconds'
time.sleep(10)
sys.exit()
hash_check_required = raw_input('Would you like to check data-logs using digital signatures? Y/N: ').lower()
if hash_check_required != 'y':
print '*** WARNING: Without digital signature checking, data-logs cannot be verified as genuine! ***'
time.sleep(5)
f = open('goodchecksums.txt', 'rb')
checksums = f.read().splitlines()
f.close()
s = requests.Session()
while True:
print '--------- Sunfinder: Looking for SUNSHINE in the block-chain ---------'
print '---------- Press CTRL + c at any time to stop the Sunfinder ----------'
print ''
try:
top_block = int(instruct_wallet('getblockcount', [])['result'])
start_block_number = searchstarter()
block_number = int(start_block_number)
databasecreate()
while True:
block_hash = instruct_wallet('getblockhash', [block_number])['result']
block_json = instruct_wallet('getblock', [block_hash])['result']
block_time = block_json['time']
tx_length = len(block_json['tx'])
print 'Number of transactions in block {} = {}'.format(block_number, tx_length-1)
tx_counter = 1
while True:
tx_hash = block_json['tx'][tx_counter]
tx_json = instruct_wallet('getrawtransaction', [tx_hash, 1])['result']
try:
tx_message = str(tx_json['tx-comment'])
hash_present = str(tx_message[tx_message.find('Sig:')+4:tx_message.find('=')+1])
print 'Decoding Transaction {}'.format(tx_counter)
if tx_message[5:10] == 'sysv1':
try:
tx_message = tx_message[tx_message.find('{'):tx_message.find('}')+1]
tx_message_decoded = json.loads(tx_message)
solarcoin_sig_address = tx_message_decoded['SigAddr']
datalogger_id = tx_message_decoded['UID']
solar_panel = tx_message_decoded['module']
tilt = tx_message_decoded['tilt']
azimuth = tx_message_decoded['azimuth']
solar_inverter = tx_message_decoded['inverter']
datalogger = tx_message_decoded['data-logger']
peak_watt = tx_message_decoded['Size_kW']
latitude = tx_message_decoded['lat']
longitude = tx_message_decoded['long']
message = tx_message_decoded['Comment']
if hash_check_required == 'y':
hash_check = hashcheckersys(solarcoin_sig_address)
if hash_check == True:
databaseupdatesys()
print''
print ('In Block {} Added or Updated System Details for System: {}').format(block_number, datalogger_id)
else:
print''
print ('In Block {} System Details Hash check failed, not loading to database').format(block_number)
else:
databaseupdatesys()
print''
print ('In Block {} Added or Updated System Details for System: {}').format(block_number, datalogger_id)
except:
print ('Skipping load: Message in block {} does not conform').format(block_number)
print''
elif tx_message[5:10] == 'genv1':
try:
tx_message = tx_message[tx_message.find('{'):tx_message.find('}')+1]
tx_message_decoded = json.loads(tx_message)
datalogger_id = tx_message_decoded['UID']
hash_check = hashcheckergen()
increment_mwh = 0
db_counter = 0
while True:
total_mwh = tx_message_decoded['MWh{}'.format(db_counter)]
period = tx_message_decoded['t{}'.format(db_counter)]
enddatetime = periodtounixtime()
if hash_check_required == 'y':
if hash_check == True:
databaseupdategen()
else:
print''
print ('In Block {} System Details Hash check failed, not loading to database').format(block_number)
else:
databaseupdategen()
db_counter = db_counter + 1
if db_counter == 8:
break
print ('In block: {}').format(block_number)
print ('UserID: {}').format(datalogger_id)
print ('made TX hash: {}').format(tx_hash)
print ('and recorded a total of: {} MWh of energy').format(total_mwh)
print ('Message hash check passed: {}').format(hash_check)
print''
incrementmwhs()
except:
print ('Skipping load: Message in block {} does not conform').format(block_number)
print''
except UnicodeEncodeError:
print ('Skipping load: Message in block {} cannot be decoded, Unicode error').format(block_number)
print''
else:
print 'No System or Generation data to load in that transaction'
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
c.execute('INSERT OR REPLACE INTO BLOCKTRACK VALUES (?,?);', (block_number, block_hash,))
conn.commit()
conn.close()
tx_counter = tx_counter + 1
if tx_counter == tx_length:
break
block_number = block_number + 1
if block_number == top_block:
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
end_block_number = int(c.execute('select max(block_number) from BLOCKTRACK').fetchone()[0])
conn.close()
print 'Found {} new blocks'.format(end_block_number-start_block_number)
break
print '------ Sleeping for 10 minutes, then looking for more SUNSHINE! ------'
print '---------- Press CTRL + c at any time to stop the Sunfinder ----------'
print ''
time.sleep(600)
except KeyboardInterrupt:
conn = sqlite3.connect('solardetails.db')
c = conn.cursor()
end_block_number = int(c.execute('select max(block_number) from BLOCKTRACK').fetchone()[0])
conn.close()
print''
print 'Found {} new blocks'.format(end_block_number-start_block_number)
print("Stopping Sunfinder in 10 seconds")
time.sleep(10)
sys.exit()
|
|
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Benjamin Kampmann <[email protected]>
"""
This is a Media Backend that allows you to access the cool and cute pictures
from lolcats.com. This is mainly meant as a Sample Media Backend to learn how to
write a Media Backend.
So. You are still reading which allows me to assume that you want to learn how
to write a Media Backend for Coherence. NICE :) .
Once again: This is a SIMPLE Media Backend. It does not contain any big
requests, searches or even transcoding. The only thing we want to do in this
simple example is to fetch an RSS feed on startup, parse it, save it and repeat
the process one hour later. Well, on top of this, we also want to provide
this information as a Media Server in the UPnP/DLNA network of course ;) .
Wow. You are still reading. You must be really interested. Then let's go.
"""
########## NOTE:
# Please don't complain about the coding style of this document - I know. It is
# just this way to make it easier to document and to understand.
########## The imports
# The entry point for each kind of Backend is a 'BackendStore'. The BackendStore
# is the instance that usually does everything. In this example it can be
# understood as the 'Server', the object retrieving and serving the data.
from coherence.backend import BackendStore
# The data itself is stored in BackendItems. They are also the first things we
# are going to create.
from coherence.backend import BackendItem
# To make the data 'renderable' we need to define the DIDLite-Class of the Media
# we are providing. For that we have a bunch of helpers that we also want to
# import
from coherence.upnp.core import DIDLLite
# Coherence relies on the Twisted backend. I hope you are familiar with the
# concept of deferreds. If not please read:
# http://twistedmatrix.com/projects/core/documentation/howto/async.html
#
# It is a basic concept that you need in order to understand the following
# code. But why am I talking about it? Oh, right, because we use an http-client
# based on the twisted.web.client module to do our requests.
from coherence.upnp.core.utils import getPage
# And we also import the reactor, that allows us to specify an action to happen
# later
from twisted.internet import reactor
# And to parse the RSS-Data (which is XML), we use the coherence helper
from coherence.extern.et import parse_xml
########## The models
# After the download and parsing of the data is done, we want to save it. In
# this case, we want to fetch the images and store their URL and the title of
# the image. That is the LolcatsImage class:
class LolcatsImage(BackendItem):
# We inherit from BackendItem as it already contains a lot of helper methods
# and implementations. For this simple example, we only have to fill the
# item with data.
def __init__(self, parent_id, id, title, url):
self.parentid = parent_id # used to be able to 'go back'
self.update_id = 0
self.id = id # each item has its own and unique id
self.location = url # the url of the picture
self.name = title # the title of the picture. Inside
# coherence this is called 'name'
        # Item.item is a special thing. This is used to tell the client what
        # kind of data this is, e.g. a VideoItem or a MusicTrack. In our
        # case, we have an image.
self.item = DIDLLite.ImageItem(id, parent_id, self.name)
# each Item.item has to have one or more Resource objects
# these hold detailed information about the media data
# and can represent variants of it (different sizes, transcoded formats)
res = DIDLLite.Resource(self.location, 'http-get:*:image/jpeg:*')
res.size = None #FIXME: we should have a size here
# and a resolution entry would be nice too
self.item.res.append(res)
class LolcatsContainer(BackendItem):
# The LolcatsContainer will hold the reference to all our LolcatsImages. This
    # kind of BackendItem is a bit different from the normal BackendItem,
# because it has 'children' (the lolcatsimages). Because of that we have
# some more stuff to do in here.
def __init__(self, parent_id, id):
# the ids as above
self.parent_id = parent_id
self.id = id
# we never have a different name anyway
self.name = 'LOLCats'
        # but we need to set a certain mimetype to signal that we
        # contain 'children'.
self.mimetype = 'directory'
# As we are updating our data periodically, we increase this value so
        # that our clients can more easily check if something has changed since their
# last request.
self.update_id = 0
# that is where we hold the children
self.children = []
# and we need to give a DIDLLite again. This time we want to be
# understood as 'Container'.
self.item = DIDLLite.Container(id, parent_id, self.name)
self.item.childCount = None # will be set as soon as we have images
def get_children(self, start=0, end=0):
# This is the only important implementation thing: we have to return our
# list of children
if end != 0:
return self.children[start:end]
return self.children[start:]
# there is nothing special in here
# FIXME: move it to a base BackendContainer class
def get_child_count(self):
return len(self.children)
def get_item(self):
return self.item
def get_name(self):
return self.name
def get_id(self):
return self.id
########## The server
# As already said before the implementation of the server is done in an
# inheritance of a BackendStore. This is where the real code happens (usually).
# In our case this would be: downloading the page, parsing the content, saving
# it in the models and returning them on request.
class LolcatsStore(BackendStore):
    # this *must* be set, because besides the (most used) MediaServer,
    # Coherence also allows other kinds of Backends (like remote lights).
implements = ['MediaServer']
# this is only for this implementation: the http link to the lolcats rss
# feed that we want to read and parse:
rss_url = "http://feeds.feedburner.com/ICanHasCheezburger?format=xml"
# as we are going to build a (very small) tree with the items, we need to
# define the first (the root) item:
ROOT_ID = 0
def __init__(self, server, *args, **kwargs):
        # first we initialize our parent class
        BackendStore.__init__(self, server, **kwargs)
        # When a Backend is initialized, the configuration is given as keyword
        # arguments to the initialization. We receive it here as a dictionary
# and allow some values to be set:
# the name of the MediaServer as it appears in the network
self.name = kwargs.get('name', 'Lolcats')
# timeout between updates in hours:
self.refresh = int(kwargs.get('refresh', 1)) * (60 *60)
        # the UPnP device that's hosting this backend; that's already done
        # in the BackendStore.__init__, just left here for the sake of completeness
self.server = server
# internally used to have a new id for each item
self.next_id = 1000
# we store the last update from the rss feed so that we know if we have
# to parse again, or not:
self.last_updated = None
# initialize our lolcats container (no parent, this is the root)
self.container = LolcatsContainer(None, self.ROOT_ID)
# but as we also have to return them on 'get_by_id', we have our local
# store of images per id:
self.images = {}
# we tell that if an XBox sends a request for images we'll
# map the WMC id of that request to our local one
self.wmc_mapping = {'16': 0}
# and trigger an update of the data
dfr = self.update_data()
        # So, even though the initialization is mostly done, Coherence does not
        # yet announce our Media Server.
        # Coherence waits for a signal sent by us that we are ready now.
        # And we don't want that to happen before we have succeeded in fetching
        # some first data, so we delay this signaling until the update is done:
dfr.addCallback(self.init_completed)
dfr.addCallback(self.queue_update)
def get_by_id(self, id):
print "asked for", id, type(id)
        # whatever we are asked for, we want to return only the container
if isinstance(id, basestring):
id = id.split('@',1)
id = id[0]
if int(id) == self.ROOT_ID:
return self.container
return self.images.get(int(id), None)
def upnp_init(self):
        # after the signal was triggered, this method is called by Coherence;
        # from now on self.server exists and we can do the necessary setup
        # here, which allows us to specify our server options in more detail.
# here we define what kind of media content we do provide
# mostly needed to make some naughty DLNA devices behave
# will probably move into Coherence internals one day
self.server.connection_manager_server.set_variable( \
0, 'SourceProtocolInfo', ['http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_TN;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_SM;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_MED;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_LRG;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000',
'http-get:*:image/jpeg:*'])
# and as it was done after we fetched the data the first time
# we want to take care about the server wide updates as well
self._update_container()
def _update_container(self, result=None):
# we need to inform Coherence about these changes
# again this is something that will probably move
# into Coherence internals one day
if self.server:
self.server.content_directory_server.set_variable(0,
'SystemUpdateID', self.update_id)
value = (self.ROOT_ID,self.container.update_id)
self.server.content_directory_server.set_variable(0,
'ContainerUpdateIDs', value)
return result
def update_loop(self):
# in the loop we want to call update_data
dfr = self.update_data()
        # after it is done we want to take care of updating
        # the container
dfr.addCallback(self._update_container)
# in ANY case queue an update of the data
dfr.addBoth(self.queue_update)
def update_data(self):
# trigger an update of the data
# fetch the rss
dfr = getPage(self.rss_url)
# push it through our xml parser
dfr.addCallback(parse_xml)
# then parse the data into our models
dfr.addCallback(self.parse_data)
return dfr
def parse_data(self, xml_data):
# So. xml_data is a cElementTree Element now. We can read our data from
# it now.
# each xml has a root element
root = xml_data.getroot()
# from there, we look for the newest update and compare it with the one
# we have saved. If they are the same, we don't need to go on:
pub_date = root.find('./channel/lastBuildDate').text
if pub_date == self.last_updated:
return
# not the case, set this as the last update and continue
self.last_updated = pub_date
        # and reset the children list of the container and the local storage
self.container.children = []
self.images = {}
# Attention, as this is an example, this code is meant to be as simple
# as possible and not as efficient as possible. IMHO the following code
# pretty much sucks, because it is totally blocking (even though we have
# 'only' 20 elements)
# we go through our entries and do something specific to the
# lolcats-rss-feed to fetch the data out of it
url_item = './{http://search.yahoo.com/mrss/}content'
for item in root.findall('./channel/item'):
title = item.find('./title').text
try:
url = item.findall(url_item)[1].get('url', None)
except IndexError:
continue
if url is None:
continue
image = LolcatsImage(self.ROOT_ID, self.next_id, title, url)
self.container.children.append(image)
self.images[self.next_id] = image
# increase the next_id entry every time
self.next_id += 1
# and increase the container update id and the system update id
# so that the clients can refresh with the new data
self.container.update_id += 1
self.update_id += 1
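    # A possible refinement, sketched only as a comment: the XML parsing at
    # least could be pushed off the reactor thread with
    # twisted.internet.threads.deferToThread, e.g. in update_data:
    #
    #     from twisted.internet.threads import deferToThread
    #     dfr = getPage(self.rss_url)
    #     dfr.addCallback(lambda page: deferToThread(parse_xml, page))
    #     dfr.addCallback(self.parse_data)
    #
    # Returning the inner Deferred from the callback keeps the chain intact,
    # so parse_data still runs in the reactor thread once parsing is done.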
def queue_update(self, error_or_failure):
        # We use the reactor to queue another update of our data
print error_or_failure
reactor.callLater(self.refresh, self.update_loop)
|
|
#
# Copyright (c) 2011, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
INCHI_AVAILABLE = True
import logging
from rdkit.Chem import rdinchi
from rdkit import RDLogger
logger = RDLogger.logger()
logLevelToLogFunctionLookup = {
logging.INFO: logger.info,
logging.DEBUG: logger.debug,
logging.WARNING: logger.warning,
logging.CRITICAL: logger.critical,
logging.ERROR: logger.error
}
class InchiReadWriteError(Exception):
pass
def MolFromInchi(inchi, sanitize=True, removeHs=True, logLevel=None, treatWarningAsError=False):
"""Construct a molecule from a InChI string
Keyword arguments:
sanitize -- set to True to enable sanitization of the molecule. Default is
True
removeHs -- set to True to remove Hydrogens from a molecule. This only
makes sense when sanitization is enabled
logLevel -- the log level used for logging logs and messages from InChI
    API. Set to None to disable the logging completely
    treatWarningAsError -- set to True to raise an exception in case of a
    molecule that generates a warning when calling the InChI API. The resultant
    molecule and error message are part of the exception
Returns:
a rdkit.Chem.rdchem.Mol instance
"""
try:
mol, retcode, message, log = rdinchi.InchiToMol(inchi, sanitize, removeHs)
except ValueError as e:
logger.error(str(e))
return None
if logLevel is not None:
if logLevel not in logLevelToLogFunctionLookup:
raise ValueError("Unsupported log level: %d" % logLevel)
log = logLevelToLogFunctionLookup[logLevel]
if retcode == 0:
log(message)
if retcode != 0:
if retcode == 1:
logger.warning(message)
else:
logger.error(message)
if treatWarningAsError and retcode != 0:
raise InchiReadWriteError(mol, message)
return mol
def MolToInchiAndAuxInfo(mol, options="", logLevel=None, treatWarningAsError=False):
"""Returns the standard InChI string and InChI auxInfo for a molecule
Keyword arguments:
logLevel -- the log level used for logging logs and messages from InChI
    API. Set to None to disable the logging completely
treatWarningAsError -- set to True to raise an exception in case of a
    molecule that generates a warning when calling the InChI API. The resultant InChI
string and AuxInfo string as well as the error message are encoded in the
exception.
Returns:
a tuple of the standard InChI string and the auxInfo string returned by
InChI API, in that order, for the input molecule
"""
inchi, retcode, message, logs, aux = rdinchi.MolToInchi(mol, options)
if logLevel is not None:
if logLevel not in logLevelToLogFunctionLookup:
raise ValueError("Unsupported log level: %d" % logLevel)
log = logLevelToLogFunctionLookup[logLevel]
if retcode == 0:
log(message)
if retcode != 0:
if retcode == 1:
logger.warning(message)
else:
logger.error(message)
if treatWarningAsError and retcode != 0:
raise InchiReadWriteError(inchi, aux, message)
return inchi, aux
def MolBlockToInchiAndAuxInfo(molblock, options="", logLevel=None, treatWarningAsError=False):
"""Returns the standard InChI string and InChI auxInfo for a mol block
Keyword arguments:
logLevel -- the log level used for logging logs and messages from InChI
    API. Set to None to disable the logging completely
treatWarningAsError -- set to True to raise an exception in case of a
    molecule that generates a warning when calling the InChI API. The resultant InChI
string and AuxInfo string as well as the error message are encoded in the
exception.
Returns:
a tuple of the standard InChI string and the auxInfo string returned by
InChI API, in that order, for the input molecule
"""
inchi, retcode, message, logs, aux = rdinchi.MolBlockToInchi(molblock, options)
if logLevel is not None:
if logLevel not in logLevelToLogFunctionLookup:
raise ValueError("Unsupported log level: %d" % logLevel)
log = logLevelToLogFunctionLookup[logLevel]
if retcode == 0:
log(message)
if retcode != 0:
if retcode == 1:
logger.warning(message)
else:
logger.error(message)
if treatWarningAsError and retcode != 0:
raise InchiReadWriteError(inchi, aux, message)
return inchi, aux
def MolToInchi(mol, options="", logLevel=None, treatWarningAsError=False):
"""Returns the standard InChI string for a molecule
Keyword arguments:
logLevel -- the log level used for logging logs and messages from InChI
    API. Set to None to disable the logging completely
treatWarningAsError -- set to True to raise an exception in case of a
    molecule that generates a warning when calling the InChI API. The resultant InChI
string and AuxInfo string as well as the error message are encoded in the
exception.
Returns:
the standard InChI string returned by InChI API for the input molecule
"""
if options.find('AuxNone') == -1:
if options:
options += " /AuxNone"
else:
options += "/AuxNone"
try:
inchi, aux = MolToInchiAndAuxInfo(mol, options, logLevel=logLevel,
treatWarningAsError=treatWarningAsError)
except InchiReadWriteError as inst:
inchi, aux, message = inst.args
raise InchiReadWriteError(inchi, message)
return inchi
def MolBlockToInchi(molblock, options="", logLevel=None, treatWarningAsError=False):
"""Returns the standard InChI string for a mol block
Keyword arguments:
logLevel -- the log level used for logging logs and messages from InChI
    API. Set to None to disable the logging completely
treatWarningAsError -- set to True to raise an exception in case of a
    molecule that generates a warning when calling the InChI API. The resultant InChI
string and AuxInfo string as well as the error message are encoded in the
exception.
Returns:
the standard InChI string returned by InChI API for the input molecule
"""
if options.find('AuxNone') == -1:
if options:
options += " /AuxNone"
else:
options += "/AuxNone"
try:
inchi, aux = MolBlockToInchiAndAuxInfo(molblock, options, logLevel=logLevel,
treatWarningAsError=treatWarningAsError)
except InchiReadWriteError as inst:
inchi, aux, message = inst.args
raise InchiReadWriteError(inchi, message)
return inchi
def InchiToInchiKey(inchi):
"""Return the InChI key for the given InChI string. Return None on error"""
ret = rdinchi.InchiToInchiKey(inchi)
if ret:
return ret
else:
return None
def MolToInchiKey(mol, options=""):
"""Returns the standard InChI key for a molecule
Returns:
the standard InChI key returned by InChI API for the input molecule
"""
  return rdinchi.MolToInchiKey(mol, options)
__all__ = ['MolToInchiAndAuxInfo', 'MolToInchi', 'MolBlockToInchiAndAuxInfo', 'MolBlockToInchi', 'MolFromInchi', 'InchiReadWriteError',
'InchiToInchiKey', 'MolToInchiKey', 'INCHI_AVAILABLE']
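# A minimal, illustrative sketch of how the functions above fit together; it
# assumes an RDKit build with InChI support (importing this module fails
# otherwise) and is not called anywhere in this module.
def _example_inchi_roundtrip():
  # build a molecule from SMILES, convert to InChI, derive the key, convert back
  from rdkit import Chem  # imported lazily to avoid any import-time cycles
  mol = Chem.MolFromSmiles('CCO')  # ethanol, purely illustrative
  inchi = MolToInchi(mol)          # standard InChI string
  key = InchiToInchiKey(inchi)     # fixed-length hashed form of the InChI
  back = MolFromInchi(inchi)       # round-trip back to a Mol
  return inchi, key, Chem.MolToSmiles(back)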
|
|
#! /usr/bin/env python3
"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 2045 Base64 encodings
'encode', 'decode', 'encodebytes', 'decodebytes',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Base85 and Ascii85 encodings
'b85encode', 'b85decode', 'a85encode', 'a85decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
bytes_types = (bytes, bytearray) # Types acceptable as binary data
def _bytes_from_decode_data(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except UnicodeEncodeError:
raise ValueError('string argument should contain only ASCII characters')
if isinstance(s, bytes_types):
return s
try:
return memoryview(s).tobytes()
except TypeError:
raise TypeError("argument should be a bytes-like object or ASCII "
"string, not %r" % s.__class__.__name__) from None
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode the bytes-like object s using Base64 and return a bytes object.
Optional altchars should be a byte string of length 2 which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
return encoded.translate(bytes.maketrans(b'+/', altchars))
return encoded
def b64decode(s, altchars=None, validate=False):
"""Decode the Base64 encoded bytes-like object or ASCII string s.
Optional altchars must be a bytes-like object or ASCII string of length 2
which specifies the alternative alphabet used instead of the '+' and '/'
characters.
The result is returned as a bytes object. A binascii.Error is raised if
s is incorrectly padded.
If validate is False (the default), characters that are neither in the
normal base-64 alphabet nor the alternative alphabet are discarded prior
to the padding check. If validate is True, these non-alphabet characters
in the input result in a binascii.Error.
"""
s = _bytes_from_decode_data(s)
if altchars is not None:
altchars = _bytes_from_decode_data(altchars)
assert len(altchars) == 2, repr(altchars)
s = s.translate(bytes.maketrans(altchars, b'+/'))
if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
raise binascii.Error('Non-base64 digit found')
return binascii.a2b_base64(s)
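# Illustrative only: a small sketch exercising the altchars parameter
# documented in b64encode/b64decode above (this is effectively how the
# urlsafe_* helpers below behave).
def _b64_altchars_example():
    # b'\xfb\xef\xbe' encodes to b'++++' in the standard alphabet, which makes
    # the alphabet substitution easy to see.
    assert b64encode(b'\xfb\xef\xbe') == b'++++'
    assert b64encode(b'\xfb\xef\xbe', altchars=b'-_') == b'----'
    assert b64decode(b'----', altchars=b'-_') == b'\xfb\xef\xbe'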
def standard_b64encode(s):
"""Encode bytes-like object s using the standard Base64 alphabet.
The result is returned as a bytes object.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode bytes encoded with the standard Base64 alphabet.
Argument s is a bytes-like object or ASCII string to decode. The result
is returned as a bytes object. A binascii.Error is raised if the input
is incorrectly padded. Characters that are not in the standard alphabet
are discarded prior to the padding check.
"""
return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
def urlsafe_b64encode(s):
"""Encode bytes using the URL- and filesystem-safe Base64 alphabet.
Argument s is a bytes-like object to encode. The result is returned as a
bytes object. The alphabet uses '-' instead of '+' and '_' instead of
'/'.
"""
return b64encode(s).translate(_urlsafe_encode_translation)
def urlsafe_b64decode(s):
"""Decode bytes using the URL- and filesystem-safe Base64 alphabet.
Argument s is a bytes-like object or ASCII string to decode. The result
is returned as a bytes object. A binascii.Error is raised if the input
is incorrectly padded. Characters that are not in the URL-safe base-64
alphabet, and are not a plus '+' or slash '/', are discarded prior to the
padding check.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
s = _bytes_from_decode_data(s)
s = s.translate(_urlsafe_decode_translation)
return b64decode(s)
# Base32 encoding/decoding must be done in Python
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
_b32tab2 = None
_b32rev = None
def b32encode(s):
"""Encode the bytes-like object s using Base32 and return a bytes object.
"""
global _b32tab2
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32tab2 is None:
b32tab = [bytes((i,)) for i in _b32alphabet]
_b32tab2 = [a + b for a in b32tab for b in b32tab]
b32tab = None
if not isinstance(s, bytes_types):
s = memoryview(s).tobytes()
leftover = len(s) % 5
# Pad the last quantum with zero bits if necessary
if leftover:
s = s + bytes(5 - leftover) # Don't use += !
encoded = bytearray()
from_bytes = int.from_bytes
b32tab2 = _b32tab2
for i in range(0, len(s), 5):
c = from_bytes(s[i: i + 5], 'big')
encoded += (b32tab2[c >> 30] + # bits 1 - 10
b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
b32tab2[c & 0x3ff] # bits 31 - 40
)
# Adjust for any leftover partial quanta
if leftover == 1:
encoded[-6:] = b'======'
elif leftover == 2:
encoded[-4:] = b'===='
elif leftover == 3:
encoded[-3:] = b'==='
elif leftover == 4:
encoded[-1:] = b'='
return bytes(encoded)
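# Illustrative sketch: Base32 works on 40-bit quanta, so short inputs pick up
# '=' padding up to a multiple of 8 output characters.
#   >>> b32encode(b'hi')
#   b'NBUQ===='
#   >>> b32encode(b'')
#   b''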
def b32decode(s, casefold=False, map01=None):
"""Decode the Base32 encoded bytes-like object or ASCII string s.
Optional casefold is a flag specifying whether a lowercase alphabet is
acceptable as input. For security purposes, the default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
letter O (oh), and for optional mapping of the digit 1 (one) to
either the letter I (eye) or letter L (el). The optional argument
map01 when not None, specifies which letter the digit 1 should be
mapped to (when map01 is not None, the digit 0 is always mapped to
the letter O). For security purposes the default is None, so that
0 and 1 are not allowed in the input.
The result is returned as a bytes object. A binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
global _b32rev
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32rev is None:
_b32rev = {v: k for k, v in enumerate(_b32alphabet)}
s = _bytes_from_decode_data(s)
if len(s) % 8:
raise binascii.Error('Incorrect padding')
    # Handle section 2.4 zero and one mapping.  The flag map01 will be either
    # None, or the character to map the digit 1 (one) to.  It should be
    # either L (el) or I (eye).
if map01 is not None:
map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
l = len(s)
s = s.rstrip(b'=')
padchars = l - len(s)
# Now decode the full quanta
decoded = bytearray()
b32rev = _b32rev
for i in range(0, len(s), 8):
quanta = s[i: i + 8]
acc = 0
try:
for c in quanta:
acc = (acc << 5) + b32rev[c]
except KeyError:
raise binascii.Error('Non-base32 digit found') from None
decoded += acc.to_bytes(5, 'big')
# Process the last, partial quanta
if padchars:
acc <<= 5 * padchars
last = acc.to_bytes(5, 'big')
if padchars == 1:
decoded[-5:] = last[:-1]
elif padchars == 3:
decoded[-5:] = last[:-2]
elif padchars == 4:
decoded[-5:] = last[:-3]
elif padchars == 6:
decoded[-5:] = last[:-4]
else:
raise binascii.Error('Incorrect padding')
return bytes(decoded)
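# Illustrative sketch: decoding reverses the padding logic above; casefold
# additionally accepts a lowercase alphabet.
#   >>> b32decode(b'NBUQ====')
#   b'hi'
#   >>> b32decode(b'nbuq====', casefold=True)
#   b'hi'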
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode the bytes-like object s using Base16 and return a bytes object.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode the Base16 encoded bytes-like object or ASCII string s.
Optional casefold is a flag specifying whether a lowercase alphabet is
acceptable as input. For security purposes, the default is False.
The result is returned as a bytes object. A binascii.Error is raised if
s is incorrectly padded or if there are non-alphabet characters present
in the input.
"""
s = _bytes_from_decode_data(s)
if casefold:
s = s.upper()
if re.search(b'[^0-9A-F]', s):
raise binascii.Error('Non-base16 digit found')
return binascii.unhexlify(s)
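# Illustrative sketch: Base16 is just uppercase hex; lowercase input is only
# accepted when casefold=True.
#   >>> b16encode(b'\xfa\xce')
#   b'FACE'
#   >>> b16decode(b'face', casefold=True)
#   b'\xfa\xce'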
#
# Ascii85 encoding/decoding
#
_a85chars = None
_a85chars2 = None
_A85START = b"<~"
_A85END = b"~>"
def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False):
# Helper function for a85encode and b85encode
if not isinstance(b, bytes_types):
b = memoryview(b).tobytes()
padding = (-len(b)) % 4
if padding:
b = b + b'\0' * padding
words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b)
chunks = [b'z' if foldnuls and not word else
b'y' if foldspaces and word == 0x20202020 else
(chars2[word // 614125] +
chars2[word // 85 % 7225] +
chars[word % 85])
for word in words]
if padding and not pad:
if chunks[-1] == b'z':
chunks[-1] = chars[0] * 5
chunks[-1] = chunks[-1][:-padding]
return b''.join(chunks)
def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
"""Encode bytes-like object b using Ascii85 and return a bytes object.
foldspaces is an optional flag that uses the special short sequence 'y'
instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
feature is not supported by the "standard" Adobe encoding.
wrapcol controls whether the output should have newline (b'\\n') characters
added to it. If this is non-zero, each output line will be at most this
many characters long.
pad controls whether the input is padded to a multiple of 4 before
encoding. Note that the btoa implementation always pads.
adobe controls whether the encoded byte sequence is framed with <~ and ~>,
which is used by the Adobe implementation.
"""
global _a85chars, _a85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _a85chars is None:
_a85chars = [bytes((i,)) for i in range(33, 118)]
_a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]
result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces)
if adobe:
result = _A85START + result
if wrapcol:
wrapcol = max(2 if adobe else 1, wrapcol)
chunks = [result[i: i + wrapcol]
for i in range(0, len(result), wrapcol)]
if adobe:
if len(chunks[-1]) + 2 > wrapcol:
chunks.append(b'')
result = b'\n'.join(chunks)
if adobe:
result += _A85END
return result
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
"""Decode the Ascii85 encoded bytes-like object or ASCII string b.
foldspaces is a flag that specifies whether the 'y' short sequence should be
accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
not supported by the "standard" Adobe encoding.
adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
is framed with <~ and ~>).
ignorechars should be a byte string containing characters to ignore from the
input. This should only contain whitespace characters, and by default
contains all whitespace characters in ASCII.
The result is returned as a bytes object.
"""
b = _bytes_from_decode_data(b)
if adobe:
if not b.endswith(_A85END):
raise ValueError(
"Ascii85 encoded byte sequences must end "
"with {!r}".format(_A85END)
)
if b.startswith(_A85START):
b = b[2:-2] # Strip off start/end markers
else:
b = b[:-2]
#
# We have to go through this stepwise, so as to ignore spaces and handle
# special short sequences
#
packI = struct.Struct('!I').pack
decoded = []
decoded_append = decoded.append
curr = []
curr_append = curr.append
curr_clear = curr.clear
for x in b + b'u' * 4:
if b'!'[0] <= x <= b'u'[0]:
curr_append(x)
if len(curr) == 5:
acc = 0
for x in curr:
acc = 85 * acc + (x - 33)
try:
decoded_append(packI(acc))
except struct.error:
raise ValueError('Ascii85 overflow') from None
curr_clear()
elif x == b'z'[0]:
if curr:
raise ValueError('z inside Ascii85 5-tuple')
decoded_append(b'\0\0\0\0')
elif foldspaces and x == b'y'[0]:
if curr:
raise ValueError('y inside Ascii85 5-tuple')
decoded_append(b'\x20\x20\x20\x20')
elif x in ignorechars:
# Skip whitespace
continue
else:
raise ValueError('Non-Ascii85 digit found: %c' % x)
result = b''.join(decoded)
padding = 4 - len(curr)
if padding:
# Throw away the extra padding
result = result[:-padding]
return result
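# Illustrative sketch: a group of four zero bytes folds to the single
# character 'z', and encode/decode round-trip arbitrary binary data, with or
# without the Adobe framing.
#   >>> a85encode(b'\x00\x00\x00\x00')
#   b'z'
#   >>> data = b'arbitrary \xff binary \x00 payload'
#   >>> a85decode(a85encode(data)) == data
#   True
#   >>> a85decode(a85encode(data, adobe=True), adobe=True) == data
#   True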
# The following code is originally taken (with permission) from Mercurial
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
_b85chars = None
_b85chars2 = None
_b85dec = None
def b85encode(b, pad=False):
"""Encode bytes-like object b in base85 format and return a bytes object.
If pad is true, the input is padded with b'\\0' so its length is a multiple of
4 bytes before encoding.
"""
global _b85chars, _b85chars2
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85chars is None:
_b85chars = [bytes((i,)) for i in _b85alphabet]
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
return _85encode(b, _b85chars, _b85chars2, pad)
def b85decode(b):
"""Decode the base85-encoded bytes-like object or ASCII string b
The result is returned as a bytes object.
"""
global _b85dec
# Delay the initialization of tables to not waste memory
# if the function is never called
if _b85dec is None:
_b85dec = [None] * 256
for i, c in enumerate(_b85alphabet):
_b85dec[c] = i
b = _bytes_from_decode_data(b)
padding = (-len(b)) % 5
b = b + b'~' * padding
out = []
packI = struct.Struct('!I').pack
for i in range(0, len(b), 5):
chunk = b[i:i + 5]
acc = 0
try:
for c in chunk:
acc = acc * 85 + _b85dec[c]
except TypeError:
for j, c in enumerate(chunk):
if _b85dec[c] is None:
raise ValueError('bad base85 character at position %d'
% (i + j)) from None
raise
try:
out.append(packI(acc))
except struct.error:
raise ValueError('base85 overflow in hunk starting at byte %d'
% i) from None
result = b''.join(out)
if padding:
result = result[:-padding]
return result
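# Illustrative sketch: the alphabet defined above starts with '0', so four
# zero bytes encode to b'00000'; decoding is the exact inverse.
#   >>> b85encode(b'\x00\x00\x00\x00')
#   b'00000'
#   >>> b85decode(b85encode(b'arbitrary \xff bytes')) == b'arbitrary \xff bytes'
#   True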
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though. The files should be opened in binary mode.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file; input and output are binary files."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file; input and output are binary files."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
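# Illustrative sketch: encode()/decode() operate on binary file objects; an
# in-memory io.BytesIO pair is enough to show the round trip.
#   >>> import io
#   >>> src, dst = io.BytesIO(b'data'), io.BytesIO()
#   >>> encode(src, dst)
#   >>> dst.getvalue()
#   b'ZGF0YQ==\n'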
def _input_type_check(s):
try:
m = memoryview(s)
except TypeError as err:
msg = "expected bytes-like object, not %s" % s.__class__.__name__
raise TypeError(msg) from err
if m.format not in ('c', 'b', 'B'):
msg = ("expected single byte elements, not %r from %s" %
(m.format, s.__class__.__name__))
raise TypeError(msg)
if m.ndim != 1:
msg = ("expected 1-D data, not %d-D data from %s" %
(m.ndim, s.__class__.__name__))
raise TypeError(msg)
def encodebytes(s):
"""Encode a bytestring into a bytes object containing multiple lines
of base-64 data."""
_input_type_check(s)
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return b"".join(pieces)
def encodestring(s):
"""Legacy alias of encodebytes()."""
import warnings
warnings.warn("encodestring() is a deprecated alias, use encodebytes()",
DeprecationWarning, 2)
return encodebytes(s)
def decodebytes(s):
"""Decode a bytestring of base-64 data into a bytes object."""
_input_type_check(s)
return binascii.a2b_base64(s)
def decodestring(s):
"""Legacy alias of decodebytes()."""
import warnings
warnings.warn("decodestring() is a deprecated alias, use decodebytes()",
DeprecationWarning, 2)
return decodebytes(s)
# Usable as a script...
def main():
"""Small main program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("""usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout.buffer)
else:
func(sys.stdin.buffer, sys.stdout.buffer)
def test():
s0 = b"Aladdin:open sesame"
print(repr(s0))
s1 = encodebytes(s0)
print(repr(s1))
s2 = decodebytes(s1)
print(repr(s2))
assert s0 == s2
if __name__ == '__main__':
main()
|
|
# gevent monkey patching should be done as soon as possible, don't move!
import gevent
import gevent.monkey
gevent.monkey.patch_all()
from JumpScale import j
j.application.start("jsagent")
import time
import sys
import atexit
import psutil
import os
import select
import subprocess
from JumpScale.baselib import cmdutils
import JumpScale.grid.agentcontroller
import socket
processes = list()
import JumpScale.baselib.redis
#from lib.web import PMWSServer
import JumpScale.grid.processmanager
class Process():
def __init__(self):
self.name="unknown"
self.domain=""
self.instance="0"
self.pid=0
self.workingdir=None
self.cmds=[]
self.env=None
self.pythonArgs={}
self.pythonObj=None
self.pythonCode=None
self.logpath=None
self.ports=[]
self.psstring=""
self.sync=False
self.restart=False
self.p=None
def start(self):
if self.cmds<>[]:
self._spawnProcess()
if self.pythonCode<>None:
if self.sync:
self.do()
else:
self.pid=os.fork()
if self.pid==0:
self.do()
else:
self.refresh()
def refresh(self):
self.p= psutil.Process(self.pid)
def kill(self):
if self.p<>None:
self.p.kill()
def is_running(self):
rss,vms=self.p.get_memory_info()
return vms<>0
def _spawnProcess(self):
if self.logpath==None:
self.logpath=j.system.fs.joinPaths(j.dirs.logDir,"processmanager","logs","%s_%s_%s.log"%(self.domain,self.name,self.instance))
j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.logDir,"processmanager","logs"))
stdin = subprocess.PIPE
stdout = sys.stdout
stderr = sys.stderr
self.cmds.extend(['-lp',self.logpath])
try:
self.p = psutil.Popen(self.cmds, env=self.env,cwd=self.workingdir,stdin=stdin, stdout=stdout, stderr=stderr,bufsize=0,shell=False) #f was: subprocess.PIPE
self.pid=self.p.pid
except Exception,e:
print "could not execute:%s\nError:\n%s"%(self,e)
time.sleep(0.1)
if self.is_running()==False:
print "could not execute:%s\n"%(self)
if j.system.fs.exists(path=self.logpath):
log=j.system.fs.fileGetContents(self.logpath)
print "log:\n%s"%log
def do(self):
print 'A new child %s' % self.name, os.getpid()
if self.pythonCode<>None:
exec(self.pythonCode)
os._exit(0)
def __str__(self):
return "%s"%self.__dict__
__repr__=__str__
class ProcessManager():
def __init__(self,reset=False):
self.processes = list()
self.services = list()
self.dir_data=j.system.fs.joinPaths(j.dirs.baseDir,"jsagent_data")
self.dir_hekadconfig=j.system.fs.joinPaths(self.dir_data,"dir_hekadconfig")
self.dir_actions=j.system.fs.joinPaths(self.dir_data,"actions")
j.system.fs.createDir(self.dir_data)
        #free ports 9998 and 8001 if something is already listening on them, then make sure a redis instance serves port 9999
for port in [9998,8001]:
if j.system.net.tcpPortConnectionTest("localhost",port):
j.system.process.killProcessByPort(port)
if j.system.net.tcpPortConnectionTest("localhost",9999)==False:
jp=j.packages.findNewest("jumpscale","redis")
if not jp.isInstalled(instance="mem") and not j.system.net.tcpPortConnectionTest("localhost",9999):
jp.install(hrddata={"redis.name":"mem","redis.port":9999,"redis.disk":"0","redis.mem":40},instance="mem")
for name in ["mem"]:
p=Process()
p.domain="jumpscale"
p.name="redis_%s"%name
p.instance=name
p.workingdir="/"
p.cmds=[j.dirs.replaceTxtDirVars("$base/apps/redis/redis-server"),j.dirs.replaceTxtDirVars("$vardir/redis/%s/redis.conf"%name)]
p.logpath=j.dirs.replaceTxtDirVars("$vardir/redis/%s/redis.log"%name)
p.start()
self.processes.append(p)
if j.system.net.waitConnectionTest("localhost",9999,10)==False:
j.events.opserror_critical("could not start redis on port 9999 inside processmanager",category="processmanager.redis.start")
self.redis_mem=j.clients.redis.getGeventRedisClient("localhost",9999)
# self.redis_disk=j.clients.redis.getGeventRedisClient("localhost",9998)
self.redis_queues={}
self.redis_queues["io"] = j.clients.redis.getGeventRedisQueue("localhost",9999,"workers:work:io")
self.redis_queues["hypervisor"] = j.clients.redis.getGeventRedisQueue("localhost",9999,"workers:work:hypervisor")
self.redis_queues["default"] = j.clients.redis.getGeventRedisQueue("localhost",9999,"workers:work:default")
self.redis_queues["process"] = j.clients.redis.getGeventRedisQueue("localhost",9999,"workers:work:process")
j.processmanager=self
self.hrd=j.application.instanceconfig
acip=self.hrd.get("ac.ipaddress",default="")
if "hekad" in self.services:
jp=j.packages.findNewest("jumpscale","hekad")
if not jp.isInstalled(instance="0"):
jp.install(hrddata={},instance="hekad")
p=Process()
p.domain="jumpscale"
p.name="hekad"
            p.instance="hekad"
p.workingdir="/opt/heka"
p.cmds=["bin/hekad","--config=%s"%self.dir_hekadconfig]
p.start()
self.processes.append(p)
if acip<>"":
if j.application.config.exists("grid.id"):
if j.application.config.get("grid.id")=="" or j.application.config.getInt("grid.id")==0:
j.application.config.set("grid.id",self.hrd.get("grid.id"))
acport=self.hrd.getInt("ac.port")
aclogin=self.hrd.get("ac.login",default="node")
acpasswd=self.hrd.get("ac.passwd",default="")
acclientinstancename = self.hrd.get('agentcontroller.connection')
#processmanager enabled
while j.system.net.waitConnectionTest(acip,acport,2)==False:
print "cannot connect to agentcontroller, will retry forever: '%s:%s'"%(acip,acport)
#now register to agentcontroller
self.acclient = j.clients.agentcontroller.get(acip, login=aclogin, passwd=acpasswd)
res=self.acclient.registerNode(hostname=socket.gethostname(), machineguid=j.application.getUniqueMachineId())
nid=res["node"]["id"]
webdiskey=res["webdiskey"]
j.application.config.set("grid.node.id",nid)
j.application.loadConfig()
j.application.initWhoAmI(True)
j.application.config.set("agentcontroller.webdiskey",webdiskey)
j.application.config.set("grid.id",res["node"]["gid"])
j.application.config.set("grid.node.machineguid",j.application.getUniqueMachineId())
j.application.config.set("grid.master.ip",acip)
if aclogin=="root":
j.application.config.set("grid.master.superadminpasswd",acpasswd)
jp=j.packages.findNewest("jumpscale","webdis_client")
if reset or not jp.isInstalled(instance="main"):
jp.install(hrddata={"addr":acip,"port":7779},instance="main",reinstall=reset)
jp=j.packages.findNewest("jumpscale","osis_client")
if reset or not jp.isInstalled(instance="processmanager"):
jp.install(hrddata={"osis.client.addr":acip,"osis.client.port":5544,"osis.client.login":aclogin,"osis.client.passwd":acpasswd},instance="processmanager",reinstall=reset)
self.hrd.set("osis.connection","processmanager")
jp=j.packages.findNewest("jumpscale","agentcontroller_client")
if reset or not jp.isInstalled(instance="main"):
jp.install(hrddata={"agentcontroller.client.addr":acip,"agentcontroller.client.port":4444,"agentcontroller.client.login":aclogin},instance=acclientinstancename,reinstall=reset)
self.acclient=j.clients.agentcontroller.getByInstance(acclientinstancename, new=True)
else:
self.acclient=None
def start(self):
# self._webserverStart()
self._workerStart()
j.core.grid.init()
gevent.spawn(self._processManagerStart)
self.mainloop()
def _webserverStart(self):
#start webserver
server=PMWSServer()
server.pm=self
p=Process()
p.domain="jumpscale"
p.name="web"
p.instance="main"
p.workingdir="/"
p.pythonObj=server
p.pythonCode="self.pythonObj.start()"
p.start()
self.processes.append(p)
def _processManagerStart(self):
j.core.processmanager.start()
def _workerStart(self):
pwd = '/opt/jumpscale/apps/jsagent/lib'
for qname in ["default","io","process","hypervisor"]:
p = Process()
p.domain = 'workers'
p.name = '%s' % qname
p.instance = 'main'
p.workingdir = pwd
p.cmds = ['python', 'worker.py', '-qn', qname, '-i', opts.instance]
p.restart = True
p.start()
self.processes.append(p)
def mainloop(self):
i=0
while True:
i+=1
# print "NEXT:%s\n"%i
for p in self.processes[:]:
# p.refresh()
if p.p<>None:
if not p.is_running():
if p.restart:
print "%s:%s was stopped restarting" % (p.domain, p.name)
p.start()
else:
print "Process %s has stopped" % p
p.kill()
self.processes.remove(p)
time.sleep(1)
if len(self.processes)==0:
print "no more children"
# return
@atexit.register
def kill_subprocesses():
for p in processes:
p.kill()
parser = cmdutils.ArgumentParser()
parser.add_argument("-i", '--instance', default="0", help='jsagent instance', required=False)
parser.add_argument("-r", '--reset', action='store_true',help='jsagent reset', required=False,default=False)
parser.add_argument("-s", '--services', help='list of services to run e.g heka, agentcontroller,web', required=False,default="")
opts = parser.parse_args()
jp=j.packages.findNewest("jumpscale","jsagent")
if opts.reset or not jp.isInstalled(instance=opts.instance):
jp.install(instance=opts.instance,reinstall=opts.reset)
jp = j.packages.findNewest('jumpscale', 'jsagent')
jp.load(opts.instance)
j.application.instanceconfig = jp.hrd_instance
#first start processmanager with all required stuff
pm=ProcessManager(reset=opts.reset)
processes=pm.processes
pm.services=[item.strip().lower() for item in opts.services.split(",")]
from lib.worker import Worker
#This has to be done in the parent process, otherwise it triggers odd issues with gevent
j.core.osis.client = j.core.osis.getClientByInstance()
from gevent.pywsgi import WSGIServer
pm.start()
j.application.stop()
|
|
# coding=utf-8
"""audio_read reads in a whole audio file with resampling."""
# Equivalent to:
# import librosa
# def audio_read(filename, sr=11025, channels=1):
# """Read a soundfile, return (d, sr)."""
# d, sr = librosa.load(filename, sr=sr, mono=(channels == 1))
# return d, sr
# The code below is adapted from:
# https://github.com/bmcfee/librosa/blob/master/librosa/core/audio.py
# This is its original copyright notice:
# Copyright (c) 2014, Brian McFee, Matt McVicar, Dawen Liang, Colin Raffel, Douglas Repetto, Dan Ellis.
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
import os
import re
import subprocess
import threading
import time
import numpy as np
# For wavread fallback.
import scipy.io.wavfile as wav
try:
import queue
except ImportError:
# noinspection PyUnresolvedReferences
import Queue as queue
# If ffmpeg is unavailable, you can set HAVE_FFMPEG to False which will cause
# soundfile reads to go via scipy.io.wavfile. However, this means that only
# *.wav files are supported *and* they must already be resampled to the
# system sampling rate (e.g. 11025 Hz).
HAVE_FFMPEG = True
def wavread(filename):
"""Read in audio data from a wav file. Return d, sr."""
# Read in wav file.
samplerate, wave_data = wav.read(filename)
# Normalize short ints to floats in range [-1..1).
data = np.asfarray(wave_data) / 32768.0
return data, samplerate
def audio_read(filename, sr=None, channels=None):
"""Read a soundfile, return (d, sr)."""
if HAVE_FFMPEG:
return audio_read_ffmpeg(filename, sr, channels)
else:
data, samplerate = wavread(filename)
if channels == 1 and len(data.shape) == 2 and data.shape[-1] != 1:
# Convert stereo to mono.
data = np.mean(data, axis=-1)
if sr and sr != samplerate:
raise ValueError("Wav file has samplerate %f but %f requested." % (
samplerate, sr))
return data, samplerate
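# Illustrative usage sketch ('example.wav' is a hypothetical path).  With
# HAVE_FFMPEG set, any format ffmpeg understands is decoded and resampled to
# the requested rate; with the wavread fallback the file must already match it.
#   d, sr = audio_read('example.wav', sr=11025, channels=1)
#   # d: 1-D numpy float array of samples, sr: the sample rate actually used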
def audio_read_ffmpeg(filename, sr=None, channels=None):
"""Read a soundfile, return (d, sr)."""
# Hacked version of librosa.load and audioread/ff.
offset = 0.0
duration = None
dtype = np.float32
y = []
with FFmpegAudioFile(os.path.realpath(filename),
sample_rate=sr, channels=channels) as input_file:
sr = input_file.sample_rate
channels = input_file.channels
s_start = int(np.floor(sr * offset) * channels)
if duration is None:
s_end = np.inf
else:
s_end = s_start + int(np.ceil(sr * duration) * channels)
num_read = 0
for frame in input_file:
frame = buf_to_float(frame, dtype=dtype)
num_read_prev = num_read
num_read += len(frame)
if num_read < s_start:
# offset is after the current frame, keep reading.
continue
if s_end < num_read_prev:
# we're off the end. stop reading
break
if s_end < num_read:
# the end is in this frame. crop.
frame = frame[:s_end - num_read_prev]
if num_read_prev <= s_start < num_read:
# beginning is in this frame
frame = frame[(s_start - num_read_prev):]
# tack on the current frame
y.append(frame)
if not len(y):
# Zero-length read
y = np.zeros(0, dtype=dtype)
else:
y = np.concatenate(y)
if channels > 1:
            y = y.reshape((-1, channels)).T
# Final cleanup for dtype and contiguity
y = np.ascontiguousarray(y, dtype=dtype)
return (y, sr)
def buf_to_float(x, n_bytes=2, dtype=np.float32):
"""Convert an integer buffer to floating point values.
This is primarily useful when loading integer-valued wav data
into numpy arrays.
.. seealso:: :func:`librosa.util.buf_to_float`
:parameters:
- x : np.ndarray [dtype=int]
The integer-valued data buffer
- n_bytes : int [1, 2, 4]
The number of bytes per sample in ``x``
- dtype : numeric type
The target output type (default: 32-bit float)
:return:
- x_float : np.ndarray [dtype=float]
The input data buffer cast to floating point
"""
# Invert the scale of the data
scale = 1. / float(1 << ((8 * n_bytes) - 1))
# Construct the format string
fmt = '<i{:d}'.format(n_bytes)
# Rescale and format the data buffer
return scale * np.frombuffer(x, fmt).astype(dtype)
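# Illustrative sketch: 16-bit little-endian PCM maps onto [-1.0, 1.0).
#   >>> import numpy as np
#   >>> pcm = np.array([0, 16384, -32768], dtype='<i2').tobytes()
#   >>> buf_to_float(pcm, n_bytes=2).tolist()
#   [0.0, 0.5, -1.0]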
# The code below is adapted from:
# https://github.com/sampsyo/audioread/blob/master/audioread/ffdec.py
# Below is its original copyright notice:
# This file is part of audioread.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
class QueueReaderThread(threading.Thread):
"""A thread that consumes data from a filehandle and sends the data
over a Queue.
"""
def __init__(self, fh, blocksize=1024, discard=False):
super(QueueReaderThread, self).__init__()
self.fh = fh
self.blocksize = blocksize
self.daemon = True
self.discard = discard
self.queue = None if discard else queue.Queue()
def run(self):
while True:
data = self.fh.read(self.blocksize)
if not self.discard:
self.queue.put(data)
if not data:
# Stream closed (EOF).
break
class FFmpegAudioFile(object):
"""An audio file decoded by the ffmpeg command-line utility."""
def __init__(self, filename, channels=None, sample_rate=None, block_size=4096):
if not os.path.isfile(filename):
raise ValueError(filename + " not found.")
popen_args = ['ffmpeg', '-i', filename, '-f', 's16le']
self.channels = channels
self.sample_rate = sample_rate
if channels:
popen_args.extend(['-ac', str(channels)])
if sample_rate:
popen_args.extend(['-ar', str(sample_rate)])
popen_args.append('-')
self.proc = subprocess.Popen(
popen_args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# Start another thread to consume the standard output of the
# process, which contains raw audio data.
self.stdout_reader = QueueReaderThread(self.proc.stdout, block_size)
self.stdout_reader.start()
# Read relevant information from stderr.
try:
self._get_info()
except ValueError:
raise ValueError("Error reading header info from " + filename)
# Start a separate thread to read the rest of the data from
# stderr. This (a) avoids filling up the OS buffer and (b)
# collects the error output for diagnosis.
self.stderr_reader = QueueReaderThread(self.proc.stderr)
self.stderr_reader.start()
def read_data(self, timeout=10.0):
"""Read blocks of raw PCM data from the file."""
# Read from stdout in a separate thread and consume data from
# the queue.
start_time = time.time()
while True:
# Wait for data to be available or a timeout.
data = None
try:
data = self.stdout_reader.queue.get(timeout=timeout)
if data:
yield data
else:
# End of file.
break
except queue.Empty:
# Queue read timed out.
end_time = time.time()
if not data:
if end_time - start_time >= timeout:
# Nothing interesting has happened for a while --
# FFmpeg is probably hanging.
raise ValueError('ffmpeg output: {}'.format(
''.join(self.stderr_reader.queue.queue)
))
else:
start_time = end_time
# Keep waiting.
continue
def _get_info(self):
"""Reads the tool's output from its stderr stream, extracts the
relevant information, and parses it.
"""
out_parts = []
while True:
line = self.proc.stderr.readline()
if not line:
# EOF and data not found.
raise ValueError("stream info not found")
# In Python 3, result of reading from stderr is bytes.
if isinstance(line, bytes):
line = line.decode('utf8', 'ignore')
line = line.strip().lower()
if 'no such file' in line:
raise IOError('file not found')
elif 'invalid data found' in line:
raise ValueError()
elif 'duration:' in line:
out_parts.append(line)
elif 'audio:' in line:
out_parts.append(line)
self._parse_info(''.join(out_parts))
break
def _parse_info(self, s):
"""Given relevant data from the ffmpeg output, set audio
parameter fields on this object.
"""
# Sample rate.
match = re.search(r'(\d+) hz', s)
if match:
self.sample_rate_orig = int(match.group(1))
else:
self.sample_rate_orig = 0
if self.sample_rate is None:
self.sample_rate = self.sample_rate_orig
# Channel count.
match = re.search(r'hz, ([^,]+),', s)
if match:
mode = match.group(1)
if mode == 'stereo':
self.channels_orig = 2
else:
match = re.match(r'(\d+) ', mode)
if match:
self.channels_orig = int(match.group(1))
else:
self.channels_orig = 1
else:
self.channels_orig = 0
if self.channels is None:
self.channels = self.channels_orig
# Duration.
match = re.search(
r'duration: (\d+):(\d+):(\d+).(\d)', s
)
if match:
durparts = list(map(int, match.groups()))
duration = (
durparts[0] * 60 * 60 +
durparts[1] * 60 +
durparts[2] +
float(durparts[3]) / 10
)
self.duration = duration
else:
# No duration found.
self.duration = 0
def close(self):
"""Close the ffmpeg process used to perform the decoding."""
# Kill the process if it is still running.
if hasattr(self, 'proc') and self.proc.returncode is None:
self.proc.kill()
self.proc.wait()
def __del__(self):
self.close()
# Iteration.
def __iter__(self):
return self.read_data()
# Context manager.
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
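# Illustrative usage sketch (assumes an ffmpeg binary on PATH; 'example.mp3'
# is a hypothetical input file):
#   with FFmpegAudioFile('example.mp3', channels=1, sample_rate=11025) as f:
#       pcm = b''.join(f)            # raw signed 16-bit little-endian samples
#       samples = buf_to_float(pcm)  # convert to floats in [-1.0, 1.0)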
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.cast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import platform
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class CastOpTest(test.TestCase):
def _toDataType(self, dtype):
"""Returns TensorFlow data type for numpy type."""
if dtype == np.float32:
return dtypes.float32
elif dtype == np.float64:
return dtypes.float64
elif dtype == np.int32:
return dtypes.int32
elif dtype == np.int64:
return dtypes.int64
elif dtype == np.bool:
return dtypes.bool
elif dtype == np.complex64:
return dtypes.complex64
elif dtype == np.complex128:
return dtypes.complex128
else:
return None
def _cast(self, x, dtype, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
val = constant_op.constant(x, self._toDataType(np.array([x]).dtype))
return math_ops.cast(val, self._toDataType(dtype), name="cast").eval()
def _test(self, x, dtype, use_gpu=False):
"""Tests cast(x) to dtype behaves the same as numpy.astype."""
np_ans = x.astype(dtype)
tf_ans = self._cast(x, dtype, use_gpu)
self.assertAllEqual(np_ans, tf_ans)
def _testTypes(self, x, use_gpu=False):
"""Tests cast(x) to different tf."""
if use_gpu:
type_list = [
np.float32, np.float64, np.int64, np.complex64, np.complex128
]
else:
type_list = [
np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]
for from_type in type_list:
for to_type in type_list:
self._test(x.astype(from_type), to_type, use_gpu)
self._test(x.astype(np.bool), np.float32, use_gpu)
self._test(x.astype(np.uint8), np.float32, use_gpu)
if not use_gpu:
self._test(x.astype(np.bool), np.int32, use_gpu)
self._test(x.astype(np.int32), np.int32, use_gpu)
def _testAll(self, x):
self._testTypes(x, use_gpu=False)
if x.dtype == np.float32 or x.dtype == np.float64:
self._testTypes(x, use_gpu=True)
def testBasic(self):
self._testAll(np.arange(-10, 10).reshape(2, 10))
self._testAll(np.linspace(-10, 10, 17))
def testSmallValues(self):
f4 = np.finfo(np.float32)
f8 = np.finfo(np.float64)
self._testAll(
np.array([
0, -1, 1, -f4.resolution, f4.resolution, f8.resolution,
-f8.resolution
]))
def testBfloat16(self):
a = np.random.uniform(-100, 100, 100).astype(np.float32)
with self.test_session(use_gpu=False):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)
with self.test_session(use_gpu=True):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)
def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))
  # Special values like int32max, int64min, inf, -inf, nan are cast to
  # integer values in somewhat unexpected ways, and they behave differently
  # on CPU and GPU.
def _compare(self, x, dst_dtype, expected, use_gpu=False):
np.testing.assert_equal(
self._cast(
x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))
def testIntToFloatBoundary(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(i4.min, np.float32, i4.min, False)
self._compare(i4.max, np.float32, i4.max, False)
self._compare(i8.min, np.float32, i8.min, False)
self._compare(i8.max, np.float32, i8.max, False)
self._compare(i4.min, np.float64, i4.min, False)
self._compare(i4.max, np.float64, i4.max, False)
self._compare(i8.min, np.float64, i8.min, False)
self._compare(i8.max, np.float64, i8.max, False)
# NOTE: GPU does not support int32/int64 for casting.
def testInfNan(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(np.inf, np.float32, np.inf, False)
self._compare(np.inf, np.float64, np.inf, False)
if sys.byteorder == "big":
self._compare(np.inf, np.int32, i4.max, False)
self._compare(np.inf, np.int64, i8.max, False)
else:
# np.float64("np.inf").astype(np.int32) is negative on x86 but positive on ppc64le
# Numpy link to relevant discussion - https://github.com/numpy/numpy/issues/9040
# Tensorflow link to relevant discussion - https://github.com/tensorflow/tensorflow/issues/9360
if platform.machine() == "ppc64le":
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
else:
self._compare(np.inf, np.int32, i4.min, False)
self._compare(np.inf, np.int64, i8.min, False)
self._compare(-np.inf, np.float32, -np.inf, False)
self._compare(-np.inf, np.float64, -np.inf, False)
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
self._compare(np.nan, np.int32, i4.min, False)
self._compare(np.nan, np.int64, i8.min, False)
self._compare(np.inf, np.float32, np.inf, True)
self._compare(np.inf, np.float64, np.inf, True)
self._compare(-np.inf, np.float32, -np.inf, True)
self._compare(-np.inf, np.float64, -np.inf, True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)
def _OpError(self, x, dtype, err):
with self.test_session():
with self.assertRaisesOpError(err):
math_ops.cast(x, dtype).eval()
def testNotImplemented(self):
self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int64.*string.*")
def testCastToTypeOfVariable(self):
with self.test_session() as sess:
x = variables.Variable(5, dtype=dtypes.float32)
y = variables.Variable(True, dtype=dtypes.bool)
cast = math_ops.cast(y, x.dtype)
variables.global_variables_initializer().run()
self.assertEqual(1.0, sess.run(cast))
def testGradients(self):
t = [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
for src_t in t:
for dst_t in t:
with self.test_session():
x = constant_op.constant(1.0, src_t)
z = array_ops.identity(x)
y = math_ops.cast(z, dst_t)
err = gradient_checker.compute_gradient_error(x, [], y, [])
self.assertLess(err, 1e-3)
class SparseTensorCastTest(test.TestCase):
def testCast(self):
indices = constant_op.constant([[0], [1], [2]], dtypes.int64)
values = constant_op.constant(np.array([1, 2, 3], np.int64))
shape = constant_op.constant([3], dtypes.int64)
st = sparse_tensor.SparseTensor(indices, values, shape)
st_cast = math_ops.cast(st, dtypes.float32)
with self.test_session():
self.assertAllEqual(st_cast.indices.eval(), [[0], [1], [2]])
self.assertAllEqual(st_cast.values.eval(),
np.array([1, 2, 3], np.float32))
self.assertAllEqual(st_cast.dense_shape.eval(), [3])
class SaturateCastTest(test.TestCase):
def testSaturate(self):
in_types = dtypes.float32,
out_types = dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.float32
with self.test_session() as sess:
for in_type in in_types:
for out_type in out_types:
lo, hi = in_type.min, in_type.max
x = constant_op.constant(
[lo, lo + 1, lo // 2, hi // 2, hi - 1, hi], dtype=in_type)
y = math_ops.saturate_cast(x, dtype=out_type)
self.assertEqual(y.dtype, out_type)
x, y = sess.run([x, y])
correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
self.assertAllEqual(correct, y)
if __name__ == "__main__":
test.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
import wtforms
from warehouse.accounts import forms
from warehouse.accounts.interfaces import TooManyFailedLogins
from warehouse.accounts.models import DisableReason
class TestLoginForm:
def test_creation(self):
request = pretend.stub()
user_service = pretend.stub()
breach_service = pretend.stub()
form = forms.LoginForm(
request=request, user_service=user_service, breach_service=breach_service
)
assert form.request is request
assert form.user_service is user_service
assert form.breach_service is breach_service
def test_validate_username_with_no_user(self):
request = pretend.stub()
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda userid: None)
)
breach_service = pretend.stub()
form = forms.LoginForm(
request=request, user_service=user_service, breach_service=breach_service
)
field = pretend.stub(data="my_username")
with pytest.raises(wtforms.validators.ValidationError):
form.validate_username(field)
assert user_service.find_userid.calls == [pretend.call("my_username")]
def test_validate_username_with_user(self):
request = pretend.stub()
user_service = pretend.stub(find_userid=pretend.call_recorder(lambda userid: 1))
breach_service = pretend.stub()
form = forms.LoginForm(
request=request, user_service=user_service, breach_service=breach_service
)
field = pretend.stub(data="my_username")
form.validate_username(field)
assert user_service.find_userid.calls == [pretend.call("my_username")]
def test_validate_password_no_user(self):
request = pretend.stub()
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda userid: None)
)
breach_service = pretend.stub()
form = forms.LoginForm(
data={"username": "my_username"},
request=request,
user_service=user_service,
breach_service=breach_service,
)
field = pretend.stub(data="password")
form.validate_password(field)
assert user_service.find_userid.calls == [
pretend.call("my_username"),
pretend.call("my_username"),
]
def test_validate_password_disabled_for_compromised_pw(self, db_session):
request = pretend.stub()
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda userid: 1),
is_disabled=pretend.call_recorder(
lambda userid: (True, DisableReason.CompromisedPassword)
),
)
breach_service = pretend.stub(failure_message="Bad Password!")
form = forms.LoginForm(
data={"username": "my_username"},
request=request,
user_service=user_service,
breach_service=breach_service,
)
field = pretend.stub(data="pw")
with pytest.raises(wtforms.validators.ValidationError, match=r"Bad Password\!"):
form.validate_password(field)
assert user_service.find_userid.calls == [pretend.call("my_username")]
assert user_service.is_disabled.calls == [pretend.call(1)]
def test_validate_password_ok(self):
request = pretend.stub()
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda userid: 1),
check_password=pretend.call_recorder(
lambda userid, password, tags=None: True
),
is_disabled=pretend.call_recorder(lambda userid: (False, None)),
)
breach_service = pretend.stub(
check_password=pretend.call_recorder(lambda pw, tags: False)
)
form = forms.LoginForm(
data={"username": "my_username"},
request=request,
user_service=user_service,
breach_service=breach_service,
check_password_metrics_tags=["bar"],
)
field = pretend.stub(data="pw")
form.validate_password(field)
assert user_service.find_userid.calls == [
pretend.call("my_username"),
pretend.call("my_username"),
]
assert user_service.is_disabled.calls == [pretend.call(1)]
assert user_service.check_password.calls == [
pretend.call(1, "pw", tags=["bar"])
]
assert breach_service.check_password.calls == [
pretend.call("pw", tags=["method:auth", "auth_method:login_form"])
]
def test_validate_password_notok(self, db_session):
request = pretend.stub()
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda userid: 1),
check_password=pretend.call_recorder(
lambda userid, password, tags=None: False
),
is_disabled=pretend.call_recorder(lambda userid: (False, None)),
)
breach_service = pretend.stub()
form = forms.LoginForm(
data={"username": "my_username"},
request=request,
user_service=user_service,
breach_service=breach_service,
)
field = pretend.stub(data="pw")
with pytest.raises(wtforms.validators.ValidationError):
form.validate_password(field)
assert user_service.find_userid.calls == [
pretend.call("my_username"),
pretend.call("my_username"),
]
assert user_service.is_disabled.calls == [pretend.call(1)]
assert user_service.check_password.calls == [pretend.call(1, "pw", tags=None)]
def test_validate_password_too_many_failed(self):
@pretend.call_recorder
def check_password(userid, password, tags=None):
raise TooManyFailedLogins(resets_in=None)
request = pretend.stub()
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda userid: 1),
check_password=check_password,
is_disabled=pretend.call_recorder(lambda userid: (False, None)),
)
breach_service = pretend.stub()
form = forms.LoginForm(
data={"username": "my_username"},
request=request,
user_service=user_service,
breach_service=breach_service,
)
field = pretend.stub(data="pw")
with pytest.raises(wtforms.validators.ValidationError):
form.validate_password(field)
assert user_service.find_userid.calls == [
pretend.call("my_username"),
pretend.call("my_username"),
]
assert user_service.is_disabled.calls == [pretend.call(1)]
assert user_service.check_password.calls == [pretend.call(1, "pw", tags=None)]
def test_password_breached(self, monkeypatch):
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(forms, "send_password_compromised_email", send_email)
user = pretend.stub(id=1)
request = pretend.stub()
user_service = pretend.stub(
find_userid=lambda _: 1,
get_user=lambda _: user,
check_password=lambda userid, pw, tags=None: True,
disable_password=pretend.call_recorder(lambda user_id, reason=None: None),
is_disabled=lambda userid: (False, None),
)
breach_service = pretend.stub(
check_password=lambda pw, tags=None: True, failure_message="Bad Password!"
)
form = forms.LoginForm(
data={"password": "password"},
request=request,
user_service=user_service,
breach_service=breach_service,
)
assert not form.validate()
assert form.password.errors.pop() == "Bad Password!"
assert user_service.disable_password.calls == [
pretend.call(1, reason=DisableReason.CompromisedPassword)
]
assert send_email.calls == [pretend.call(request, user)]
class TestRegistrationForm:
def test_create(self):
user_service = pretend.stub()
breach_service = pretend.stub()
form = forms.RegistrationForm(
data={}, user_service=user_service, breach_service=breach_service
)
assert form.user_service is user_service
def test_password_confirm_required_error(self):
form = forms.RegistrationForm(
data={"password_confirm": ""},
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
),
breach_service=pretend.stub(check_password=lambda pw: False),
)
assert not form.validate()
assert form.password_confirm.errors.pop() == "This field is required."
def test_passwords_mismatch_error(self):
user_service = pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
)
form = forms.RegistrationForm(
data={"new_password": "password", "password_confirm": "mismatch"},
user_service=user_service,
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
form.password_confirm.errors.pop()
== "Your passwords don't match. Try again."
)
def test_passwords_match_success(self):
user_service = pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
)
form = forms.RegistrationForm(
data={
"new_password": "MyStr0ng!shPassword",
"password_confirm": "MyStr0ng!shPassword",
},
user_service=user_service,
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
form.validate()
assert len(form.new_password.errors) == 0
assert len(form.password_confirm.errors) == 0
def test_email_required_error(self):
form = forms.RegistrationForm(
data={"email": ""},
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert form.email.errors.pop() == "This field is required."
def test_invalid_email_error(self):
form = forms.RegistrationForm(
data={"email": "bad"},
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: None)
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert form.email.errors.pop() == "The email address isn't valid. Try again."
def test_email_exists_error(self):
form = forms.RegistrationForm(
data={"email": "[email protected]"},
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: pretend.stub())
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
form.email.errors.pop()
== "This email address is already being used by another account. "
"Use a different email."
)
def test_blacklisted_email_error(self):
form = forms.RegistrationForm(
data={"email": "[email protected]"},
user_service=pretend.stub(
find_userid_by_email=pretend.call_recorder(lambda _: None)
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
form.email.errors.pop()
== "You can't create an account with an email address from "
"this domain. Use a different email."
)
def test_username_exists(self):
form = forms.RegistrationForm(
data={"username": "foo"},
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda name: 1)
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
form.username.errors.pop()
== "This username is already being used by another account. "
"Choose a different username."
)
@pytest.mark.parametrize("username", ["_foo", "bar_", "foo^bar"])
def test_username_is_valid(self, username):
form = forms.RegistrationForm(
data={"username": username},
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None)
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
form.username.errors.pop() == "The username is invalid. Usernames "
"must be composed of letters, numbers, "
"dots, hyphens and underscores. And must "
"also start and finish with a letter or number. "
"Choose a different username."
)
def test_password_strength(self):
cases = (
("foobar", False),
("somethingalittlebetter9", True),
("1aDeCent!1", True),
)
for pwd, valid in cases:
form = forms.RegistrationForm(
data={"new_password": pwd, "password_confirm": pwd},
user_service=pretend.stub(),
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
form.validate()
assert (len(form.new_password.errors) == 0) == valid
def test_password_breached(self):
form = forms.RegistrationForm(
data={"new_password": "password"},
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None)
),
breach_service=pretend.stub(
check_password=lambda pw, tags=None: True,
failure_message=(
"This password has appeared in a breach or has otherwise been "
"compromised and cannot be used."
),
),
)
assert not form.validate()
assert form.new_password.errors.pop() == (
"This password has appeared in a breach or has otherwise been "
"compromised and cannot be used."
)
def test_name_too_long(self):
form = forms.RegistrationForm(
data={"full_name": "hello " * 50},
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None)
),
breach_service=pretend.stub(check_password=lambda pw, tags=None: True),
)
assert not form.validate()
assert (
form.full_name.errors.pop()
== "The name is too long. Choose a name with 100 characters or less."
)
class TestRequestPasswordResetForm:
def test_creation(self):
user_service = pretend.stub()
form = forms.RequestPasswordResetForm(user_service=user_service)
assert form.user_service is user_service
def test_no_password_field(self):
user_service = pretend.stub()
form = forms.RequestPasswordResetForm(user_service=user_service)
assert "password" not in form._fields
def test_validate_username_or_email(self):
user_service = pretend.stub(
get_user_by_username=pretend.call_recorder(lambda userid: "1"),
get_user_by_email=pretend.call_recorder(lambda userid: "1"),
)
form = forms.RequestPasswordResetForm(user_service=user_service)
field = pretend.stub(data="username_or_email")
form.validate_username_or_email(field)
assert user_service.get_user_by_username.calls == [
pretend.call("username_or_email")
]
def test_validate_username_or_email_with_none(self):
user_service = pretend.stub(
get_user_by_username=pretend.call_recorder(lambda userid: None),
get_user_by_email=pretend.call_recorder(lambda userid: None),
)
form = forms.RequestPasswordResetForm(user_service=user_service)
field = pretend.stub(data="username_or_email")
with pytest.raises(wtforms.validators.ValidationError):
form.validate_username_or_email(field)
assert user_service.get_user_by_username.calls == [
pretend.call("username_or_email")
]
assert user_service.get_user_by_email.calls == [
pretend.call("username_or_email")
]
class TestResetPasswordForm:
def test_password_confirm_required_error(self):
form = forms.ResetPasswordForm(
data={"password_confirm": ""},
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert form.password_confirm.errors.pop() == "This field is required."
def test_passwords_mismatch_error(self):
form = forms.ResetPasswordForm(
data={
"new_password": "password",
"password_confirm": "mismatch",
"username": "username",
"full_name": "full_name",
"email": "email",
},
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert not form.validate()
assert (
form.password_confirm.errors.pop()
== "Your passwords don't match. Try again."
)
@pytest.mark.parametrize(
("password", "expected"),
[("foobar", False), ("somethingalittlebetter9", True), ("1aDeCent!1", True)],
)
def test_password_strength(self, password, expected):
form = forms.ResetPasswordForm(
data={
"new_password": password,
"password_confirm": password,
"username": "username",
"full_name": "full_name",
"email": "email",
},
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert form.validate() == expected
def test_passwords_match_success(self):
form = forms.ResetPasswordForm(
data={
"new_password": "MyStr0ng!shPassword",
"password_confirm": "MyStr0ng!shPassword",
"username": "username",
"full_name": "full_name",
"email": "email",
},
breach_service=pretend.stub(check_password=lambda pw, tags=None: False),
)
assert form.validate()
def test_password_breached(self):
form = forms.ResetPasswordForm(
data={
"new_password": "MyStr0ng!shPassword",
"password_confirm": "MyStr0ng!shPassword",
"username": "username",
"full_name": "full_name",
"email": "email",
},
user_service=pretend.stub(
find_userid=pretend.call_recorder(lambda _: None)
),
breach_service=pretend.stub(
check_password=lambda pw, tags=None: True,
failure_message=(
"This password has appeared in a breach or has otherwise been "
"compromised and cannot be used."
),
),
)
assert not form.validate()
assert form.new_password.errors.pop() == (
"This password has appeared in a breach or has otherwise been "
"compromised and cannot be used."
)
|
|
"""
Load npy xy, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font', family = 'serif', serif = 'cmr10')
import numpy as np
from datetime import timedelta
import datetime
import imp
import re
from textwrap import wrap
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
#unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
###############
# Things to change
top_dir='/nfs/a90/eepdw/Data/Rain_Land_Sea_Diurnal'
pp_file = 'avg.5216'
lon_max = 80
lon_min = 60
lat_max= 5
lat_min=-10
trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/'
trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s_southern_western_indian_ocean.npz" % (lat_min,lat_max, lon_min, lon_max)
#############
# Make own time x-axis
d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30), datetime.datetime(2011, 8, 22, 6, 30), timedelta(hours=1))
formatter = matplotlib.dates.DateFormatter('%H:%M')
def main():
#experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
experiment_ids_p = [ 'dkmbq', 'dklzq' ] # Most of Params
experiment_ids_e = ['dklyu', 'dklwu'] # Most of Explicit
#experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#plt.ion()
NUM_COLOURS = 15
cmap=cm.get_cmap(cm.Set1, NUM_COLOURS)
#cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS))
for ls in ['sea']:
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
legendEntries=[]
legendtext=[]
plot_trmm = np.load('%s%s_%s' % (trmm_dir, ls, trmm_file))
dates_trmm=[]
p=[]
for dp in plot_trmm['hour']:
print dp
if ((int(dp)<23) & (int(dp)>=6)):
dates_trmm.append(datetime.datetime(2011, 8, 21, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
if ((int(dp)>=0) & (int(dp)<=6)):
dates_trmm.append(datetime.datetime(2011, 8, 22, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
#print dates_trmm
a = np.argsort(dates_trmm,axis=0)
d_trmm = np.array(dates_trmm)[a]
pl = (np.array(p)[a])
#pl=np.sort(pl,axis=1)
l, = plt.plot_date(d_trmm, pl, label='TRMM', linewidth=2, linestyle='-', marker='', markersize=2, fmt='', color='#262626')
legendEntries.append(l)
legendtext.append('TRMM')
l0=plt.legend(legendEntries, legendtext,title='', frameon=False, prop={'size':8}, loc=9, bbox_to_anchor=(0.21, 0,1, 1))
# Change the legend label colors to almost black
texts = l0.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
for c, experiment_id in enumerate(experiment_ids_p):
expmin1 = experiment_id[:-1]
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*5/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max))
l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, prop={'size':8}, bbox_to_anchor=(0, 0,1, 1))
# Change the legend label colors to almost black
texts = l1.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
c1=0
for c, experiment_id in enumerate(experiment_ids_e):
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*5/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
expmin1 = experiment_id[:-1]
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max))
l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, bbox_to_anchor=(0.11, 0,1, 1), prop={'size':8})
plt.gca().add_artist(l1)
plt.gca().add_artist(l0)
plt.gca().xaxis.set_major_formatter(formatter)
# Change the legend label colors to almost black
texts = l2.texts
for t in texts:
t.set_color('#262626')
plt.xlabel('Time (UTC)')
plt.ylabel('mm/h')
title="Domain Averaged Rainfall - %s" % ls
t=re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL)
t = re.sub(r'[(\']', ' ', t)
t = re.sub(r'[\',)]', ' ', t)
pp_filenodot= pp_file.replace(".", "")
# Bit of formatting
# Set colour of axis lines
spines_to_keep = ['bottom', 'left']
for spine in spines_to_keep:
ax.spines[spine].set_linewidth(0.5)
ax.spines[spine].set_color('#262626')
# Remove top and right axes lines ("spines")
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
ax.spines[spine].set_visible(False)
# Get rid of ticks. The position of the numbers is informative enough of
# the position of the value.
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
# Change the labels to the off-black
ax.xaxis.label.set_color('#262626')
ax.yaxis.label.set_color('#262626')
if not os.path.exists('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/'): os.makedirs('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/')
plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_southern_western_indian_ocean_notitle_8and12kmonly.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s' % (t.title()), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_southern_western_indian_ocean_8and12kmonly.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8; mode: python; -*-
"""Module providing class for sense disambiguation of connectives.
Attributes:
DiscourseSenser (class): class for sense disambiguation of connectives
"""
##################################################################
# Imports
from __future__ import absolute_import, print_function, unicode_literals
from dsenser.constants import ARG1, ARG2, CHAR_SPAN, CONNECTIVE, ENCODING, \
RAW_TEXT, SENSE, TOK_LIST, TOK_OFFS_IDX, TYPE, DFLT_MODEL_PATH, \
DFLT_MODEL_TYPE, DFLT_ECONN_PATH, ALT_LEX, EXPLICIT, IMPLICIT, SVD, \
LSTM, MJR, WANG, XGBOOST, PARSE_TREE, DEPS, WORDS, SENTENCES, SHORT2FULL
from dsenser.utils import timeit
from collections import Iterable
from cPickle import dump, load
from itertools import chain
import codecs
import gc
import numpy as np
import os
import sys
##################################################################
# Variables and Constants
# load default explicit discourse connectives
DFLT_CONN = set(["upon"])
with codecs.open(DFLT_ECONN_PATH, 'r', ENCODING) as ifile:
for iline in ifile:
iline = iline.strip()
if iline:
DFLT_CONN.add(iline)
##################################################################
# Class
class DiscourseSenser(object):
"""
Class for sense disambiguation of connectives.
Attributes:
models (list(BaseSenser)):
sense disambiguation models
judge (dsenser.Judge):
meta-classifier
cls2idx (dict):
mapping from class to index
idx2cls (dict):
mapping from index to class
econn (set):
connectives marking explicit relations
"""
def __init__(self, a_model=None):
"""
Class constructor.
Args:
a_model (str or None): path to serialized model or None
"""
self.models = []
self.model_paths = []
self.judge = None
self.cls2idx = {}
self.idx2cls = {}
self.wbench = None
self.econn = set([self._normalize_conn(iconn) for iconn in DFLT_CONN])
# load serialized model
if a_model is not None:
self._load(a_model)
def train(self, a_train_data, a_type=DFLT_MODEL_TYPE,
a_path=DFLT_MODEL_PATH, a_dev_data=None,
a_grid_search=False, a_w2v=False, a_lstsq=False):
"""Train specified model(s) on the provided data.
Args:
a_train_data (list or None):
training set
a_path (str):
path for storing the model
a_type (str):
type of the model to be trained
a_dev_data (list or None):
development set
a_grid_search (bool):
                use grid search in order to determine hyper-parameters of
the model
a_w2v (bool):
use word2vec embeddings
a_lstsq (bool):
use least squares method
Returns:
void:
"""
if a_type == 0:
raise RuntimeError("No model type specified.")
if a_dev_data is None:
a_dev_data = ([], {})
# initialize models
if a_type & MJR:
from dsenser.major import MajorSenser
self.models.append(MajorSenser())
if a_type & WANG:
from dsenser.wang import WangSenser
self.models.append(WangSenser(a_grid_search=a_grid_search))
if a_type & XGBOOST:
from dsenser.xgboost import XGBoostSenser
self.models.append(XGBoostSenser(a_grid_search=a_grid_search))
# NN models have to go last, since we are pruning the parses for them
# to free some memory
nn_used = False
if a_type & SVD:
from dsenser.svd import SVDSenser
# since we cannot differentiate SVD yet, we can only use word2vec
# embeddings
if not a_w2v or a_lstsq:
print("SVD senser does not support task-specific embeddings "
"and least squares yet.", file=sys.stderr)
self.models.append(SVDSenser(a_w2v=True, a_lstsq=False,
a_max_iters=256))
nn_used = True
if a_type & LSTM:
from dsenser.lstm import LSTMSenser
self.models.append(LSTMSenser(a_w2v, a_lstsq))
nn_used = True
# remember all possible senses
n_senses = 0
for irel in chain(a_train_data[0], a_dev_data[0]
if a_dev_data is not None else []):
for isense in irel[SENSE]:
isense = SHORT2FULL.get(isense, isense)
if isense not in self.cls2idx:
n_senses = len(self.cls2idx)
self.cls2idx[isense] = n_senses
self.idx2cls[n_senses] = isense
if irel[TYPE] == EXPLICIT:
self.econn.add(self._normalize_conn(
irel[CONNECTIVE][RAW_TEXT]))
else:
irel[CONNECTIVE][RAW_TEXT] = ""
# convert sense classes to indices
self._sense2idx(a_train_data[0])
if a_dev_data is not None:
self._sense2idx(a_dev_data[0])
        # train models and remember their predictions (temporarily commented out due
# to memory optimization, since we are not using the judge now)
# x_train = np.zeros((len(a_train_data[0]), len(self.models),
# len(self.cls2idx)))
# x_dev = np.zeros((len(a_dev_data[0] if a_dev_data else ()),
# len(self.models), len(self.cls2idx)))
i = 0
data_pruned = False
imodel = x_train = x_dev = None
imodel_name = imodel_path = ""
imodel_dir = os.path.dirname(a_path)
while i < len(self.models):
imodel = self.models[i]
imodel_name = imodel.__class__.__name__
imodel_path = a_path + '.' + imodel_name
if nn_used and not data_pruned:
from dsenser.svd import SVDSenser
from dsenser.lstm import LSTMSenser
if isinstance(imodel, LSTMSenser) or \
isinstance(imodel, SVDSenser):
a_train_data = self._prune_data(*a_train_data)
a_dev_data = self._prune_data(*a_dev_data)
data_pruned = True
# i = -1 (means do not make predictions for the judge)
# imodel.train(a_train_data, a_dev_data, len(self.cls2idx),
# i, x_train, x_dev)
imodel.train(a_train_data, a_dev_data, len(self.cls2idx),
-1, x_train, x_dev)
self._dump(imodel, imodel_path)
self.model_paths.append(os.path.relpath(imodel_path,
imodel_dir))
self.models[i] = imodel = None
gc.collect()
i += 1
# convert training and development sets to the format appropriate for
# the judge
# x_train = [(x_i, irel, irel[SENSE])
# for x_i, irel in zip(x_train, a_train_data[0])]
# x_dev = [(x_i, irel, irel[SENSE])
# for x_i, irel in zip(x_dev, a_dev_data[0])]
# train the judge
# from dsenser.judge import Judge
# self.judge = Judge(len(self.models), len(self.cls2idx))
# self.judge.train(x_train, x_dev)
# dump model (clean the model list before)
self.models = []
self._dump(self, a_path)
def predict(self, a_data):
"""Determine senses of discourse connectives.
This is a memory-optimized version of prediction function. Due to
these optimizations, however, it does not support the judge model.
Args:
a_data (list):
input data to be analyzed
Returns:
void: updates input set in place
"""
if not self.model_paths:
raise RuntimeError(
"No paths to trained models are provided to make predictions.")
rels = a_data[0]
# normalize input relations
self._preprocess_rels(rels)
# predict sense
imodel = isense = None
# allocate space for predictions
self.wbench = np.zeros((len(rels), len(self.cls2idx)))
# iterate over each trained model and sum up their predictions
for ipath in self.model_paths:
print("ipath = {:s}".format(ipath).encode(ENCODING),
file=sys.stderr)
with open(ipath, "rb") as ifile:
imodel = load(ifile)
imodel.batch_predict(rels, a_data, self.wbench)
del imodel
imodel = None
gc.collect()
# make final judgements
idx = -1
isense = None
for i, irel in enumerate(rels):
idx = int(np.argmax(self.wbench[i]))
isense = self.idx2cls[idx]
irel[SENSE].append(SHORT2FULL.get(isense, isense))
# free memory occupied by workbench
del self.wbench
self.wbench = None
gc.collect()
# postprocess input relations
self._postprocess_rels(rels)
def _predict(self, a_rel, a_data):
"""Determine sense of discourse relation.
Args:
a_rel (dict):
JSON instance representing discourse relation
a_data (list):
2-tuple(dict, dict): input rels and parses
Returns:
tuple(str, float): predicted label and its probability
"""
# the best performing strategy so far is to return the highest mean
# judgment
x = self._prejudge(a_rel, a_data)
x_mean = np.mean(x, axis=0)
idx = np.argmax(x_mean)
lbl = self.idx2cls[int(idx)]
return (lbl, x_mean[idx])
# earlier we were using a pre-trained judge tensor
# idx, iprob = self.judge.predict(a_rel, self._prejudge(a_rel, a_data))
# lbl = self.idx2cls[int(idx)]
# return (lbl, iprob)
def _prejudge(self, a_rel, a_data):
"""Collect judgments of single classifiers.
Args:
a_rel (dict):
discourse relation whose sense should be predicted
a_data (2-tuple(dict, dict)):
list of input JSON data
Returns:
            np.array: the updated prediction workbench (``self.wbench``)
"""
if self.wbench is None:
self.wbench = np.zeros((len(self.models), len(self.cls2idx)))
else:
self.wbench *= 0
for i, imodel in enumerate(self.models):
imodel.predict(a_rel, a_data, self.wbench, i)
return self.wbench
def _prune_data(self, a_rels, a_parses):
"""Remove unnecessary information from data.
Args:
a_rels (list):
list of input discourse relations
a_parses (dict):
parse trees
Returns:
2-tuple(list, dict):
abridged input data
"""
arg = None
# clean-up relations
for irel in a_rels:
irel.pop("ID")
irel[CONNECTIVE].pop(CHAR_SPAN)
arg = irel[ARG1]
arg.pop(CHAR_SPAN)
arg.pop(RAW_TEXT)
arg = irel[ARG2]
arg.pop(CHAR_SPAN)
arg.pop(RAW_TEXT)
# clean-up parses
w_attrs = None
for isentences in a_parses.itervalues():
for isent in isentences[SENTENCES]:
isent.pop(PARSE_TREE)
isent.pop(DEPS)
for iword in isent[WORDS]:
iword[-1].clear()
return (a_rels, a_parses)
def _preprocess_rels(self, a_rels):
"""Preprocess input relations.
Args:
a_rels (list):
input relations to be preprocessed
Returns:
(void):
modifies ``a_rels`` in place
"""
arg1 = arg2 = None
for irel in a_rels:
arg1 = irel[ARG1]
arg1.pop(CHAR_SPAN, None)
arg1.pop(RAW_TEXT, None)
arg2 = irel[ARG2]
arg2.pop(CHAR_SPAN, None)
arg2.pop(RAW_TEXT, None)
if len(irel[CONNECTIVE][TOK_LIST]) == 0:
irel[CONNECTIVE][RAW_TEXT] = ""
if not SENSE in irel:
irel[SENSE] = []
if not TYPE in irel or not irel[TYPE]:
irel[TYPE] = self._get_type(irel)
def _postprocess_rels(self, a_rels):
"""Postprocess input relations.
Args:
a_rels (list):
input relations to be preprocessed
Returns:
(void):
modifies ``a_rels`` in place
"""
arg1 = arg2 = None
for irel in a_rels:
arg1 = irel[ARG1]
arg2 = irel[ARG2]
irel[CONNECTIVE].pop(CHAR_SPAN, None)
if irel[TYPE] != EXPLICIT:
irel[CONNECTIVE].pop(RAW_TEXT, None)
arg1[TOK_LIST] = self._normalize_tok_list(arg1[TOK_LIST])
arg2[TOK_LIST] = self._normalize_tok_list(arg2[TOK_LIST])
irel[CONNECTIVE][TOK_LIST] = self._normalize_tok_list(
irel[CONNECTIVE][TOK_LIST])
def _sense2idx(self, a_rels):
"""Convert symbolic senses to vectors.
Args:
a_rels (list):
list of discourse relations
Returns:
void:
Note:
updates ``a_rels`` in place
"""
n_senses = len(self.cls2idx)
isense = isenses = vsense = None
for irel in a_rels:
isenses = irel[SENSE]
vsense = np.zeros(n_senses)
for isense in isenses:
isense = SHORT2FULL.get(isense, isense)
vsense[self.cls2idx[isense]] = 1
irel[SENSE] = vsense / sum(vsense)
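        # Illustration of the conversion performed by _sense2idx() above (toy values assumed):
        # with ``cls2idx == {'Comparison': 0, 'Contingency': 1}`` and ``irel[SENSE] == ['Comparison']``,
        # the relation's SENSE becomes ``array([1., 0.])``; two senses would give ``array([0.5, 0.5])``.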
def _get_type(self, a_rel):
"""Determine type of discourse relation.
Args:
a_rel (dict):
relation in question
Returns:
            str: relation type (one of ``EXPLICIT``, ``IMPLICIT``, or ``ALT_LEX``)
"""
conn = a_rel[CONNECTIVE]
conn_txt = conn.get(RAW_TEXT, None)
if conn_txt is not None:
if not conn.get(TOK_LIST, None):
rel = IMPLICIT
elif self._normalize_conn(conn_txt) in self.econn:
rel = EXPLICIT
else:
rel = ALT_LEX
else:
rel = IMPLICIT
return rel
def _normalize_tok_list(self, a_tok_list):
"""Flatten token list, only leaving doc offsets.
Args:
            a_tok_list (list(list(int))):
                token list to be flattened
        Returns:
            list: token list containing document-level offsets only
"""
return [el[TOK_OFFS_IDX] if isinstance(el, Iterable) else el
for el in a_tok_list]
def _normalize_conn(self, a_conn):
"""Normalize connective form.
Args:
a_conn (str):
                connective to be normalized
        Returns:
            str: normalized (stripped and lowercased) connective
"""
return a_conn.strip().lower()
def _dump(self, a_obj, a_path=None):
"""Dump this model to disc at the given path.
Args:
a_obj (object):
object being dumped
a_path (str or None):
path to file in which to store the model
Returns:
(void)
"""
if a_path is None:
return
# check directory
dirname = os.path.dirname(a_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
elif not os.path.exists(a_path):
if not os.access(dirname, os.W_OK) or \
not os.path.isdir(dirname):
raise RuntimeError("Cannot write to directory '{:s}'.".format(
dirname))
else:
if not os.access(a_path, os.W_OK):
raise RuntimeError("Cannot write to file '{:s}'.".format(
a_path))
with open(a_path, "wb") as ofile:
dump(a_obj, ofile)
def _load(self, a_path):
"""Load serialized model from disc.
Args:
a_path (str):
path to file from which to load the model
Returns:
(void)
"""
# load paths to serialized models
with open(a_path, "rb") as ifile:
self._move(load(ifile))
bfname = os.path.dirname(a_path)
# normalize paths to serialized models
self.model_paths = [os.path.join(bfname, ipath)
for ipath in self.model_paths]
def _move(self, a_senser):
"""Load serialized model from disc.
Args:
a_senser (dsenser.DiscourseSenser):
source DiscourseSenser model
Returns:
(void)
"""
self._free()
self._copy(a_senser)
def _copy(self, a_senser):
"""Load serialized model from disc.
Args:
a_senser (dsenser.DiscourseSenser):
source DiscourseSenser model
Returns:
(void)
"""
self.wbench = a_senser.wbench
self.models = a_senser.models
self.model_paths = a_senser.model_paths
self.judge = a_senser.judge
self.cls2idx = a_senser.cls2idx
self.idx2cls = a_senser.idx2cls
self.econn = a_senser.econn
def _free(self):
"""Free allocated resources.
Args:
(void):
Returns:
(void)
"""
for imodel in self.models:
imodel._free()
del self.models[:]
del self.judge
del self.cls2idx
del self.idx2cls
del self.econn
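# Minimal usage sketch (illustrative only, not part of the original module; the exact
# layout of the CoNLL-style ``(relations, parses)`` tuples and the model path are assumed):
#
#     senser = DiscourseSenser()
#     senser.train((train_rels, train_parses), a_type=MJR | WANG,
#                  a_path="models/pdtb.senser", a_dev_data=(dev_rels, dev_parses))
#
#     senser = DiscourseSenser("models/pdtb.senser")
#     senser.predict((test_rels, test_parses))  # appends predicted senses to each relation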
|
|
"""
Sparse matrix functions
"""
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
__all__ = ['expm', 'inv']
import numpy as np
import scipy.special
from scipy._lib._util import float_factorial
from scipy.linalg._basic import solve, solve_triangular
from scipy.sparse._base import isspmatrix
from scipy.sparse.linalg import spsolve
from scipy.sparse._sputils import is_pydata_spmatrix
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg._interface import LinearOperator
from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm
UPPER_TRIANGULAR = 'upper_triangular'
def inv(A):
"""
Compute the inverse of a sparse matrix
Parameters
----------
A : (M,M) ndarray or sparse matrix
square matrix to be inverted
Returns
-------
Ainv : (M,M) ndarray or sparse matrix
inverse of `A`
Notes
-----
This computes the sparse inverse of `A`. If the inverse of `A` is expected
to be non-sparse, it will likely be faster to convert `A` to dense and use
scipy.linalg.inv.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import inv
>>> A = csc_matrix([[1., 0.], [1., 2.]])
>>> Ainv = inv(A)
>>> Ainv
<2x2 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Column format>
>>> A.dot(Ainv)
<2x2 sparse matrix of type '<class 'numpy.float64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> A.dot(Ainv).toarray()
array([[ 1., 0.],
[ 0., 1.]])
.. versionadded:: 0.12.0
"""
#check input
if not (scipy.sparse.isspmatrix(A) or is_pydata_spmatrix(A)):
raise TypeError('Input must be a sparse matrix')
I = _ident_like(A)
Ainv = spsolve(A, I)
return Ainv
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = np.ones((A.shape[0], 1), dtype=float)
M = A.T
for i in range(p):
v = M.dot(v)
return np.max(v)
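# Worked illustration for _onenorm_matrix_power_nnm (comments only; small example assumed):
#   A = [[1, 2],          A**2 = [[1, 4],
#        [0, 1]]                  [0, 1]]
#   the column sums of A**2 are (1, 5), so its 1-norm is 5, and
#   _onenorm_matrix_power_nnm(np.array([[1., 2.], [0., 1.]]), 2) returns 5.0
#   without forming A**2 explicitly (only p matrix-vector products are performed).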
def _is_upper_triangular(A):
# This function could possibly be of wider interest.
if isspmatrix(A):
lower_part = scipy.sparse.tril(A, -1)
# Check structural upper triangularity,
# then coincidental upper triangularity if needed.
return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
elif is_pydata_spmatrix(A):
import sparse
lower_part = sparse.tril(A, -1)
return lower_part.nnz == 0
else:
return not np.tril(A, -1).any()
def _smart_matrix_product(A, B, alpha=None, structure=None):
"""
A matrix product that knows about sparse and structured matrices.
Parameters
----------
A : 2d ndarray
First matrix.
B : 2d ndarray
Second matrix.
alpha : float
The matrix product will be scaled by this constant.
structure : str, optional
A string describing the structure of both matrices `A` and `B`.
Only `upper_triangular` is currently supported.
Returns
-------
M : 2d ndarray
Matrix product of A and B.
"""
if len(A.shape) != 2:
raise ValueError('expected A to be a rectangular matrix')
if len(B.shape) != 2:
raise ValueError('expected B to be a rectangular matrix')
f = None
if structure == UPPER_TRIANGULAR:
if (not isspmatrix(A) and not isspmatrix(B)
and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)):
f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
if f is not None:
if alpha is None:
alpha = 1.
out = f(alpha, A, B)
else:
if alpha is None:
out = A.dot(B)
else:
out = alpha * A.dot(B)
return out
class MatrixPowerOperator(LinearOperator):
def __init__(self, A, p, structure=None):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0:
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self._structure = structure
self.dtype = A.dtype
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x)
return x
def _rmatvec(self, x):
A_T = self._A.T
x = x.ravel()
for i in range(self._p):
x = A_T.dot(x)
return x
def _matmat(self, X):
for i in range(self._p):
X = _smart_matrix_product(self._A, X, structure=self._structure)
return X
@property
def T(self):
return MatrixPowerOperator(self._A.T, self._p)
class ProductOperator(LinearOperator):
"""
For now, this is limited to products of multiple square matrices.
"""
def __init__(self, *args, **kwargs):
self._structure = kwargs.get('structure', None)
for A in args:
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError(
'For now, the ProductOperator implementation is '
'limited to the product of multiple square matrices.')
if args:
n = args[0].shape[0]
for A in args:
for d in A.shape:
if d != n:
raise ValueError(
'The square matrices of the ProductOperator '
'must all have the same shape.')
self.shape = (n, n)
self.ndim = len(self.shape)
self.dtype = np.find_common_type([x.dtype for x in args], [])
self._operator_sequence = args
def _matvec(self, x):
for A in reversed(self._operator_sequence):
x = A.dot(x)
return x
def _rmatvec(self, x):
x = x.ravel()
for A in self._operator_sequence:
x = A.T.dot(x)
return x
def _matmat(self, X):
for A in reversed(self._operator_sequence):
X = _smart_matrix_product(A, X, structure=self._structure)
return X
@property
def T(self):
T_args = [A.T for A in reversed(self._operator_sequence)]
return ProductOperator(*T_args)
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
MatrixPowerOperator(A, p, structure=structure))
def _onenormest_product(operator_seq,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of the matrix product of the args.
Parameters
----------
operator_seq : linear operator sequence
Matrices whose 1-norm of product is to be computed.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
structure : str, optional
A string describing the structure of all operators.
Only `upper_triangular` is currently supported.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
ProductOperator(*operator_seq, structure=structure))
class _ExpmPadeHelper:
"""
Help lazily evaluate a matrix exponential.
The idea is to not do more work than we need for high expm precision,
so we lazily compute matrix powers and store or precompute
other properties of the matrix.
"""
def __init__(self, A, structure=None, use_exact_onenorm=False):
"""
Initialize the object.
Parameters
----------
A : a dense or sparse square numpy matrix or ndarray
The matrix to be exponentiated.
structure : str, optional
A string describing the structure of matrix `A`.
Only `upper_triangular` is currently supported.
use_exact_onenorm : bool, optional
If True then only the exact one-norm of matrix powers and products
will be used. Otherwise, the one-norm of powers and products
may initially be estimated.
"""
self.A = A
self._A2 = None
self._A4 = None
self._A6 = None
self._A8 = None
self._A10 = None
self._d4_exact = None
self._d6_exact = None
self._d8_exact = None
self._d10_exact = None
self._d4_approx = None
self._d6_approx = None
self._d8_approx = None
self._d10_approx = None
self.ident = _ident_like(A)
self.structure = structure
self.use_exact_onenorm = use_exact_onenorm
@property
def A2(self):
if self._A2 is None:
self._A2 = _smart_matrix_product(
self.A, self.A, structure=self.structure)
return self._A2
@property
def A4(self):
if self._A4 is None:
self._A4 = _smart_matrix_product(
self.A2, self.A2, structure=self.structure)
return self._A4
@property
def A6(self):
if self._A6 is None:
self._A6 = _smart_matrix_product(
self.A4, self.A2, structure=self.structure)
return self._A6
@property
def A8(self):
if self._A8 is None:
self._A8 = _smart_matrix_product(
self.A6, self.A2, structure=self.structure)
return self._A8
@property
def A10(self):
if self._A10 is None:
self._A10 = _smart_matrix_product(
self.A4, self.A6, structure=self.structure)
return self._A10
@property
def d4_tight(self):
if self._d4_exact is None:
self._d4_exact = _onenorm(self.A4)**(1/4.)
return self._d4_exact
@property
def d6_tight(self):
if self._d6_exact is None:
self._d6_exact = _onenorm(self.A6)**(1/6.)
return self._d6_exact
@property
def d8_tight(self):
if self._d8_exact is None:
self._d8_exact = _onenorm(self.A8)**(1/8.)
return self._d8_exact
@property
def d10_tight(self):
if self._d10_exact is None:
self._d10_exact = _onenorm(self.A10)**(1/10.)
return self._d10_exact
@property
def d4_loose(self):
if self.use_exact_onenorm:
return self.d4_tight
if self._d4_exact is not None:
return self._d4_exact
else:
if self._d4_approx is None:
self._d4_approx = _onenormest_matrix_power(self.A2, 2,
structure=self.structure)**(1/4.)
return self._d4_approx
@property
def d6_loose(self):
if self.use_exact_onenorm:
return self.d6_tight
if self._d6_exact is not None:
return self._d6_exact
else:
if self._d6_approx is None:
self._d6_approx = _onenormest_matrix_power(self.A2, 3,
structure=self.structure)**(1/6.)
return self._d6_approx
@property
def d8_loose(self):
if self.use_exact_onenorm:
return self.d8_tight
if self._d8_exact is not None:
return self._d8_exact
else:
if self._d8_approx is None:
self._d8_approx = _onenormest_matrix_power(self.A4, 2,
structure=self.structure)**(1/8.)
return self._d8_approx
@property
def d10_loose(self):
if self.use_exact_onenorm:
return self.d10_tight
if self._d10_exact is not None:
return self._d10_exact
else:
if self._d10_approx is None:
self._d10_approx = _onenormest_product((self.A4, self.A6),
structure=self.structure)**(1/10.)
return self._d10_approx
def pade3(self):
b = (120., 60., 12., 1.)
U = _smart_matrix_product(self.A,
b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[2]*self.A2 + b[0]*self.ident
return U, V
def pade5(self):
b = (30240., 15120., 3360., 420., 30., 1.)
U = _smart_matrix_product(self.A,
b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade7(self):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
U = _smart_matrix_product(self.A,
b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade9(self):
b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
2162160., 110880., 3960., 90., 1.)
U = _smart_matrix_product(self.A,
(b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
b[3]*self.A2 + b[1]*self.ident),
structure=self.structure)
V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
b[2]*self.A2 + b[0]*self.ident)
return U, V
def pade13_scaled(self, s):
b = (64764752532480000., 32382376266240000., 7771770303897600.,
1187353796428800., 129060195264000., 10559470521600.,
670442572800., 33522128640., 1323241920., 40840800., 960960.,
16380., 182., 1.)
B = self.A * 2**-s
B2 = self.A2 * 2**(-2*s)
B4 = self.A4 * 2**(-4*s)
B6 = self.A6 * 2**(-6*s)
U2 = _smart_matrix_product(B6,
b[13]*B6 + b[11]*B4 + b[9]*B2,
structure=self.structure)
U = _smart_matrix_product(B,
(U2 + b[7]*B6 + b[5]*B4 +
b[3]*B2 + b[1]*self.ident),
structure=self.structure)
V2 = _smart_matrix_product(B6,
b[12]*B6 + b[10]*B4 + b[8]*B2,
structure=self.structure)
V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
return U, V
def expm(A):
"""
Compute the matrix exponential using Pade approximation.
Parameters
----------
A : (M,M) array_like or sparse matrix
2D Array or Matrix (sparse or dense) to be exponentiated
Returns
-------
expA : (M,M) ndarray
Matrix exponential of `A`
Notes
-----
This is algorithm (6.1) which is a simplification of algorithm (5.1).
.. versionadded:: 0.12.0
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
"A New Scaling and Squaring Algorithm for the Matrix Exponential."
SIAM Journal on Matrix Analysis and Applications.
31 (3). pp. 970-989. ISSN 1095-7162
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import expm
>>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
>>> A.toarray()
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]], dtype=int64)
>>> Aexp = expm(A)
>>> Aexp
<3x3 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Column format>
>>> Aexp.toarray()
array([[ 2.71828183, 0. , 0. ],
[ 0. , 7.3890561 , 0. ],
[ 0. , 0. , 20.08553692]])
"""
return _expm(A, use_exact_onenorm='auto')
def _expm(A, use_exact_onenorm):
# Core of expm, separated to allow testing exact and approximate
# algorithms.
# Avoid indiscriminate asarray() to allow sparse or other strange arrays.
if isinstance(A, (list, tuple, np.matrix)):
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
# gracefully handle size-0 input,
# carefully handling sparse scenario
if A.shape == (0, 0):
out = np.zeros([0, 0], dtype=A.dtype)
if isspmatrix(A) or is_pydata_spmatrix(A):
return A.__class__(out)
return out
# Trivial case
if A.shape == (1, 1):
out = [[np.exp(A[0, 0])]]
# Avoid indiscriminate casting to ndarray to
# allow for sparse or other strange arrays
if isspmatrix(A) or is_pydata_spmatrix(A):
return A.__class__(out)
return np.array(out)
# Ensure input is of float type, to avoid integer overflows etc.
if ((isinstance(A, np.ndarray) or isspmatrix(A) or is_pydata_spmatrix(A))
and not np.issubdtype(A.dtype, np.inexact)):
A = A.astype(float)
# Detect upper triangularity.
structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
if use_exact_onenorm == "auto":
# Hardcode a matrix order threshold for exact vs. estimated one-norms.
use_exact_onenorm = A.shape[0] < 200
# Track functions of A to help compute the matrix exponential.
h = _ExpmPadeHelper(
A, structure=structure, use_exact_onenorm=use_exact_onenorm)
# Try Pade order 3.
eta_1 = max(h.d4_loose, h.d6_loose)
if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
U, V = h.pade3()
return _solve_P_Q(U, V, structure=structure)
# Try Pade order 5.
eta_2 = max(h.d4_tight, h.d6_loose)
if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
U, V = h.pade5()
return _solve_P_Q(U, V, structure=structure)
# Try Pade orders 7 and 9.
eta_3 = max(h.d6_tight, h.d8_loose)
if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
U, V = h.pade7()
return _solve_P_Q(U, V, structure=structure)
if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
U, V = h.pade9()
return _solve_P_Q(U, V, structure=structure)
# Use Pade order 13.
eta_4 = max(h.d8_loose, h.d10_loose)
eta_5 = min(eta_3, eta_4)
theta_13 = 4.25
# Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
if eta_5 == 0:
# Nilpotent special case
s = 0
else:
s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
s = s + _ell(2**-s * h.A, 13)
U, V = h.pade13_scaled(s)
X = _solve_P_Q(U, V, structure=structure)
if structure == UPPER_TRIANGULAR:
# Invoke Code Fragment 2.1.
X = _fragment_2_1(X, h.A, s)
else:
# X = r_13(A)^(2^s) by repeated squaring.
for i in range(s):
X = X.dot(X)
return X
def _solve_P_Q(U, V, structure=None):
"""
A helper function for expm_2009.
Parameters
----------
U : ndarray
Pade numerator.
V : ndarray
Pade denominator.
structure : str, optional
A string describing the structure of both matrices `U` and `V`.
Only `upper_triangular` is currently supported.
Notes
-----
The `structure` argument is inspired by similar args
for theano and cvxopt functions.
"""
P = U + V
Q = -U + V
if isspmatrix(U) or is_pydata_spmatrix(U):
return spsolve(Q, P)
elif structure is None:
return solve(Q, P)
elif structure == UPPER_TRIANGULAR:
return solve_triangular(Q, P)
else:
raise ValueError('unsupported matrix structure: ' + str(structure))
def _exp_sinch(a, x):
"""
Stably evaluate exp(a)*sinh(x)/x
Notes
-----
The strategy of falling back to a sixth order Taylor expansion
was suggested by the Spallation Neutron Source docs
    which were found on the internet via a Google search.
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
The details of the cutoff point and the Horner-like evaluation
was picked without reference to anything in particular.
Note that sinch is not currently implemented in scipy.special,
whereas the "engineer's" definition of sinc is implemented.
The implementation of sinc involves a scaling factor of pi
that distinguishes it from the "mathematician's" version of sinc.
"""
# If x is small then use sixth order Taylor expansion.
# How small is small? I am using the point where the relative error
# of the approximation is less than 1e-14.
# If x is large then directly evaluate sinh(x) / x.
if abs(x) < 0.0135:
x2 = x*x
return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))))
else:
return (np.exp(a + x) - np.exp(a - x)) / (2*x)
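# Sanity note for _exp_sinch (illustrative): for small x the two branches agree closely,
# e.g. _exp_sinch(0.0, 0.01) ~= sinh(0.01)/0.01 ~= 1.0000166667, while at x == 0 only the
# Taylor branch is defined (the direct formula would divide by zero).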
def _eq_10_42(lam_1, lam_2, t_12):
"""
Equation (10.42) of Functions of Matrices: Theory and Computation.
Notes
-----
This is a helper function for _fragment_2_1 of expm_2009.
Equation (10.42) is on page 251 in the section on Schur algorithms.
In particular, section 10.4.3 explains the Schur-Parlett algorithm.
    expm([[lam_1, t_12], [0, lam_2]])
    =
    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
    [0, exp(lam_2)]]
"""
    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
# apparently suffers from cancellation, according to Higham's textbook.
# A nice implementation of sinch, defined as sinh(x)/x,
# will apparently work around the cancellation.
a = 0.5 * (lam_1 + lam_2)
b = 0.5 * (lam_1 - lam_2)
return t_12 * _exp_sinch(a, b)
def _fragment_2_1(X, T, s):
"""
A helper function for expm_2009.
Notes
-----
The argument X is modified in-place, but this modification is not the same
as the returned value of the function.
This function also takes pains to do things in ways that are compatible
with sparse matrices, for example by avoiding fancy indexing
and by using methods of the matrices whenever possible instead of
using functions of the numpy or scipy libraries themselves.
"""
# Form X = r_m(2^-s T)
# Replace diag(X) by exp(2^-s diag(T)).
n = X.shape[0]
diag_T = np.ravel(T.diagonal().copy())
# Replace diag(X) by exp(2^-s diag(T)).
scale = 2 ** -s
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
for i in range(s-1, -1, -1):
X = X.dot(X)
# Replace diag(X) by exp(2^-i diag(T)).
scale = 2 ** -i
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
# Replace (first) superdiagonal of X by explicit formula
# for superdiagonal of exp(2^-i T) from Eq (10.42) of
# the author's 2008 textbook
# Functions of Matrices: Theory and Computation.
for k in range(n-1):
lam_1 = scale * diag_T[k]
lam_2 = scale * diag_T[k+1]
t_12 = scale * T[k, k+1]
value = _eq_10_42(lam_1, lam_2, t_12)
X[k, k+1] = value
# Return the updated X matrix.
return X
def _ell(A, m):
"""
A helper function for expm_2009.
Parameters
----------
A : linear operator
A linear operator whose norm of power we care about.
m : int
The power of the linear operator
Returns
-------
value : int
A value related to a bound.
"""
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
# They are coefficients of terms of a generating function series expansion.
choose_2m_m = scipy.special.comb(2*m, m, exact=True)
abs_c_recip = float(choose_2m_m) * float_factorial(2*m + 1)
# This is explained after Eq. (1.2) of the 2009 expm paper.
# It is the "unit roundoff" of IEEE double precision arithmetic.
u = 2**-53
# Compute the one-norm of matrix power p of abs(A).
A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1)
# Treat zero norm as a special case.
if not A_abs_onenorm:
return 0
alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
log2_alpha_div_u = np.log2(alpha/u)
value = int(np.ceil(log2_alpha_div_u / (2 * m)))
return max(value, 0)
|
|
import numpy as np
from random import randint, choice as random_choice
class Board(object):
"""
Represents the board of 2048 game. Internally it is a Numpy 2D array (matrix).
"""
POSSIBLE_MOVES = ["up", "down", "left", "right"]
def __init__(self, width, height, max_random_value=4):
"""
:param width: the board width
:param height: the board height
        :param max_random_value: the maximal value a newly added tile may take, both at the game start and after each round
"""
self.__tile_values = np.array([2**x for x in range(1, 21)])
if max_random_value not in self.__tile_values:
raise ValueError("'max_random_value' must be from numbers of powering 2")
self.__max_random_value = max_random_value
self.__random_tile_values = self.__tile_values[self.__tile_values <= max_random_value]
self.__matrix = self.__get_init_matrix(width, height)
self.__last_random_tile_index = None
self.__score = self.matrix.sum()
@property
def matrix(self):
return self.__matrix
@matrix.setter
def matrix(self, value):
self.__matrix = value
@property
def shape(self):
"""
:return: Tuple of the gaming board shape, i.e. the Numpy 2D array dimensions -> (rows, columns)
"""
return self.matrix.shape
@property
def last_random_tile_index(self):
"""
:return: last inserted random tile index tuple: (row, column)
"""
return self.__last_random_tile_index
@property
def score(self):
return self.__score
def __get_init_matrix(self, width, height):
"""
Creates the gaming matrix with defined shape and two random initial tiles with given maximal value.
:return: initialized gaming matrix
"""
matrix = np.zeros(shape=(height, width), dtype=np.int32)
max_index_height = height - 1
max_index_width = width - 1
first_random_tile = (randint(0, max_index_height), randint(0, max_index_width))
second_random_tile = (randint(0, max_index_height), randint(0, max_index_width))
while second_random_tile == first_random_tile:
second_random_tile = (randint(0, max_index_height), randint(0, max_index_width))
matrix[first_random_tile] = np.random.choice(self.__random_tile_values)
matrix[second_random_tile] = np.random.choice(self.__random_tile_values)
return matrix
def __move_line(self, array):
"""
Moves and merges the tiles on one line of the gaming board.
1) Count how many zero tiles are there.
2) Extract the non-zero tiles in the reverse order.
3) Merge them, count arising zeros.
4) Reverse them to original order.
        5) Add zeros before them.
example:
1) split to the zero and non-zero tiles: [2 4 4 4 0 0 2 0 0 0 2] = [0 0 0 0 0] + [2 4 4 4 2 2]
2) reverse the non-zero tiles array: [2 4 4 4 2 2] -> [2 2 4 4 4 2]
3) merge the non-zero tiles and count the arising zeros: [2 2 4 4 4 2] -> [4 8 4 2] + [0 0]
4) reverse to the original order: [4 8 4 2] -> [2 4 8 4]
5) combine with the original and 'merge' zeros: [0 0 0 0 0] + [0 0] + [2 4 8 4] = [0 0 0 0 0 0 0 2 4 8 4]
result: [2 4 4 4 0 0 2 0 0 0 2] -> [0 0 0 0 0 0 0 2 4 8 4]
:param array: the numpy 1D array to be moved and merged
>>> list(__move_line(np.array([2, 4, 4, 4, 0, 0, 2, 0, 0, 0, 2])))
[0, 0, 0, 0, 0, 0, 0, 2, 4, 8, 4]
>>> list(__move_line(np.array([2, 0, 0, 8, 0, 0, 4, 0, 0, 0, 2])))
[0, 0, 0, 0, 0, 0, 0, 2, 8, 4, 2]
"""
zeros_count = len(array[array == 0])
new_array = np.flipud(array[array > 0])
merge_array = []
i = 0
merge_zeros_count = 0
while i != new_array.shape[0]:
if i+1 == new_array.shape[0]:
merge_array.append(new_array[i])
elif new_array[i] == new_array[i+1]:
merge_array.append(2 * new_array[i])
i += 1
merge_zeros_count += 1
else:
merge_array.append(new_array[i])
i += 1
merge_array = np.flipud(merge_array)
zeros = (zeros_count + merge_zeros_count) * [0]
zeros.extend(merge_array)
return np.array(zeros)
def move(self, direction):
"""
Moves the tiles to defined direction.
It slices the matrix to rows or lines and send them ordered in the movement
direction to the __move_line function.
example:
matrix = [
[2, 2, 4, 2, 8],
[16, 32, 2, 8, 8],
[32, 2, 2, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 2, 2, 0]
]
        'up': slices are columns from bottom to top: [0, 0, 32, 16, 2], [0, 0, 2, 32, 2] etc.
'right': slices are rows from left to right: [2, 2, 4, 2, 8], [16, 32, 2, 8, 8] etc.
:param direction: direction to move on: 'up', 'down', 'left', 'right'
:return: True if move is possible, False otherwise.
"""
original_matrix = np.copy(self.matrix)
if direction == "up":
lines_cols = [np.flipud(self.matrix[:, i]) for i in range(self.shape[1])]
for i, line in enumerate(lines_cols):
self.matrix[:, i] = np.flipud(self.__move_line(line))
elif direction == "down":
lines_cols = [self.matrix[:, i] for i in range(self.shape[1])]
for i, line in enumerate(lines_cols):
self.matrix[:, i] = self.__move_line(line)
elif direction == "right":
lines_rows = [self.matrix[i, :] for i in range(self.shape[0])]
for i, line in enumerate(lines_rows):
self.matrix[i, :] = self.__move_line(line)
elif direction == "left":
lines_rows = [np.flipud(self.matrix[i, :]) for i in range(self.shape[0])]
for i, line in enumerate(lines_rows):
self.matrix[i, :] = np.flipud(self.__move_line(line))
else:
raise ValueError("Unknown direction to move. Possible directions are 'up', 'down', 'left', 'right'")
self.__score = self.matrix.sum()
if np.array_equal(original_matrix, self.matrix):
return False
else:
return True
def insert_random_tile(self):
"""
Inserts the random tile.
:return: True if random tile was added, False otherwise (= the board is full).
"""
zero_indexes = np.where(self.matrix == 0)
if len(zero_indexes[0]):
random_zero_index = random_choice(list(zip(zero_indexes[0], zero_indexes[1])))
self.matrix[random_zero_index] = np.random.choice(self.__random_tile_values)
self.__last_random_tile_index = random_zero_index
self.__score = self.matrix.sum()
return True
else:
return False
def move_insert(self, direction):
"""
Combines the move() and insert_random_tile() functions.
:param direction: direction to move on: 'up', 'down', 'left', 'right'
:return: (True, True) if moved and inserted, (True, False) if moved and not inserted and (False, False) when not moved.
"""
if self.move(direction):
inserted = self.insert_random_tile()
if inserted:
return (True, True)
else:
return (True, False)
else:
return (False, False)
def is_full(self):
"""
        Checks whether the board is full, i.e. contains no zero tiles.
:return: True if board doesn't contain zero tiles, False otherwise.
"""
return not bool(len(np.where(self.matrix == 0)[0]))
def check_gameover(self):
"""
Checks if there are possible moves and if not the game is over.
:return: True if game is over, False otherwise.
"""
original_matrix = np.copy(self.matrix)
if self.is_full():
move_results = []
for move in self.POSSIBLE_MOVES:
move_results.append(self.move(move))
self.matrix = original_matrix
return not any(move_results)
else:
return False
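# Minimal usage sketch (illustrative only, not part of the original module):
#
#     board = Board(width=4, height=4, max_random_value=4)
#     moved, inserted = board.move_insert("left")      # one game round
#     if board.check_gameover():
#         print("game over, final score:", board.score)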
|
|
"""The tests for the Template Binary sensor platform."""
from datetime import timedelta
import logging
from unittest.mock import patch
from homeassistant import setup
from homeassistant.components import binary_sensor
from homeassistant.const import (
ATTR_DEVICE_CLASS,
EVENT_HOMEASSISTANT_START,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import CoreState
import homeassistant.util.dt as dt_util
from tests.common import assert_setup_component, async_fire_time_changed
async def test_setup(hass):
"""Test the setup."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ foo }}",
"device_class": "motion",
}
},
}
}
with assert_setup_component(1):
assert await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
async def test_setup_no_sensors(hass):
"""Test setup with no sensors."""
with assert_setup_component(0):
assert await setup.async_setup_component(
hass, binary_sensor.DOMAIN, {"binary_sensor": {"platform": "template"}}
)
async def test_setup_invalid_device(hass):
"""Test the setup with invalid devices."""
with assert_setup_component(0):
assert await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{"binary_sensor": {"platform": "template", "sensors": {"foo bar": {}}}},
)
async def test_setup_invalid_device_class(hass):
"""Test setup with invalid sensor class."""
with assert_setup_component(0):
assert await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"value_template": "{{ foo }}",
"device_class": "foobarnotreal",
}
},
}
},
)
async def test_setup_invalid_missing_template(hass):
"""Test setup with invalid and missing template."""
with assert_setup_component(0):
assert await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {"test": {"device_class": "motion"}},
}
},
)
async def test_icon_template(hass):
"""Test icon template."""
with assert_setup_component(1):
assert await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"icon_template": "{% if "
"states.binary_sensor.test_state.state == "
"'Works' %}"
"mdi:check"
"{% endif %}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("icon") == ""
hass.states.async_set("binary_sensor.test_state", "Works")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["icon"] == "mdi:check"
async def test_entity_picture_template(hass):
"""Test entity_picture template."""
with assert_setup_component(1):
assert await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"entity_picture_template": "{% if "
"states.binary_sensor.test_state.state == "
"'Works' %}"
"/local/sensor.png"
"{% endif %}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("entity_picture") == ""
hass.states.async_set("binary_sensor.test_state", "Works")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["entity_picture"] == "/local/sensor.png"
async def test_attribute_templates(hass):
"""Test attribute_templates template."""
with assert_setup_component(1):
assert await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"attribute_templates": {
"test_attribute": "It {{ states.sensor.test_state.state }}."
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("test_attribute") == "It ."
hass.states.async_set("sensor.test_state", "Works2")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_state", "Works")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["test_attribute"] == "It Works."
async def test_match_all(hass):
"""Test template that is rerendered on any state lifecycle."""
with patch(
"homeassistant.components.template.binary_sensor."
"BinarySensorTemplate._update_state"
) as _update_state:
with assert_setup_component(1):
assert await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"match_all_template_sensor": {
"value_template": (
"{% for state in states %}"
"{% if state.entity_id == 'sensor.humidity' %}"
"{{ state.entity_id }}={{ state.state }}"
"{% endif %}"
"{% endfor %}"
),
},
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
init_calls = len(_update_state.mock_calls)
hass.states.async_set("sensor.any_state", "update")
await hass.async_block_till_done()
assert len(_update_state.mock_calls) == init_calls
async def test_event(hass):
"""Test the event."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
}
},
}
}
with assert_setup_component(1):
assert await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
async def test_template_delay_on(hass):
"""Test binary sensor template delay on."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_on": 5,
}
},
}
}
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
# check with time changes
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
async def test_template_delay_off(hass):
"""Test binary sensor template delay off."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_off": 5,
}
},
}
}
hass.states.async_set("sensor.test_state", "on")
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
# check with time changes
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
async def test_template_with_templated_delay_on(hass):
"""Test binary sensor template with template delay on."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_on": '{{ ({ "seconds": 6 / 2 }) }}',
}
},
}
}
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=3)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
# check with time changes
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=3)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
async def test_template_with_templated_delay_off(hass):
"""Test binary sensor template with template delay off."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_off": '{{ ({ "seconds": 6 / 2 }) }}',
}
},
}
}
hass.states.async_set("sensor.test_state", "on")
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
future = dt_util.utcnow() + timedelta(seconds=3)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
# check with time changes
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
future = dt_util.utcnow() + timedelta(seconds=3)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
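# Note on the two templated-delay tests above: the delay_on/delay_off option is
# given as a template that renders a mapping such as {"seconds": 3} (here
# '{{ ({ "seconds": 6 / 2 }) }}'), and the 3-second time jumps fired in the
# assertions confirm that it is applied as a three-second delay.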
async def test_template_with_delay_on_based_on_input(hass):
"""Test binary sensor template with template delay on based on input number."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_on": '{{ ({ "seconds": states("input_number.delay")|int }) }}',
}
},
}
}
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
hass.states.async_set("input_number.delay", 3)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=3)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
# set input to 4 seconds
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
hass.states.async_set("input_number.delay", 4)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=2)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
future = dt_util.utcnow() + timedelta(seconds=4)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
async def test_template_with_delay_off_based_on_input(hass):
"""Test binary sensor template with template delay off based on input number."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_off": '{{ ({ "seconds": states("input_number.delay")|int }) }}',
}
},
}
}
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
hass.states.async_set("input_number.delay", 3)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
future = dt_util.utcnow() + timedelta(seconds=3)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
# set input to 4 seconds
hass.states.async_set("sensor.test_state", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
hass.states.async_set("input_number.delay", 4)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_state", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
future = dt_util.utcnow() + timedelta(seconds=2)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "on"
future = dt_util.utcnow() + timedelta(seconds=4)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == "off"
async def test_available_without_availability_template(hass):
"""Ensure availability is true without an availability_template."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "true",
"device_class": "motion",
"delay_off": 5,
}
},
}
}
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state != STATE_UNAVAILABLE
assert state.attributes[ATTR_DEVICE_CLASS] == "motion"
async def test_availability_template(hass):
"""Test availability template."""
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "true",
"device_class": "motion",
"delay_off": 5,
"availability_template": "{{ is_state('sensor.test_state','on') }}",
}
},
}
}
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("sensor.test_state", STATE_OFF)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_UNAVAILABLE
hass.states.async_set("sensor.test_state", STATE_ON)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state != STATE_UNAVAILABLE
assert state.attributes[ATTR_DEVICE_CLASS] == "motion"
async def test_invalid_attribute_template(hass, caplog):
"""Test that errors are logged if rendering template fails."""
hass.states.async_set("binary_sensor.test_sensor", "true")
await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"invalid_template": {
"value_template": "{{ states.binary_sensor.test_sensor }}",
"attribute_templates": {
"test_attribute": "{{ states.binary_sensor.unknown.attributes.picture }}"
},
}
},
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
await hass.async_start()
await hass.async_block_till_done()
assert "test_attribute" in caplog.text
assert "TemplateError" in caplog.text
async def test_invalid_availability_template_keeps_component_available(hass, caplog):
"""Test that an invalid availability keeps the device available."""
await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"my_sensor": {
"value_template": "{{ states.binary_sensor.test_sensor }}",
"availability_template": "{{ x - 12 }}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.my_sensor").state != STATE_UNAVAILABLE
assert ("UndefinedError: 'x' is undefined") in caplog.text
async def test_no_update_template_match_all(hass, caplog):
"""Test that we do not update sensors that match on all."""
hass.states.async_set("binary_sensor.test_sensor", "true")
hass.state = CoreState.not_running
await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"all_state": {"value_template": '{{ "true" }}'},
"all_icon": {
"value_template": "{{ states.binary_sensor.test_sensor.state }}",
"icon_template": "{{ 1 + 1 }}",
},
"all_entity_picture": {
"value_template": "{{ states.binary_sensor.test_sensor.state }}",
"entity_picture_template": "{{ 1 + 1 }}",
},
"all_attribute": {
"value_template": "{{ states.binary_sensor.test_sensor.state }}",
"attribute_templates": {"test_attribute": "{{ 1 + 1 }}"},
},
},
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 5
assert hass.states.get("binary_sensor.all_state").state == "off"
assert hass.states.get("binary_sensor.all_icon").state == "off"
assert hass.states.get("binary_sensor.all_entity_picture").state == "off"
assert hass.states.get("binary_sensor.all_attribute").state == "off"
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.all_state").state == "on"
assert hass.states.get("binary_sensor.all_icon").state == "on"
assert hass.states.get("binary_sensor.all_entity_picture").state == "on"
assert hass.states.get("binary_sensor.all_attribute").state == "on"
hass.states.async_set("binary_sensor.test_sensor", "false")
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.all_state").state == "on"
# Will now process because we have one valid template
assert hass.states.get("binary_sensor.all_icon").state == "off"
assert hass.states.get("binary_sensor.all_entity_picture").state == "off"
assert hass.states.get("binary_sensor.all_attribute").state == "off"
await hass.helpers.entity_component.async_update_entity("binary_sensor.all_state")
await hass.helpers.entity_component.async_update_entity("binary_sensor.all_icon")
await hass.helpers.entity_component.async_update_entity(
"binary_sensor.all_entity_picture"
)
await hass.helpers.entity_component.async_update_entity(
"binary_sensor.all_attribute"
)
assert hass.states.get("binary_sensor.all_state").state == "on"
assert hass.states.get("binary_sensor.all_icon").state == "off"
assert hass.states.get("binary_sensor.all_entity_picture").state == "off"
assert hass.states.get("binary_sensor.all_attribute").state == "off"
async def test_unique_id(hass):
"""Test unique_id option only creates one binary sensor per id."""
await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_cover_01": {
"unique_id": "not-so-unique-anymore",
"value_template": "{{ true }}",
},
"test_template_cover_02": {
"unique_id": "not-so-unique-anymore",
"value_template": "{{ false }}",
},
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
async def test_template_validation_error(hass, caplog):
"""Test binary sensor template delay on."""
caplog.set_level(logging.ERROR)
config = {
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "True",
"icon_template": "{{ states.sensor.test_state.state }}",
"device_class": "motion",
"delay_on": 5,
},
},
},
}
await setup.async_setup_component(hass, binary_sensor.DOMAIN, config)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.attributes.get("icon") == ""
hass.states.async_set("sensor.test_state", "mdi:check")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.attributes.get("icon") == "mdi:check"
hass.states.async_set("sensor.test_state", "invalid_icon")
await hass.async_block_till_done()
assert len(caplog.records) == 1
assert caplog.records[0].message.startswith(
"Error validating template result 'invalid_icon' from template"
)
state = hass.states.get("binary_sensor.test")
assert state.attributes.get("icon") is None
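# Hedged helper sketch (not part of the original test module): the delay tests
# above repeat an "advance the clock, flush, assert the state" pattern. Assuming
# the module-level imports already used above (dt_util, timedelta,
# async_fire_time_changed), it could be factored as follows.
async def _advance_time_and_assert_state(hass, seconds, entity_id, expected):
    """Fire a time-changed event `seconds` ahead and assert the entity's state."""
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=seconds))
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == expected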
|
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import socket
from babel import localedata
import eventlet.patcher
import fixtures
import gettext
import mock
import routes
import six
import webob
from glance.api.v1 import router as router_v1
from glance.api.v2 import router as router_v2
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
from glance import i18n
from glance.openstack.common import jsonutils
from glance.tests import utils as test_utils
class RequestTest(test_utils.BaseTestCase):
def _set_expected_languages(self, all_locales=None, avail_locales=None):
if all_locales is None:
all_locales = []
# Override localedata.locale_identifiers to return some locales.
def returns_some_locales(*args, **kwargs):
return all_locales
self.stubs.Set(localedata, 'locale_identifiers', returns_some_locales)
# Override gettext.find to return other than None for some languages.
def fake_gettext_find(lang_id, *args, **kwargs):
found_ret = '/glance/%s/LC_MESSAGES/glance.mo' % lang_id
if avail_locales is None:
# All locales are available.
return found_ret
languages = kwargs['languages']
if languages[0] in avail_locales:
return found_ret
return None
self.stubs.Set(gettext, 'find', fake_gettext_find)
def test_content_range(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Range"] = 'bytes 10-99/*'
range_ = request.get_content_range()
self.assertEqual(range_.start, 10)
self.assertEqual(range_.stop, 100) # non-inclusive
self.assertIsNone(range_.length)
def test_content_range_invalid(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Range"] = 'bytes=0-99'
self.assertRaises(webob.exc.HTTPBadRequest,
request.get_content_range)
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123')
self.assertRaises(exception.InvalidContentType,
request.get_content_type, ('application/xml',))
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "text/html"
self.assertRaises(exception.InvalidContentType,
request.get_content_type, ('application/xml',))
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type(('application/json',))
self.assertEqual(result, "application/json")
def test_content_type_from_accept_xml(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept_json(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept_xml_json(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept_json_xml_quality(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_language_accept_default(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept-Language"] = "zz-ZZ,zz;q=0.8"
result = request.best_match_language()
self.assertIsNone(result)
def test_language_accept_none(self):
request = wsgi.Request.blank('/tests/123')
result = request.best_match_language()
self.assertIsNone(result)
def test_best_match_language_expected(self):
# If Accept-Language is a supported language, best_match_language()
# returns it.
self._set_expected_languages(all_locales=['it'])
req = wsgi.Request.blank('/', headers={'Accept-Language': 'it'})
self.assertEqual('it', req.best_match_language())
def test_request_match_language_unexpected(self):
# If Accept-Language is a language we do not support,
# best_match_language() returns None.
self._set_expected_languages(all_locales=['it'])
req = wsgi.Request.blank('/', headers={'Accept-Language': 'zh'})
self.assertIsNone(req.best_match_language())
@mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match')
def test_best_match_language_unknown(self, mock_best_match):
        # Test that we are actually invoking language negotiation by webob
request = wsgi.Request.blank('/')
accepted = 'unknown-lang'
request.headers = {'Accept-Language': accepted}
mock_best_match.return_value = None
self.assertIsNone(request.best_match_language())
# If Accept-Language is missing or empty, match should be None
request.headers = {'Accept-Language': ''}
self.assertIsNone(request.best_match_language())
request.headers.pop('Accept-Language')
self.assertIsNone(request.best_match_language())
def test_http_error_response_codes(self):
sample_id, member_id, tag_val, task_id = 'abc', '123', '1', '2'
"""Makes sure v1 unallowed methods return 405"""
unallowed_methods = [
('/images', ['PUT', 'DELETE', 'HEAD', 'PATCH']),
('/images/detail', ['POST', 'PUT', 'DELETE', 'PATCH']),
('/images/%s' % sample_id, ['POST', 'PATCH']),
('/images/%s/members' % sample_id,
['POST', 'DELETE', 'HEAD', 'PATCH']),
('/images/%s/members/%s' % (sample_id, member_id),
['POST', 'HEAD', 'PATCH']),
]
api = test_utils.FakeAuthMiddleware(router_v1.API(routes.Mapper()))
for uri, methods in unallowed_methods:
for method in methods:
req = webob.Request.blank(uri)
req.method = method
res = req.get_response(api)
self.assertEqual(405, res.status_int)
"""Makes sure v2 unallowed methods return 405"""
unallowed_methods = [
('/schemas/image', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/images', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/member', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/members', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/task', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/schemas/tasks', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']),
('/images', ['PUT', 'DELETE', 'PATCH', 'HEAD']),
('/images/%s' % sample_id, ['POST', 'PUT', 'HEAD']),
('/images/%s/file' % sample_id,
['POST', 'DELETE', 'PATCH', 'HEAD']),
('/images/%s/tags/%s' % (sample_id, tag_val),
['GET', 'POST', 'PATCH', 'HEAD']),
('/images/%s/members' % sample_id,
['PUT', 'DELETE', 'PATCH', 'HEAD']),
('/images/%s/members/%s' % (sample_id, member_id),
['POST', 'PATCH', 'HEAD']),
('/tasks', ['PUT', 'DELETE', 'PATCH', 'HEAD']),
('/tasks/%s' % task_id, ['POST', 'PUT', 'PATCH', 'HEAD']),
]
api = test_utils.FakeAuthMiddleware(router_v2.API(routes.Mapper()))
for uri, methods in unallowed_methods:
for method in methods:
req = webob.Request.blank(uri)
req.method = method
res = req.get_response(api)
self.assertEqual(405, res.status_int)
"""Makes sure not implemented methods return 501"""
req = webob.Request.blank('/schemas/image')
req.method = 'NonexistentMethod'
res = req.get_response(api)
self.assertEqual(501, res.status_int)
class ResourceTest(test_utils.BaseTestCase):
def test_get_action_args(self):
env = {
'wsgiorg.routing_args': [
None,
{
'controller': None,
'format': None,
'action': 'update',
'id': 12,
},
],
}
expected = {'action': 'update', 'id': 12}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(actual, expected)
def test_get_action_args_invalid_index(self):
env = {'wsgiorg.routing_args': []}
expected = {}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(actual, expected)
def test_get_action_args_del_controller_error(self):
actions = {'format': None,
'action': 'update',
'id': 12}
env = {'wsgiorg.routing_args': [None, actions]}
expected = {'action': 'update', 'id': 12}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(actual, expected)
def test_get_action_args_del_format_error(self):
actions = {'action': 'update', 'id': 12}
env = {'wsgiorg.routing_args': [None, actions]}
expected = {'action': 'update', 'id': 12}
actual = wsgi.Resource(None, None, None).get_action_args(env)
self.assertEqual(actual, expected)
def test_dispatch(self):
class Controller(object):
def index(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(None, None, None)
actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
expected = ('on', 'off')
self.assertEqual(actual, expected)
def test_dispatch_default(self):
class Controller(object):
def default(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(None, None, None)
actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
expected = ('on', 'off')
self.assertEqual(actual, expected)
def test_dispatch_no_default(self):
class Controller(object):
def show(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(None, None, None)
self.assertRaises(AttributeError, resource.dispatch, Controller(),
'index', 'on', pants='off')
def test_call(self):
class FakeController(object):
def index(self, shirt, pants=None):
return (shirt, pants)
resource = wsgi.Resource(FakeController(), None, None)
def dispatch(self, obj, action, *args, **kwargs):
if isinstance(obj, wsgi.JSONRequestDeserializer):
return []
if isinstance(obj, wsgi.JSONResponseSerializer):
raise webob.exc.HTTPForbidden()
self.stubs.Set(wsgi.Resource, 'dispatch', dispatch)
request = wsgi.Request.blank('/')
response = resource.__call__(request)
self.assertIsInstance(response, webob.exc.HTTPForbidden)
self.assertEqual(response.status_code, 403)
@mock.patch.object(wsgi, 'translate_exception')
def test_resource_call_error_handle_localized(self,
mock_translate_exception):
class Controller(object):
def delete(self, req, identity):
raise webob.exc.HTTPBadRequest(explanation='Not Found')
actions = {'action': 'delete', 'identity': 12}
env = {'wsgiorg.routing_args': [None, actions]}
request = wsgi.Request.blank('/tests/123', environ=env)
message_es = 'No Encontrado'
resource = wsgi.Resource(Controller(),
wsgi.JSONRequestDeserializer(),
None)
translated_exc = webob.exc.HTTPBadRequest(message_es)
mock_translate_exception.return_value = translated_exc
e = self.assertRaises(webob.exc.HTTPBadRequest,
resource, request)
self.assertEqual(message_es, str(e))
@mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match')
@mock.patch.object(i18n, 'translate')
def test_translate_exception(self, mock_translate, mock_best_match):
mock_translate.return_value = 'No Encontrado'
mock_best_match.return_value = 'de'
req = wsgi.Request.blank('/tests/123')
req.headers["Accept-Language"] = "de"
e = webob.exc.HTTPNotFound(explanation='Not Found')
e = wsgi.translate_exception(req, e)
self.assertEqual('No Encontrado', e.explanation)
class JSONResponseSerializerTest(test_utils.BaseTestCase):
def test_to_json(self):
fixture = {"key": "value"}
expected = '{"key": "value"}'
actual = wsgi.JSONResponseSerializer().to_json(fixture)
self.assertEqual(actual, expected)
def test_to_json_with_date_format_value(self):
fixture = {"date": datetime.datetime(1, 3, 8, 2)}
expected = '{"date": "0001-03-08T02:00:00"}'
actual = wsgi.JSONResponseSerializer().to_json(fixture)
self.assertEqual(actual, expected)
def test_to_json_with_more_deep_format(self):
fixture = {"is_public": True, "name": [{"name1": "test"}]}
expected = {"is_public": True, "name": [{"name1": "test"}]}
actual = wsgi.JSONResponseSerializer().to_json(fixture)
actual = jsonutils.loads(actual)
for k in expected:
self.assertEqual(expected[k], actual[k])
def test_to_json_with_set(self):
fixture = set(["foo"])
expected = '["foo"]'
actual = wsgi.JSONResponseSerializer().to_json(fixture)
self.assertEqual(actual, expected)
def test_default(self):
fixture = {"key": "value"}
response = webob.Response()
wsgi.JSONResponseSerializer().default(response, fixture)
self.assertEqual(response.status_int, 200)
content_types = filter(lambda h: h[0] == 'Content-Type',
response.headerlist)
self.assertEqual(len(content_types), 1)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.body, '{"key": "value"}')
class JSONRequestDeserializerTest(test_utils.BaseTestCase):
def test_has_body_no_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = 'asdf'
request.headers.pop('Content-Length')
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_zero_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = 'asdf'
request.headers['Content-Length'] = 0
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_has_body_has_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = 'asdf'
self.assertIn('Content-Length', request.headers)
self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
def test_no_body_no_content_length(self):
request = wsgi.Request.blank('/')
self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request))
def test_from_json(self):
fixture = '{"key": "value"}'
expected = {"key": "value"}
actual = wsgi.JSONRequestDeserializer().from_json(fixture)
self.assertEqual(actual, expected)
def test_from_json_malformed(self):
fixture = 'kjasdklfjsklajf'
self.assertRaises(webob.exc.HTTPBadRequest,
wsgi.JSONRequestDeserializer().from_json, fixture)
def test_default_no_body(self):
request = wsgi.Request.blank('/')
actual = wsgi.JSONRequestDeserializer().default(request)
expected = {}
self.assertEqual(actual, expected)
def test_default_with_body(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = '{"key": "value"}'
actual = wsgi.JSONRequestDeserializer().default(request)
expected = {"body": {"key": "value"}}
self.assertEqual(actual, expected)
def test_has_body_has_transfer_encoding(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = 'fake_body'
request.headers['transfer-encoding'] = 0
self.assertIn('transfer-encoding', request.headers)
self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request))
def test_get_bind_addr_default_value(self):
expected = ('0.0.0.0', '123456')
actual = wsgi.get_bind_addr(default_port="123456")
self.assertEqual(expected, actual)
class ServerTest(test_utils.BaseTestCase):
def test_create_pool(self):
"""Ensure the wsgi thread pool is an eventlet.greenpool.GreenPool."""
actual = wsgi.Server(threads=1).create_pool()
self.assertIsInstance(actual, eventlet.greenpool.GreenPool)
@mock.patch.object(wsgi, 'get_socket')
def test_http_keepalive(self, mock_get_socket):
fake_socket = 'fake_socket'
mock_get_socket.return_value = 'fake_socket'
self.config(http_keepalive=False)
self.config(workers=0)
server = wsgi.Server(threads=1)
# mocking eventlet.wsgi server method to check it is called with
# configured 'http_keepalive' value.
with mock.patch.object(eventlet.wsgi,
'server') as mock_server:
fake_application = "fake-application"
server.start(fake_application, 0)
server.wait()
mock_server.assert_called_once_with(fake_socket,
fake_application,
log=mock.ANY,
debug=False,
custom_pool=server.pool,
keepalive=False)
class TestHelpers(test_utils.BaseTestCase):
def test_headers_are_unicode(self):
"""
Verifies that the headers returned by conversion code are unicode.
Headers are passed via http in non-testing mode, which automatically
converts them to unicode. Verifying that the method does the
conversion proves that we aren't passing data that works in tests
but will fail in production.
"""
fixture = {'name': 'fake public image',
'is_public': True,
'size': 19,
'location': "file:///tmp/glance-tests/2",
'properties': {'distro': 'Ubuntu 10.04 LTS'}}
headers = utils.image_meta_to_http_headers(fixture)
for k, v in six.iteritems(headers):
self.assertIsInstance(v, unicode)
def test_data_passed_properly_through_headers(self):
"""
Verifies that data is the same after being passed through headers
"""
        fixture = {'is_public': True,
                   'deleted': False,
                   'name': None,
'size': 19,
'location': "file:///tmp/glance-tests/2",
'properties': {'distro': 'Ubuntu 10.04 LTS'}}
headers = utils.image_meta_to_http_headers(fixture)
class FakeResponse():
pass
response = FakeResponse()
response.headers = headers
result = utils.get_image_meta_from_headers(response)
for k, v in six.iteritems(fixture):
if v is not None:
self.assertEqual(v, result[k])
else:
self.assertNotIn(k, result)
class GetSocketTestCase(test_utils.BaseTestCase):
def setUp(self):
super(GetSocketTestCase, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
"glance.common.wsgi.get_bind_addr",
lambda x: ('192.168.0.13', 1234)))
addr_info_list = [(2, 1, 6, '', ('192.168.0.13', 80)),
(2, 2, 17, '', ('192.168.0.13', 80)),
(2, 3, 0, '', ('192.168.0.13', 80))]
self.useFixture(fixtures.MonkeyPatch(
"glance.common.wsgi.socket.getaddrinfo",
lambda *x: addr_info_list))
self.useFixture(fixtures.MonkeyPatch(
"glance.common.wsgi.time.time",
mock.Mock(side_effect=[0, 1, 5, 10, 20, 35])))
self.useFixture(fixtures.MonkeyPatch(
"glance.common.wsgi.utils.validate_key_cert",
lambda *x: None))
wsgi.CONF.cert_file = '/etc/ssl/cert'
wsgi.CONF.key_file = '/etc/ssl/key'
wsgi.CONF.ca_file = '/etc/ssl/ca_cert'
wsgi.CONF.tcp_keepidle = 600
def test_correct_get_socket(self):
mock_socket = mock.Mock()
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.ssl.wrap_socket',
mock_socket))
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.eventlet.listen',
lambda *x, **y: None))
wsgi.get_socket(1234)
self.assertIn(mock.call().setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1), mock_socket.mock_calls)
self.assertIn(mock.call().setsockopt(
socket.SOL_SOCKET,
socket.SO_KEEPALIVE,
1), mock_socket.mock_calls)
if hasattr(socket, 'TCP_KEEPIDLE'):
self.assertIn(mock.call().setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
wsgi.CONF.tcp_keepidle), mock_socket.mock_calls)
def test_get_socket_without_all_ssl_reqs(self):
wsgi.CONF.key_file = None
self.assertRaises(RuntimeError, wsgi.get_socket, 1234)
def test_get_socket_with_bind_problems(self):
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.eventlet.listen',
mock.Mock(side_effect=(
[wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None]))))
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.ssl.wrap_socket',
lambda *x, **y: None))
self.assertRaises(RuntimeError, wsgi.get_socket, 1234)
def test_get_socket_with_unexpected_socket_errno(self):
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.eventlet.listen',
mock.Mock(side_effect=wsgi.socket.error(socket.errno.ENOMEM))))
self.useFixture(fixtures.MonkeyPatch(
'glance.common.wsgi.ssl.wrap_socket',
lambda *x, **y: None))
self.assertRaises(wsgi.socket.error, wsgi.get_socket, 1234)
|
|
#!/usr/bin/env python
import xml.etree.ElementTree as ElementTree
import os
import re
import sys
# This parses buck-out/gen/jacoco/code-coverage/coverage.xml after
# `buck test --all --code-coverage --code-coverage-format xml --no-results-cache`
# has been run.
PATH_TO_CODE_COVERAGE_XML = 'buck-out/gen/jacoco/code-coverage/coverage.xml'
# If the code coverage for the project drops below this threshold,
# fail the build. This is designed to far enough below our current
# standards (80% coverage) that this should be possible to sustain
# given the inevitable ebb and flow of the code coverage level.
# Note: our Darwin test machines don't have all the packages
# installed that we do on Linux, so the code coverage there is
# naturally lower.
CODE_COVERAGE_GOAL = {
'Linux': 78,
'Darwin': 68,
}
def is_covered_package_name(package_name):
"""We exclude third-party code."""
if not package_name.startswith('com/facebook/buck/'):
return False
return True
def is_covered_class_name(class_name, generated_class_names):
"""We exclude classes generated by immutables."""
for generated_class_name in generated_class_names:
if class_name == generated_class_name or class_name.startswith(generated_class_name + '$'):
return False
return True
def get_generated_class_names():
"""Any files in a generated directory are assumed to contain a generated class."""
package_re = re.compile(r'__\w+_gen__/(.*)$')
for root, dirs, files in os.walk('buck-out/annotation'):
classes = [os.path.splitext(f)[0] for f in files if f.endswith('.java')]
if not classes:
continue
m = package_re.search(root)
if m:
package = m.group(1)
for c in classes:
yield os.path.join(package, c)
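# Illustrative note (the path below is hypothetical, not taken from a real build):
# a generated source such as
#   buck-out/annotation/some-rule/__whatever_gen__/com/facebook/buck/foo/ImmutableBar.java
# is yielded above as 'com/facebook/buck/foo/ImmutableBar', so that class (and any
# nested 'ImmutableBar$...' classes) is excluded by is_covered_class_name().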
def calculate_code_coverage():
class CoverageTree:
def __init__(self, name):
self.name = name
self.children = []
def get_name(self):
return self.name
def add_child(self, child):
self.children.append(child)
def get_number_of_children(self):
return len(self.children)
def get_covered(self, name):
return sum(map(lambda x: x.get_covered(name), self.children))
def get_missed(self, name):
return sum(map(lambda x: x.get_missed(name), self.children))
def get_percentage(self, name):
return round(
100 *
self.get_covered(name) /
float(self.get_missed(name) + self.get_covered(name)),
2)
class CoverageLeaf:
def __init__(self):
self.missed = {}
self.covered = {}
def get_covered(self, name):
return self.covered[name]
def set_covered(self, name, value):
self.covered[name] = value
def get_missed(self, name):
return self.missed[name]
def set_missed(self, name, value):
self.missed[name] = value
root = ElementTree.parse(PATH_TO_CODE_COVERAGE_XML)
# The length of the longest Java package included in the report.
# Used for display purposes.
max_package_name = 0
# Coverage types measured by Jacoco.
TYPES = set([
'BRANCH',
'CLASS',
'COMPLEXITY',
'INSTRUCTION',
'LINE',
'METHOD',
])
# List determines column display order in final report.
COLUMN_NAMES = [
'INSTRUCTION',
'LINE',
'BRANCH',
'METHOD',
'CLASS',
'LOC2FIX',
]
# Column by which rows will be sorted in the final report.
SORT_TYPE = 'INSTRUCTION'
# Keys are values from TYPES; values are integers.
total_covered_by_type = {}
total_missed_plus_covered_type = {}
for coverage_type in TYPES:
total_covered_by_type[coverage_type] = 0
total_missed_plus_covered_type[coverage_type] = 0
# Values are dicts. Will have key 'package_name' as well as all values
# from TYPES as keys. For entries from TYPES, values are the corresponding
# coverage percentage for that type.
coverage_by_package = []
# Track count of untested lines to see which packages
# have the largest amount of untested code.
missed_lines_by_package = {}
total_missed_lines = 0
generated_class_names = frozenset(get_generated_class_names())
for package_element in root.findall('.//package'):
package_name = package_element.attrib['name']
if not is_covered_package_name(package_name):
continue
max_package_name = max(max_package_name, len(package_name))
coverage = CoverageTree(package_name)
coverage_by_package.append(coverage)
for class_element in package_element.findall('./class'):
class_name = class_element.attrib['name']
if not is_covered_class_name(class_name, generated_class_names):
continue
class_leaf = CoverageLeaf()
coverage.add_child(class_leaf)
for counter in class_element.findall('./counter'):
counter_type = counter.attrib.get('type')
missed = int(counter.attrib.get('missed'))
class_leaf.set_missed(counter_type, missed)
covered = int(counter.attrib.get('covered'))
class_leaf.set_covered(counter_type, covered)
total_covered_by_type[counter_type] += covered
total_missed_plus_covered_type[counter_type] += missed + covered
if counter_type == 'LINE':
missed_lines_by_package[package_name] = missed
total_missed_lines += missed
def pair_compare(p1, p2):
# High percentage should be listed first.
diff1 = cmp(p2.get_percentage(SORT_TYPE), p1.get_percentage(SORT_TYPE))
if diff1:
return diff1
# Ties are broken by lexicographic comparison.
        return cmp(p1.get_name(), p2.get_name())
def label_with_padding(label):
return label + ' ' * (max_package_name - len(label)) + ' '
def column_format_str(column_name):
if column_name == 'LOC2FIX':
return '%(' + column_name + ')8d'
else:
return '%(' + column_name + ')7.2f%%'
def print_separator(sep_len):
print '-' * sep_len
def get_color_for_percentage(percentage):
# \033[92m is OKGREEN.
# \033[93m is WARNING.
platform = os.uname()[0]
return '\033[92m' if percentage >= CODE_COVERAGE_GOAL[platform] else '\033[93m'
# Print header.
# Type column headers are right-justified and truncated to 7 characters.
column_names = map(lambda x: x[0:7].rjust(7), COLUMN_NAMES)
print label_with_padding('PACKAGE') + ' ' + ' '.join(column_names)
separator_len = max_package_name + 1 + len(column_names) * 8
print_separator(separator_len)
# Create the format string to use for each row.
format_string = '%(color)s%(label)s'
for column in COLUMN_NAMES:
format_string += column_format_str(column)
format_string += '\033[0m'
# Print rows sorted by line coverage then package name.
coverage_by_package = filter(lambda x: x.get_number_of_children() > 0, coverage_by_package)
coverage_by_package.sort(cmp=pair_compare)
for item in coverage_by_package:
info = {}
pkg = item.get_name()
for type_name in TYPES:
try:
info[type_name] = item.get_percentage(type_name)
except KeyError:
# It is possible to have a module of Java code with no branches.
info['BRANCH'] = 100
info['color'] = get_color_for_percentage(item.get_percentage(SORT_TYPE))
info['label'] = label_with_padding(pkg)
info['LOC2FIX'] = missed_lines_by_package[pkg]
print format_string % info
# Print aggregate numbers.
overall_percentages = {}
for coverage_type in TYPES:
numerator = total_covered_by_type[coverage_type]
denominator = total_missed_plus_covered_type[coverage_type]
if denominator > 0:
percentage = 100.0 * numerator / denominator
else:
percentage = 100.0
overall_percentages[coverage_type] = percentage
observed_percentage = overall_percentages[SORT_TYPE]
overall_percentages['color'] = get_color_for_percentage(observed_percentage)
overall_percentages['label'] = label_with_padding('TOTAL')
overall_percentages['LOC2FIX'] = total_missed_lines
print_separator(separator_len)
print format_string % overall_percentages
return observed_percentage
def main():
"""Exits with 0 or 1 depending on whether the code coverage goal is met."""
coverage = calculate_code_coverage()
platform = os.uname()[0]
if coverage < CODE_COVERAGE_GOAL[platform]:
data = {
'expected': CODE_COVERAGE_GOAL[platform],
'observed': coverage,
}
print '\033[91mFAIL: %(observed).2f%% does not meet goal of %(expected).2f%%\033[0m' % data
sys.exit(1)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script aims to demonstrate the
benefit (with respect to precision)
of the ILU implementation compared
to e.g. ignoring sub and super diagonals
completely.
Note that the python wrapper is quite inefficient and
hence not suitable for benchmarking.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
try:
from future.builtins import *
except ImportError:
pass
from math import exp
from itertools import product
import argh
import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.stats import norm
from fakelu import fast_FakeLU
def rms(x, axis=None):
return np.sqrt(np.mean(x**2, axis=axis))
def rnd(dim):
return 2*(np.random.random(dim) - 0.5)
def get_test_system(N, n, ndiag, main_diag_factor, off_diag_factor, seed):
np.random.seed(seed)
A = np.zeros((N*n, N*n))
blocks, sub, sup, x_blk = [], [], [], []
b = rnd(N*n)
for bi in range(N):
cur_block = rnd((n, n))
for i in range(n):
cur_block[i, i] *= main_diag_factor
blocks.append(cur_block)
slc = slice(n*bi, n*(bi+1))
A[slc, slc] = cur_block
x_blk.append(lu_solve(lu_factor(cur_block), b[slc]))
for di in range(ndiag):
sub_ = rnd((N-di-1)*n)*off_diag_factor
sup_ = rnd((N-di-1)*n)*off_diag_factor
for i in range(n*(N-di-1)):
A[(di+1)*n + i, i] = sub_[i]
A[i, (di+1)*n + i] = sup_[i]
sub.append(sub_)
sup.append(sup_)
fLU = fast_FakeLU(A, n, ndiag)
x_ref = lu_solve(lu_factor(A), b)
x_ilu = fLU.solve(b)
return A, b, x_ref, x_ilu, x_blk
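# Hedged demo sketch (not part of the original script): a minimal comparison of
# the ILU solve against the block-only solve for a small, diagonally dominant
# system, reusing get_test_system above. The sizes, factors and seed are chosen
# purely for illustration; the function is never called automatically.
def _demo_small_system():
    A, b, x_ref, x_ilu, x_blk = get_test_system(
        N=4, n=3, ndiag=1, main_diag_factor=8.0, off_diag_factor=0.125, seed=42)
    print('ILU RMSD:  ', rms(x_ilu - x_ref))
    print('Block RMSD:', rms(np.array(x_blk).flatten() - x_ref))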
def main(N=32, n=32, ndiag=1, main_diag_factor=1.0, off_diag_factor=1.0,
base_seed=0, seed_range=1, fact_pow2_min=4, fact_pow2_max=18,
plot=False, npows=0, scan_ndiag=False, savefig='None'):
"""
Ax = b
"""
npows = npows or fact_pow2_max - fact_pow2_min
factors = np.linspace(fact_pow2_min, fact_pow2_max, npows)
ilu_rmsd, blk_rmsd = [], []
superiority = []
if scan_ndiag:
if seed_range != 1:
raise ValueError("Cannot plot mulitple seeds and scan ndiag")
ndiag_range = range(1, ndiag+1)
else:
ndiag_range = [ndiag]
combos = product(ndiag_range, range(seed_range))
#for seed in range(seed_range):
#seed = base_seed
#for ndiag in ndiag_range:
nseries = 0
for ndiag, seed in combos:
nseries += 1
ilu_rmsd_local, blk_rmsd_local = [], []
for diag_fact_pow in factors:
A, b, x_ref, x_ilu, x_blk = get_test_system(
N, n, ndiag,
main_diag_factor*2**diag_fact_pow,
off_diag_factor/2**diag_fact_pow,
seed+base_seed)
ilu_err = x_ilu - x_ref
blk_err = np.array(x_blk).flatten() - x_ref
ilu_rmsd_local.append(rms(ilu_err))
blk_rmsd_local.append(rms(blk_err))
if plot and seed_range == 1 and not scan_ndiag:
import matplotlib.pyplot as plt
if npows == 1:
for idx in (1, 2):
plt.subplot(3, 1, idx)
plt.plot(ilu_err, label='ILU error')
for idx in (1, 3):
plt.subplot(3, 1, idx)
plt.plot(blk_err, label='block error')
for idx in (1, 2, 3):
plt.subplot(3, 1, idx)
plt.legend()
plt.show()
else:
plt.semilogy(ilu_rmsd, label="ILU RMSD")
plt.semilogy(blk_rmsd, label="Block RMSD")
plt.legend()
plt.show()
ilu_rmsd.append(np.array(ilu_rmsd_local))
blk_rmsd.append(np.array(blk_rmsd_local))
superiority.append(np.array(blk_rmsd_local) / np.array(ilu_rmsd_local))
if np.any(superiority[-1] < 1e-3):
print('1000 x inferior:', seed)
if plot and (seed_range > 1 or scan_ndiag):
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 14))
if scan_ndiag:
plot_kwargs = {}
else:
decay = exp(-((seed_range-1)/50.0))
plot_kwargs = dict(alpha=1.0 - 0.9*(1-decay), linewidth=0.2 + 0.8*decay)
ax = plt.subplot(3, 1, 1)
ax.set_xscale('log', basex=10)
ax.set_yscale('log', basey=10)
clr = lambda idx, rgb: [1.0 - (nseries-idx)/float(nseries) if clridx==rgb else 0.0 for clridx in range(3)]
for si, series in enumerate(ilu_rmsd):
if scan_ndiag:
c = clr(si, 2) # blue
lbl = str(ndiag_range[si])
else:
c = 'b'
lbl = None
plt.plot(2**factors, series, color=c, label=lbl, **plot_kwargs)
plt.title("ILU")
plt.xlabel("weight")
plt.ylabel("RMSD")
if scan_ndiag:
plt.legend(loc='best')
ax = plt.subplot(3, 1, 2)
ax.set_xscale('log', basex=10)
ax.set_yscale('log', basey=10)
for si, series in enumerate(blk_rmsd):
if scan_ndiag:
c = clr(si, 1) # green
lbl = str(ndiag_range[si])
else:
c = 'g'
lbl = None
plt.plot(2**factors, series, color=c, label=lbl, **plot_kwargs)
plt.title("Block RMSD")
plt.xlabel("weight")
plt.ylabel("RMSD")
if scan_ndiag:
plt.legend(loc='best')
ax = plt.subplot(3, 1, 3)
ax.set_xscale('log', basex=10)
ax.set_yscale('log', basey=10)
for si, series in enumerate(superiority):
if scan_ndiag:
c = clr(si, 0) # red
lbl = str(ndiag_range[si])
else:
c = 'k'
lbl = None
plt.plot(2**factors, series, color=c, label=lbl, **plot_kwargs)
plt.title("BLOCK RMSD / ILU RMSD")
plt.xlabel("weight")
plt.ylabel("RMSD fraction")
if scan_ndiag:
plt.legend(loc='best')
plt.tight_layout()
if savefig == 'None':
plt.show()
else:
plt.savefig(savefig, dpi=300)
if __name__ == '__main__':
argh.dispatch_command(main)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# pylint: disable=protected-access
_is_sequence = rnn_cell._is_sequence
_unpacked_state = rnn_cell._unpacked_state
_packed_state = rnn_cell._packed_state
# pylint: enable=protected-access
def rnn(cell, inputs, initial_state=None, dtype=None,
sequence_length=None, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
The simplest form of RNN network generated is:
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time t for batch row b,
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, input_size].
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a tensor of appropriate type and shape `[batch_size x cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
- outputs is a length T list of outputs (one for each input)
- state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the input depth
(column size) cannot be inferred from inputs via shape inference.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
# Temporarily avoid EmbeddingWrapper and seq2seq badness
# TODO(lukaszkaiser): remove EmbeddingWrapper
if inputs[0].get_shape().ndims != 1:
(fixed_batch_size, input_size) = inputs[0].get_shape().with_rank(2)
if input_size.value is None:
raise ValueError(
"Input size (second dimension of inputs[0]) must be accessible via "
"shape inference, but saw value None.")
else:
fixed_batch_size = inputs[0].get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
batch_size = array_ops.shape(inputs[0])[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None: # Prepare variables
sequence_length = math_ops.to_int32(sequence_length)
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs[0].dtype)
zero_output.set_shape(
tensor_shape.TensorShape([fixed_batch_size.value, cell.output_size]))
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
for time, input_ in enumerate(inputs):
if time > 0: vs.get_variable_scope().reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
if sequence_length is not None:
(output, state) = _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell)
else:
(output, state) = call_cell()
outputs.append(output)
return (outputs, state)
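# Hedged usage sketch (illustrative only, not part of this module): with the
# classic list-of-timesteps API documented above, a call might look like
#
#   cell = rnn_cell.BasicLSTMCell(128)
#   inputs = [array_ops.placeholder(dtypes.float32, [32, 64]) for _ in range(10)]
#   outputs, final_state = rnn(cell, inputs, dtype=dtypes.float32)
#
# BasicLSTMCell, the batch size (32), input depth (64) and sequence length (10)
# are assumptions chosen for the example.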
def state_saving_rnn(cell, inputs, state_saver, state_name,
sequence_length=None, scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of `RNNCell`.
inputs: A length T list of inputs, each a tensor of shape
`[batch_size, input_size]`.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: Python string or tuple of strings. The name to use with the
state_saver. If the cell returns tuples of states (i.e.,
`cell.state_size` is a tuple) then `state_name` should be a tuple of
strings having the same length as `cell.state_size`. Otherwise it should
be a single string.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
      state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the arity and
type of `state_name` does not match that of `cell.state_size`.
"""
state_size = cell.state_size
state_is_tuple = _is_sequence(state_size)
state_name_tuple = _is_sequence(state_name)
if state_is_tuple != state_name_tuple:
raise ValueError(
"state_name should be the same type as cell.state_size. "
"state_name: %s, cell.state_size: %s"
% (str(state_name), str(state_size)))
if state_is_tuple:
state_name_flat = _unpacked_state(state_name)
state_size_flat = _unpacked_state(state_size)
if len(state_name_flat) != len(state_size_flat):
raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d"
% (len(state_name_flat), len(state_size_flat)))
initial_state = _packed_state(
structure=state_name,
state=[state_saver.state(n) for n in state_name_flat])
else:
initial_state = state_saver.state(state_name)
(outputs, state) = rnn(cell, inputs, initial_state=initial_state,
sequence_length=sequence_length, scope=scope)
if state_is_tuple:
state_flat = _unpacked_state(state)
save_state = [
state_saver.save_state(n, s)
for (n, s) in zip(state_name_flat, state_flat)]
else:
save_state = [state_saver.save_state(state_name, state)]
with ops.control_dependencies(save_state):
outputs[-1] = array_ops.identity(outputs[-1])
return (outputs, state)
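# Hedged sketch of the state_saver interface described above (illustrative, not
# part of this module): any object exposing state(name) -> Tensor and
# save_state(name, value) works, for example
#
#   class _DictStateSaver(object):
#     def __init__(self, variables):
#       self._variables = variables      # maps state_name -> tf.Variable
#     def state(self, name):
#       return self._variables[name]
#     def save_state(self, name, value):
#       return self._variables[name].assign(value)
#
# The class name and the assign()-based storage are assumptions for the example.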
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape [batch_size, output_size]
new_state is a `Tensor` matrix of shape [batch_size, state_size]
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for dynamic_rnn, where the input tensor
matches max_sequence_length, and using conditionals just slows
everything down.
Returns:
A tuple of (final_output, final_state) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
state_is_tuple = _is_sequence(state)
orig_state = state
# Convert state to a list for ease of use
state = list(_unpacked_state(state)) if state_is_tuple else [state]
state_shape = [s.get_shape() for s in state]
def _copy_some_through(new_output, new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
copy_cond = (time >= sequence_length)
return ([math_ops.select(copy_cond, zero_output, new_output)]
+ [math_ops.select(copy_cond, old_s, new_s)
for (old_s, new_s) in zip(state, new_state)])
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
new_state = (
list(_unpacked_state(new_state)) if state_is_tuple else [new_state])
if len(state) != len(new_state):
raise ValueError(
"Input and output state tuple lengths do not match: %d vs. %d"
% (len(state), len(new_state)))
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: [new_output] + new_state,
# else copy some of it through
lambda: _copy_some_through(new_output, new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
new_state = (
list(_unpacked_state(new_state)) if state_is_tuple else [new_state])
if len(state) != len(new_state):
raise ValueError(
"Input and output state tuple lengths do not match: %d vs. %d"
% (len(state), len(new_state)))
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: [zero_output] + list(state)
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
(final_output, final_state) = (
final_output_and_state[0], final_output_and_state[1:])
final_output.set_shape(zero_output.get_shape())
for final_state_i, state_shape_i in zip(final_state, state_shape):
final_state_i.set_shape(state_shape_i)
if state_is_tuple:
return (
final_output,
_packed_state(structure=orig_state, state=final_state))
else:
return (final_output, final_state[0])
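# A small numpy illustration of the selective copy-through performed by
# ``_rnn_step``: batch rows whose sequence has already ended (time >= length)
# keep the zero output and the old state, while the remaining rows take the
# freshly computed values.  Purely illustrative; the real code applies
# ``math_ops.select`` to tensors rather than ``np.where`` to arrays.
def _rnn_step_copy_through_demo():
  import numpy as np
  time = 2
  sequence_length = np.array([3, 2, 1])          # per-row lengths, batch of 3
  zero_output = np.zeros((3, 2))
  old_state = np.full((3, 2), 5.0)
  new_output = np.full((3, 2), 7.0)
  new_state = np.full((3, 2), 9.0)
  finished = (time >= sequence_length)[:, None]  # broadcast over the depth axis
  final_output = np.where(finished, zero_output, new_output)
  final_state = np.where(finished, old_state, new_state)
  return final_output, final_state               # rows 1 and 2 are copied through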
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
input_shape = tensor_shape.matrix(None, None)
for input_ in input_seq:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.pack(input_seq)
# TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
if lengths is not None:
lengths = math_ops.to_int64(lengths)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unpack(s_reversed)
for r in result:
r.set_shape(input_shape)
return result
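# A numpy sketch of what ``_reverse_seq`` computes: each batch column is
# reversed only over its first ``lengths[b]`` time steps, and anything beyond
# that (padding) stays in place.  Illustrative only; the real implementation
# packs the list and uses ``array_ops.reverse_sequence``.
def _reverse_seq_demo():
  import numpy as np
  seq = np.arange(4 * 2).reshape(4, 2)  # (time=4, batch=2), depth omitted
  lengths = np.array([3, 4])
  reversed_seq = seq.copy()
  for b in range(seq.shape[1]):
    n = lengths[b]
    reversed_seq[:n, b] = seq[:n, b][::-1]  # reverse only the valid prefix
  return reversed_seq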
def bidirectional_rnn(cell_fw, cell_bw, inputs,
initial_state_fw=None, initial_state_bw=None,
dtype=None, sequence_length=None, scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size x cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length `T` list of outputs (one for each input), which
are depth-concatenated forward and backward outputs.
output_state_fw is the final state of the forward rnn.
output_state_bw is the final state of the backward rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, rnn_cell.RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell.RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
name = scope or "BiRNN"
# Forward direction
with vs.variable_scope(name + "_FW") as fw_scope:
output_fw, output_state_fw = rnn(cell_fw, inputs, initial_state_fw, dtype,
sequence_length, scope=fw_scope)
# Backward direction
with vs.variable_scope(name + "_BW") as bw_scope:
tmp, output_state_bw = rnn(cell_bw, _reverse_seq(inputs, sequence_length),
initial_state_bw, dtype, sequence_length, scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [array_ops.concat(1, [fw, bw])
for fw, bw in zip(output_fw, output_bw)]
return (outputs, output_state_fw, output_state_bw)
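# A numpy sketch of the output layout produced by ``bidirectional_rnn``: at
# every time step the forward and backward outputs are concatenated along the
# depth axis, giving depth ``cell_fw.output_size + cell_bw.output_size``.
# The shapes below are hypothetical and purely for illustration.
def _bidirectional_concat_demo():
  import numpy as np
  T, batch, fw_size, bw_size = 5, 2, 3, 4
  output_fw = [np.zeros((batch, fw_size)) for _ in range(T)]
  output_bw = [np.ones((batch, bw_size)) for _ in range(T)]
  outputs = [np.concatenate([fw, bw], axis=1)  # like array_ops.concat(1, [fw, bw])
             for fw, bw in zip(output_fw, output_bw)]
  return outputs[0].shape  # (2, 7) == (batch, fw_size + bw_size)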
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`. Instead,
it is a single `Tensor` where the maximum time is either the first or second
dimension (see the parameter `time_major`). The corresponding output is
a single `Tensor` having the same number of time steps and batch size.
  The parameter `sequence_length` is optional; when it is provided, dynamic
  calculation is performed, copying state through and zeroing outputs past
  each batch element's sequence length.
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, input_size]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a tensor of appropriate type and shape `[batch_size x cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
state: The final state. If `cell.state_size` is a `Tensor`, this
will be shaped `[batch_size, cell.state_size]`. If it is a tuple,
      this will be a tuple with shapes `[batch_size, s] for s in cell.state_size`.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
if not time_major:
inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,D) => (T,B,D)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = array_ops.shape(inputs)
batch_size = input_shape[1]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.pack(shape)
return logging_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
(outputs, final_state) = _dynamic_rnn_loop(
cell, inputs, state, parallel_iterations=parallel_iterations,
swap_memory=swap_memory, sequence_length=sequence_length)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
outputs = array_ops.transpose(outputs, [1, 0, 2]) # (T,B,D) => (B,T,D)
return (outputs, final_state)
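# A numpy sketch of the batch-major <-> time-major transposes that
# ``dynamic_rnn`` applies around its internal loop when ``time_major=False``:
# inputs go (batch, time, depth) -> (time, batch, depth) and outputs come back
# the other way.  Hypothetical shapes, shown only to make the layout concrete.
def _time_major_transpose_demo():
  import numpy as np
  batch, max_time, depth = 4, 6, 3
  inputs_batch_major = np.zeros((batch, max_time, depth))
  inputs_time_major = np.transpose(inputs_batch_major, (1, 0, 2))    # (T, B, D)
  outputs_time_major = np.zeros_like(inputs_time_major)              # stand-in for the loop output
  outputs_batch_major = np.transpose(outputs_time_major, (1, 0, 2))  # back to (B, T, D)
  return inputs_time_major.shape, outputs_batch_major.shape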
def _dynamic_rnn_loop(
cell, inputs, initial_state, parallel_iterations, swap_memory,
sequence_length=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size].
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`.
final_state:
A `Tensor` matrix, or tuple of such matrices, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
# Construct an initial output
input_shape = array_ops.shape(inputs)
(time_steps, batch_size, _) = array_ops.unpack(input_shape, 3)
inputs_got_shape = inputs.get_shape().with_rank(3)
(const_time_steps, const_batch_size, const_depth) = inputs_got_shape.as_list()
if const_depth is None:
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference, "
"but saw value None.")
# Prepare dynamic conditional copying of state & output
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs.dtype)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
state_size = cell.state_size
state_is_tuple = _is_sequence(state_size)
state = _unpacked_state(state) if state_is_tuple else (state,)
with ops.op_scope([], "dynamic_rnn") as scope:
base_name = scope
output_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "output")
input_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "input")
input_ta = input_ta.unpack(inputs)
def _time_step(time, output_ta_t, *state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: `TensorArray`, the output with existing flow.
*state: List of vector tensors.
Returns:
The tuple (time + 1, output_ta_t with updated flow) + new_state.
"""
input_t = input_ta.read(time)
# Restore some shape information
input_t.set_shape([const_batch_size, const_depth])
# Pack state back up for use by cell
state = (_packed_state(structure=state_size, state=state)
if state_is_tuple else state[0])
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
    # Flatten the new state into the loop variables when using state tuples
new_state = _unpacked_state(new_state) if state_is_tuple else (new_state,)
output_ta_t = output_ta_t.write(time, output)
return (time + 1, output_ta_t) + new_state
final_loop_vars = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta) + state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(output_final_ta, final_state) = (final_loop_vars[1], final_loop_vars[2:])
final_outputs = output_final_ta.pack()
# Restore some shape information
final_outputs.set_shape([
const_time_steps, const_batch_size, cell.output_size])
  # Flatten the final state when using state tuples, otherwise take the single tensor.
final_state = (
_unpacked_state(final_state) if state_is_tuple else final_state[0])
return (final_outputs, final_state)
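# A plain-Python sketch of the accumulation pattern used by
# ``_dynamic_rnn_loop``: one slot per time step is written inside the loop
# (the role played by the output TensorArray) and stacked into a
# (time, batch, output_size) result at the end.  The "cell" here is a
# hypothetical stand-in used only for illustration.
def _dynamic_loop_accumulation_demo():
  import numpy as np
  time_steps, batch, depth = 3, 2, 4
  inputs = np.random.rand(time_steps, batch, depth)
  state = np.zeros((batch, depth))
  output_slots = [None] * time_steps            # plays the role of output_ta
  for t in range(time_steps):
    input_t = inputs[t]                         # like input_ta.read(t)
    output, state = input_t + state, input_t    # stand-in for cell(input_t, state)
    output_slots[t] = output                    # like output_ta.write(t, output)
  final_outputs = np.stack(output_slots)        # like output_ta.pack()
  return final_outputs.shape, state.shape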
|
|
"""
:copyright: (c) 2014 Building Energy Inc
:license: see LICENSE for more details.
"""
import json
from django.core.urlresolvers import reverse_lazy, reverse
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.auth.tokens import default_token_generator
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.core import mail
from superperms.orgs.models import (
ROLE_OWNER, Organization, OrganizationUser
)
from seed.utils.organizations import create_organization
#Custom user model compatibility
User = get_user_model()
class AdminViewsTest(TestCase):
def setUp(self):
admin_user_details = {'username': 'admin@testserver',
'email': 'admin@testserver',
'password': 'admin_passS1'}
self.admin_user = User.objects.create_superuser(**admin_user_details)
self.client.login(**admin_user_details)
user_details = {'username': 'testuser@testserver',
'email': 'testuser@testserver',
'password': 'user_passS1'}
self.user = User.objects.create_user(**user_details)
self.add_org_url = reverse_lazy('accounts:add_org')
self.add_user_url = reverse_lazy('accounts:add_user')
def _post_json(self, url, data):
"""
Handles posting a python object as json to a given url.
"""
data_json = json.dumps(data)
res = self.client.post(url,
data_json,
content_type='application/json')
res.body = json.loads(res.content)
return res
def _is_org_owner(self, user, org):
"""
Tests whether a given user is the owner of an org.
Handles traversing the somewhat ugly org object relationships.
"""
return OrganizationUser.objects.filter(
organization=org, user=user, role_level=ROLE_OWNER
).exists()
def test_add_org(self):
"""
Happy path test for creating a new org.
"""
data = {'user_id': self.admin_user.pk,
'organization_name': 'New Org'}
res = self._post_json(self.add_org_url, data)
self.assertEqual(res.body['status'], 'success')
self.assertEqual(Organization.objects.count(), 1)
new_org = Organization.objects.first()
self.assertTrue(self._is_org_owner(self.admin_user, new_org))
self.assertEqual(new_org.name, data['organization_name'])
def test_add_org_dupe(self):
"""
Trying to create an org with a dupe name fails.
"""
create_organization(user=self.admin_user, org_name='Orgname')
data = {
'user_id': self.user.pk,
'organization_name': 'Orgname'
}
res = self._post_json(self.add_org_url, data)
self.assertEqual(res.body['status'], 'error')
self.assertEqual(Organization.objects.count(), 1)
#and most importantly, the admin/owner of the org didn't change
org = Organization.objects.first()
self.assertTrue(self._is_org_owner(self.admin_user, org))
def test_add_user_existing_org(self):
"""
Test creating a new user, adding them to an existing org
in the process.
"""
org, org_user, _user_created = create_organization(
self.admin_user, name='Existing Org'
)
data = {
'organization_id': org.pk,
'first_name': 'New',
'last_name': 'User',
'email': 'new_user@testserver',
'role_level': 'ROLE_MEMBER'
}
res = self._post_json(self.add_user_url, data)
self.assertEqual(res.body['status'], 'success')
user = User.objects.get(username=data['email'])
self.assertEqual(user.email, data['email'])
#the user should be a member of the existing org
self.assertTrue(user in org.users.all())
# Since this is the only user, it's automatically the owner.
self.assertTrue(self._is_org_owner(self.admin_user, org))
self.assertEqual(Organization.objects.count(), 1)
def test_add_user_new_org(self):
"""
Create a new user and a new org at the same time.
"""
data = {'org_name': 'New Org',
'first_name': 'New',
'last_name': 'Owner',
'email': 'new_owner@testserver'}
res = self._post_json(self.add_user_url, data)
self.assertEqual(res.body['status'], 'success')
user = User.objects.get(username=data['email'])
self.assertEqual(user.email, data['email'])
#new user should be member, admin and owner of new org
org = Organization.objects.get(name='New Org')
self.assertTrue(user in org.users.all())
self.assertTrue(self._is_org_owner(user, org))
def test_add_user_no_org(self):
"""
Shouldn't be able to create a new user without either
selecting or creating an org at the same time.
"""
data = {'first_name': 'New',
'last_name': 'User',
'email': 'bad_user@testserver'}
res = self._post_json(self.add_user_url, data)
self.assertEqual(res.body['status'], 'error')
self.assertFalse(User.objects.filter(username=data['email']).exists())
self.assertFalse(User.objects.filter(email=data['email']).exists())
def test_signup_process(self):
"""
Simulates the entire new user signup process, from initial
        account creation by an admin to receiving the signup email
to confirming the account and setting a password.
"""
data = {'first_name': 'New',
'last_name': 'User',
'email': 'new_user@testserver',
'org_name': 'New Org'}
res = self._post_json(self.add_user_url, data)
self.client.logout() # stop being the admin user
self.assertEqual(res.body['status'], 'success') # to help debug fails
user = User.objects.get(email=data['email'])
#user's password doesn't work yet
self.assertFalse(user.has_usable_password())
token = default_token_generator.make_token(user)
signup_url = reverse("landing:signup", kwargs={
'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),
"token": token
})
#make sure we sent an email to the right address
#and it contains the signup url
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertTrue(signup_url in msg.body)
self.assertTrue(data['email'] in msg.to)
#actually go to that url to make sure it works
res = self.client.get(signup_url)
self.assertEqual(res.status_code, 200)
#post the new password
password_post = {'new_password1': 'newpassS2',
'new_password2': 'newpassS2'}
res = self.client.post(signup_url, data=password_post)
#reload the user
user = User.objects.get(pk=user.pk)
        # user now has a working password
self.assertTrue(user.has_usable_password())
def test_signup_process_force_lowercase_email(self):
"""
Simulates the signup and login forcing login username to lowercase
"""
data = {'first_name': 'New',
'last_name': 'User',
'email': '[email protected]',
'org_name': 'New Org'}
res = self._post_json(self.add_user_url, data)
self.client.logout() # stop being the admin user
self.assertEqual(res.body['status'], 'success') # to help debug fails
user = User.objects.get(email=data['email'])
#user's password doesn't work yet
self.assertFalse(user.has_usable_password())
token = default_token_generator.make_token(user)
signup_url = reverse("landing:signup", kwargs={
'uidb64': urlsafe_base64_encode(force_bytes(user.pk)),
"token": token
})
#make sure we sent an email to the right address
#and it contains the signup url
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertTrue(signup_url in msg.body)
self.assertTrue(data['email'] in msg.to)
#actually go to that url to make sure it works
res = self.client.get(signup_url)
self.assertEqual(res.status_code, 200)
#post the new password
password_post = {'new_password1': 'newpassS3',
'new_password2': 'newpassS3'}
res = self.client.post(signup_url, data=password_post)
#reload the user
user = User.objects.get(pk=user.pk)
        # user now has a working password and lowercase username
self.assertTrue(user.has_usable_password())
self.assertEqual(user.email, data['email'])
self.assertEqual(user.username, data['email'].lower())
# test that login works
resp = self.client.post(
reverse('landing:login'),
{'email': data['email'], 'password': 'newpassS3'}
)
# good logins will have 302 and no content
user = User.objects.get(pk=user.pk)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp.content, '')
|
|
# Utility functions for performing image inference
#
# Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
import os
import sys
import time
import tensorrt as trt
import tensorflow as tf
from PIL import Image
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import utils.engine as engine_utils # TRT Engine creation/save/load utils
import utils.model as model_utils # UFF conversion utils
# ../../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
)
import common
# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
class TRTInference(object):
"""Manages TensorRT objects for model inference."""
def __init__(self, trt_engine_path, uff_model_path, trt_engine_datatype=trt.DataType.FLOAT, batch_size=1):
"""Initializes TensorRT objects needed for model inference.
Args:
trt_engine_path (str): path where TensorRT engine should be stored
uff_model_path (str): path of .uff model
trt_engine_datatype (trt.DataType):
requested precision of TensorRT engine used for inference
            batch_size (int): batch size for which the engine
                should be optimized
"""
# We first load all custom plugins shipped with TensorRT,
# some of them will be needed during inference
trt.init_libnvinfer_plugins(TRT_LOGGER, '')
# Initialize runtime needed for loading TensorRT engine from file
self.trt_runtime = trt.Runtime(TRT_LOGGER)
# TRT engine placeholder
self.trt_engine = None
# Display requested engine settings to stdout
print("TensorRT inference engine settings:")
print(" * Inference precision - {}".format(trt_engine_datatype))
print(" * Max batch size - {}\n".format(batch_size))
# If engine is not cached, we need to build it
if not os.path.exists(trt_engine_path):
            # This function uses the supplied .uff file together with the
            # UffParser to build the TensorRT engine. For more details,
            # check the implementation.
self.trt_engine = engine_utils.build_engine(
uff_model_path, TRT_LOGGER,
trt_engine_datatype=trt_engine_datatype,
batch_size=batch_size)
# Save the engine to file
engine_utils.save_engine(self.trt_engine, trt_engine_path)
        # If the engine was not just built above, load the cached engine from file
if not self.trt_engine:
print("Loading cached TensorRT engine from {}".format(
trt_engine_path))
self.trt_engine = engine_utils.load_engine(
self.trt_runtime, trt_engine_path)
# This allocates memory for network inputs/outputs on both CPU and GPU
self.inputs, self.outputs, self.bindings, self.stream = \
engine_utils.allocate_buffers(self.trt_engine)
# Execution context is needed for inference
self.context = self.trt_engine.create_execution_context()
        # Allocate a host buffer that is reused across inferences (e.g. batched inference)
input_volume = trt.volume(model_utils.ModelData.INPUT_SHAPE)
self.numpy_array = np.zeros((self.trt_engine.max_batch_size, input_volume))
def infer(self, image_path):
"""Infers model on given image.
Args:
image_path (str): image to run object detection model on
"""
# Load image into CPU
img = self._load_img(image_path)
# Copy it into appropriate place into memory
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, img.ravel())
        # When inferring on a single image, measure the inference
        # time so it can be reported to the user
inference_start_time = time.time()
# Fetch output from the model
[detection_out, keepCount_out] = common.do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream)
# Output inference time
print("TensorRT inference time: {} ms".format(
int(round((time.time() - inference_start_time) * 1000))))
# And return results
return detection_out, keepCount_out
def infer_batch(self, image_paths):
"""Infers model on batch of same sized images resized to fit the model.
Args:
image_paths (str): paths to images, that will be packed into batch
and fed into model
"""
        # Verify that the supplied batch size does not exceed the engine's maximum
max_batch_size = self.trt_engine.max_batch_size
actual_batch_size = len(image_paths)
if actual_batch_size > max_batch_size:
raise ValueError(
"image_paths list bigger ({}) than engine max batch size ({})".format(actual_batch_size, max_batch_size))
# Load all images to CPU...
imgs = self._load_imgs(image_paths)
# ...copy them into appropriate place into memory...
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, imgs.ravel())
# ...fetch model outputs...
[detection_out, keep_count_out] = common.do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream,
batch_size=max_batch_size)
# ...and return results.
return detection_out, keep_count_out
def _load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image).reshape(
(im_height, im_width, model_utils.ModelData.get_input_channels())
).astype(np.uint8)
def _load_imgs(self, image_paths):
batch_size = self.trt_engine.max_batch_size
for idx, image_path in enumerate(image_paths):
img_np = self._load_img(image_path)
self.numpy_array[idx] = img_np
return self.numpy_array
def _load_img(self, image_path):
image = Image.open(image_path)
model_input_width = model_utils.ModelData.get_input_width()
model_input_height = model_utils.ModelData.get_input_height()
        # Note: the bilinear interpolation used by Pillow is slightly
        # different from the one used by TensorFlow, so if the network
        # receives an image that is not 300x300, its output may differ
        # from the one produced by TensorFlow
image_resized = image.resize(
size=(model_input_width, model_input_height),
resample=Image.BILINEAR
)
img_np = self._load_image_into_numpy_array(image_resized)
# HWC -> CHW
img_np = img_np.transpose((2, 0, 1))
# Normalize to [-1.0, 1.0] interval (expected by model)
img_np = (2.0 / 255.0) * img_np - 1.0
img_np = img_np.ravel()
return img_np
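# A numpy sketch of the preprocessing performed in ``_load_img`` above: the
# decoded image goes HWC -> CHW and is rescaled from [0, 255] to the
# [-1.0, 1.0] range the model expects.  The random array stands in for a real
# image and is used purely for illustration.
def _preprocess_demo():
    import numpy as np
    hwc = np.random.randint(0, 256, size=(300, 300, 3)).astype(np.uint8)
    chw = hwc.transpose((2, 0, 1))          # HWC -> CHW
    normalized = (2.0 / 255.0) * chw - 1.0  # [0, 255] -> [-1.0, 1.0]
    return normalized.min(), normalized.max()  # bounded by -1.0 and 1.0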
# This class is similar to TRTInference, but it runs the model with TensorFlow instead of TensorRT
class TensorflowInference(object):
def __init__(self, pb_model_path):
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(pb_model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.sess = tf.Session(graph=self.detection_graph)
def infer(self, image_path):
img_np = self._load_img(image_path)
return self._run_tensorflow_graph(np.expand_dims(img_np, axis=0))
def infer_batch(self, image_paths):
img_np = self._load_imgs(image_paths)
return self._run_tensorflow_graph(img_np)
def _run_tensorflow_graph(self, image_input):
ops = self.detection_graph.get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes',
'detection_scores', 'detection_classes'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = self.detection_graph.get_tensor_by_name(
tensor_name)
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
output_dict = self.sess.run(tensor_dict,
feed_dict={image_tensor: image_input})
# All outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = output_dict['num_detections'].astype(np.int32)
output_dict['detection_classes'] = output_dict[
'detection_classes'].astype(np.uint8)
return output_dict
def _load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image).reshape(
(im_height, im_width, model_utils.ModelData.get_input_channels())
).astype(np.uint8)
def _load_imgs(self, image_paths):
numpy_array = np.zeros((len(image_paths),) + (300, 300, 3))
for idx, image_path in enumerate(image_paths):
img_np = self._load_img(image_path)
numpy_array[idx] = img_np
return numpy_array
def _load_img(self, image_path):
img = Image.open(image_path)
img_np = self._load_image_into_numpy_array(img)
return img_np
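# A numpy sketch of the post-processing in ``_run_tensorflow_graph``: the
# graph returns float32 arrays, and the detection count and class ids are cast
# to integer dtypes before use.  The values below are made up for illustration.
def _output_cast_demo():
    import numpy as np
    output_dict = {
        'num_detections': np.array([3.0], dtype=np.float32),
        'detection_classes': np.array([[1.0, 2.0, 7.0]], dtype=np.float32),
    }
    output_dict['num_detections'] = output_dict['num_detections'].astype(np.int32)
    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.uint8)
    return output_dict['num_detections'].dtype, output_dict['detection_classes'].dtype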
|
|
# -*- coding: utf-8 -*-
import datetime, time
from boto.exception import BotoServerError
from django.core.exceptions import PermissionDenied
from django.http import Http404
from apps.canvas_auth.models import User, AnonymousUser
from canvas.browse import (get_browse_tiles, get_user_stickered, TileDetails, LastReplyTileDetails,
get_front_comments, Navigation, get_user_data)
from canvas.management.commands import send_24h_email
from canvas.models import (Comment, Visibility, Category, DisabledCategory, FollowCategory, CommentSticker,
YouTubeContent, ExternalContent, send_email,
WelcomeEmailRecipient, flagged_comments, Content)
from canvas.details_models import CommentDetails
from canvas.tests.tests_helpers import (create_comment, create_content, create_group, create_user, CanvasTestCase,
FakeRequest, create_staff, create_rich_user)
from canvas import api, stickers, mocks, util
from canvas.notifications.email_channel import EmailChannel
from configuration import Config
from services import Services, override_service, FakeTimeProvider, FakeMetrics, FakeExperimentPlacer
class TestCommentStickers(CanvasTestCase):
def test_sticker_from_user(self):
user = create_user()
comment = create_comment()
self.assertFalse(CommentSticker.get_sticker_from_user(comment.id, user))
self.api_post('/api/sticker/comment', {'type_id': '1', 'comment_id': comment.id}, user=user)
self.assertTrue(CommentSticker.get_sticker_from_user(comment.id, user))
class TestCategory(CanvasTestCase):
def test_everything_url(self):
self.assertEquals(Category.ALL.details()['url'], '/x/everything')
def test_following_url(self):
self.assertEquals(Category.MY.details()['url'], '/x/following')
def test_nonspecial_url(self):
group = create_group(name="jailbait")
self.assertEquals(group.details()['url'], '/x/jailbait')
def test_top_curation(self):
group = create_group()
for x in range(3):
user = create_user()
FollowCategory(user=user, category=group).save()
group.details.force()
self.assertTrue(group.name in [top['name'] for top in Category.get_top_details()],
repr(Category.get_top_details()))
group.visibility = Visibility.CURATED
group.save()
self.assertFalse(group.name in [top['name'] for top in Category.get_top_details()])
def test_disabled_group(self):
group = create_group()
self.assertFalse(isinstance(Category.get(group.name), DisabledCategory))
group.visibility = Visibility.DISABLED
group.save()
self.assertTrue(isinstance(Category.get(group.name), DisabledCategory))
group.visibility = Visibility.CURATED
group.save()
self.assertFalse(isinstance(Category.get(group.name), DisabledCategory))
def test_whitelisted_categories(self):
group = create_group(name=Config['featured_groups'][0])
group2 = create_group(name='foobar')
self.assertTrue(group.id in Category.get_whitelisted())
self.assertFalse(group2.id in Category.get_whitelisted())
class TestUserData(CanvasTestCase):
def test_user_stickered_deleted_comment(self):
cmt = self.post_comment(reply_content=create_content().id)
# Now another user sticks it.
user = create_user()
result = self.api_post('/api/sticker/comment', {'type_id': '1', 'comment_id': cmt.id}, user=user)
# Then the author deletes it.
self.api_post('/api/comment/delete', {'comment_id': cmt.id}, user=cmt.author)
stickered = get_user_stickered(user)
self.assertEqual(len(stickered), 0)
def test_user_data_with_invalid_type_404s(self):
user = create_user()
nav_data = {'userpage_type': 'stickered', 'user': create_user().username}
nav = Navigation.load_json_or_404(nav_data)
nav.userpage_type = 'INVALID_TYPE'
self.assertRaises(Http404, lambda: get_user_data(user, nav))
class TestCommentDetails(CanvasTestCase):
def after_setUp(self):
# Create a bunch of comments
for i in range(1, 10):
create_comment(author=create_user())
self.comments = Comment.all_objects.all()
def test_from_queryset_with_pins(self):
self.assertTrue(self.comments)
tiles = TileDetails.from_queryset_with_pins(self.comments)
self.assertTrue(tiles)
for tile in tiles:
self.assertNotEqual(tile.pins, None)
self.assertIsInstance(tile.comment, CommentDetails)
def test_from_queryset_with_viewer_stickers(self):
user = create_user()
def tiles():
return TileDetails.from_queryset_with_viewer_stickers(user, self.comments)
for tile in tiles():
self.assertEqual(tile.viewer_sticker, None)
self.api_post('/api/sticker/comment', {'type_id': '1', 'comment_id': tile.comment.id},
user=user)
for tile in tiles():
self.assertEqual(tile.viewer_sticker.type_id, 1)
def test_properties_dont_get_serialized(self):
# CommentDetails should only serialize its dict contents, not any of its member properties.
cmt = create_comment().details()
cmt.test_foo_property = 1
d = util.loads(util.dumps(cmt))
self.assertFalse('test_foo_property' in d)
def test_empty_reply_content(self):
cmt = create_comment().details()
self.assertEqual(cmt.reply_content, {})
class TestCommentDetailsStickers(CanvasTestCase):
def _make_stickers(self, sticks=['smiley', 'banana', 'frowny', 'frowny'], top='banana', per=2):
self.top_id = stickers.get(top).type_id
self.stickers = map(stickers.get, sticks)
self.cmt = self.post_comment(reply_content=create_content().id)
from canvas import economy
for sticker in self.stickers:
for _ in xrange(per):
user = create_rich_user()
if sticker.cost:
user.kv.stickers.add_limited_sticker(sticker)
economy.purchase_stickers(user, sticker.type_id, 1)
#user.redis.user_kv.hset('sticker:%s:count' % STORE_ITEM, 1)
self.api_post('/api/sticker/comment', {
'type_id': sticker.type_id,
'comment_id': self.cmt.id,
}, user=user)
def test_sorted_sticker_counts(self):
self._make_stickers()
counts = self.cmt.details().sorted_sticker_counts()
self.assertEqual(counts[0]['type_id'], self.top_id)
self.assertEqual(counts[0]['count'], 2)
def test_top_sticker(self):
self._make_stickers()
top_stick = self.cmt.details().top_sticker()
self.assertFalse(top_stick is None)
self.assertEqual(top_stick['type_id'], self.top_id)
def test_smiley_vs_frowny(self):
self._make_stickers()
counts = self.cmt.details().sorted_sticker_counts()
self.assertEqual(counts[2]['type_id'], stickers.get('smiley').type_id)
self.assertEqual(counts[1]['type_id'], stickers.get('frowny').type_id)
def test_num1(self):
self._make_stickers(sticks=['cool', 'smiley'], top='cool', per=1)
top_stick = self.cmt.details().top_sticker()
self.assertEqual(top_stick['type_id'], self.top_id)
class TestComment(CanvasTestCase):
def test_get_deep_replies(self):
op = create_comment()
def reply(to):
return create_comment(parent_comment=op, replied_comment=to)
r1 = reply(op)
r2 = reply(op)
r3 = reply(op)
r4 = reply(r3)
r5 = reply(r4)
r6 = reply(r4)
r7 = reply(op)
self.assertEqual(len(r3.get_deep_replies()), 3)
r8 = reply(r3)
self.assertEqual(len(r3.get_deep_replies()), 4)
def test_update_score(self):
user = create_user()
comment = create_comment(author=user)
for sticker in stickers.primary_types:
user = create_user()
user.kv.stickers.currency.increment(100)
# Sticker the comment a bunch.
request = FakeRequest(user)
api._sticker_comment(request, comment, sticker.type_id)
# Update score
comment.update_score()
def test_make_post_anonymous_by_author(self):
usr = create_user()
cmt = create_comment(author=usr, anonymous=True)
self.assertTrue(cmt.anonymous)
cmt.make_non_anonymous(usr)
self.assertFalse(cmt.anonymous)
def test_make_post_anonymous_by_other(self):
author = create_user()
bad_user = create_user()
cmt = create_comment(author=author, anonymous=True)
with self.assertRaises(PermissionDenied):
cmt.make_non_anonymous(bad_user)
def test_details_replies_no_replies(self):
cmt = create_comment(timestamp=123)
d = cmt.details()
self.assertEqual(d.reply_count, 0)
self.assertEqual(d.last_reply_id, None)
# This should be the timestamp of the OP in this case.
self.assertEqual(d.last_reply_time, 123)
def test_details_replies_one_reply(self):
with override_service('time', FakeTimeProvider):
cmt = create_comment()
Services.time.step()
child = create_comment(parent_comment=cmt)
d = cmt.details()
self.assertEqual(d.reply_count, 1)
self.assertEqual(d.last_reply_id, child.id)
self.assertAlmostEqual(d.last_reply_time, child.timestamp, places=4)
def test_details_replies_two_replies(self):
cmt = create_comment()
first = create_comment(parent_comment=cmt, timestamp=1)
second = create_comment(parent_comment=cmt, timestamp=2)
d = cmt.details()
self.assertEqual(d.reply_count, 2)
self.assertEqual(d.last_reply_id, second.id)
self.assertEqual(d.last_reply_time, second.timestamp)
def test_details_disabled_parent_url(self):
cmt = self.post_comment(reply_content=create_content().id)
reply = self.post_comment(parent_comment=cmt.id, reply_content=create_content().id)
self.assertNotEqual(cmt.details().url, None)
self.assertEqual(reply.details().parent_url, cmt.details().url)
cmt.moderate_and_save(Visibility.UNPUBLISHED, cmt.author)
self.assertEqual(reply.details.force().parent_url, None)
def test_details_replies_two_replies_last_curated(self):
# The last reply should include curated replies to prevent "stuck" active/pinned curated OPs auto-curating
# their replies.
cmt = create_comment()
first = create_comment(parent_comment=cmt, timestamp=1)
second = create_comment(parent_comment=cmt, timestamp=2)
second.moderate_and_save(Visibility.CURATED, second.author)
d = cmt.details()
self.assertEqual(d.reply_count, 2)
self.assertEqual(d.last_reply_id, second.id)
self.assertEqual(d.last_reply_time, second.timestamp)
def test_details_repost_zero_for_different_content(self):
content = create_content()
cmt = create_comment(reply_content=content)
self.assertEqual(cmt.details().repost_count, 0)
content2 = create_content()
cmt2 = create_comment(reply_content=content2)
self.assertEqual(cmt.details.force().repost_count, 0)
self.assertEqual(cmt2.details().repost_count, 0)
def test_details_repost_one_for_repost(self):
content = create_content()
cmt = create_comment(reply_content=content)
cmt2 = create_comment(reply_content=content)
self.assertEqual(cmt.details.force().repost_count, 0)
self.assertEqual(cmt2.details().repost_count, 1)
def test_details_repost_zero_for_text_posts(self):
# These will have the same reply_content_id (None), but we should handle that and not count them as reposts.
cmt = create_comment()
cmt2 = create_comment()
self.assertEqual(cmt.details().repost_count, 0)
self.assertEqual(cmt2.details().repost_count, 0)
def test_details_repost_zero_for_audio_remix(self):
content = create_content()
cmt = create_comment(reply_content=content)
cmt2 = create_comment(reply_content=content)
external_content = ExternalContent.from_dict(dict(
type="yt",
end_time=10.0,
start_time=0.0,
source_url="123445555"
))
external_content.parent_comment = cmt2
external_content.save()
self.assertEqual(cmt2.details().repost_count, 0)
def test_details_repost_op_isnt_curated(self):
content = create_content()
cmt = self.post_comment(reply_content=content.id)
cmt2 = self.post_comment(reply_content=content.id)
self.assertEqual(cmt.details().visibility, Visibility.PUBLIC)
self.assertEqual(cmt2.details().visibility, Visibility.PUBLIC)
def test_details_repost_reply_is_curated(self):
content = create_content()
cmt = self.post_comment(reply_content=content.id)
cmt2 = self.post_comment(reply_content=content.id, parent_comment=cmt.id)
self.assertEqual(cmt.details().visibility, Visibility.PUBLIC)
self.assertEqual(cmt2.details().visibility, Visibility.CURATED)
def test_details_reply_to_public_is_public(self):
content = create_content()
cmt = self.post_comment(reply_content=content.id)
cmt2 = self.post_comment(reply_text="bar", parent_comment=cmt.id)
self.assertEqual(cmt2.details().visibility, Visibility.PUBLIC)
def test_details_reply_to_curated_is_curated(self):
content = create_content()
cmt = self.post_comment(reply_content=content.id)
cmt.moderate_and_save(Visibility.CURATED, cmt.author)
cmt2 = self.post_comment(reply_text="bar", parent_comment=cmt.id)
self.assertEqual(cmt2.details().visibility, Visibility.CURATED)
def test_details_reply_to_hidden_is_curated(self):
content = create_content()
cmt = self.post_comment(reply_content=content.id)
cmt.moderate_and_save(Visibility.HIDDEN, cmt.author)
cmt2 = self.post_comment(reply_text="bar", parent_comment=cmt.id)
self.assertEqual(cmt2.details().visibility, Visibility.CURATED)
def test_details_reply_to_disabled_fails(self):
content = create_content()
cmt = self.post_comment(reply_content=content.id)
cmt.moderate_and_save(Visibility.DISABLED, cmt.author)
response = self.post_comment(fetch_comment=False, reply_text="bar", parent_comment=cmt.id)
self.assertFalse(response['success'])
def test_downvoted_comment(self):
cmt = self.post_comment(reply_content=create_content().id)
for _ in xrange(Comment.DOWNVOTES_REQUIRED):
self.assertFalse(cmt.is_downvoted())
self.assertFalse(cmt.is_collapsed())
self.api_post('/api/sticker/comment', {
'type_id': stickers.downvote.type_id,
'comment_id': cmt.id,
}, user=create_user())
self.assertTrue(cmt.is_downvoted())
self.assertTrue(cmt.is_collapsed())
def test_reply_to_offtopic_op_url(self):
op = self.post_offtopic_comment()
reply = self.post_comment(parent_comment=op.id, reply_text="hello")
self.assertEqual(op.get_absolute_url(), reply.get_absolute_url()[:len(op.get_absolute_url())])
self.assertNotEqual(op.get_absolute_url(), reply.get_absolute_url())
def test_reply_to_offtopic_op_parent_url(self):
op = self.post_offtopic_comment()
reply = self.post_comment(parent_comment=op.id, reply_text="hello")
self.assertEqual(reply.get_parent_url(), op.get_absolute_url())
def test_share_page_url(self):
op = self.post_offtopic_comment()
reply = self.post_comment(parent_comment=op.id, reply_text="hello")
self.assertEqual(reply.get_share_page_url()[:3], '/d/')
class TestContent(CanvasTestCase):
def after_setUp(self):
self.old_function = Content._get_details
def before_tearDown(self):
Content._get_details = self.old_function
def test_details_does_not_trigger_recursion(self):
that = self
def test_wrapper(self, **kwargs):
test_wrapper.calls_to_get_details += 1
return that.old_function(self, **kwargs)
test_wrapper.calls_to_get_details = 0
Content._get_details = test_wrapper
op = create_comment(reply_content=create_content())
reply = op
for i in range(15):
last = reply
reply = create_comment(parent_comment=op,
reply_content=create_content(remix_of=last.reply_content))
reply.details()
self.assertEqual(test_wrapper.calls_to_get_details, 4)
class TestLastReplyTileDetails(CanvasTestCase):
def test_in_reply_to(self):
op = create_comment()
reply = create_comment(parent_comment=op)
tile = LastReplyTileDetails.from_comment_id(op.id)
self.assertEqual(tile.comment.thread.op.id, op.id)
class TestCanvasUser(CanvasTestCase):
def test_unsubscribe(self):
user = create_user()
# Sanity checks
# Note that the default is the EmailChannel.
assert user.kv.subscriptions
self.assertTrue(user.kv.subscriptions.can_receive('remixed'))
# Now unsubscribe
user.kv.subscriptions.unsubscribe('remixed')
self.assertFalse(user.kv.subscriptions.can_receive('remixed'))
class TestExternalContent(CanvasTestCase):
def test_you_tube_content(self):
comment = create_comment()
content = YouTubeContent(parent_comment=comment)
content.source_url = "12345"
content.content_type = "yt"
START_TIME = 400
content.start_time = START_TIME
assert content.start_time == START_TIME
details = content.details()
assert "start_time" in details
assert details.get("start_time") == START_TIME
END_TIME = 410
content.end_time = END_TIME
assert content.end_time == END_TIME
details = content.details()
assert "end_time" in details
assert details.get("end_time") == END_TIME
loop_length = content.loop_length
assert int(loop_length) == END_TIME - START_TIME
content.save()
# Make sure we can pull the external content from the comment
assert content in comment.external_content.all()
ec = comment.external_content.all()[0]
assert ec
assert ec.proxy
print ec.proxy.to_client()
print ec.proxy.source_url
assert ec.proxy.source_url
details = comment.details()
self.assertTrue(hasattr(details, 'external_content'))
ec_details = details.external_content.pop()
self.assertTrue(ec_details)
print ec_details
self.assertTrue('source_url' in ec_details)
def test_from_dict(self):
comment = create_comment()
external_content_dict = dict(type="yt", end_time=10.0, start_time=0.0, source_url="123445555")
external_content = ExternalContent.from_dict(external_content_dict)
assert isinstance(external_content, ExternalContent)
assert isinstance(external_content, YouTubeContent)
class TestUserKV(CanvasTestCase):
def setUp(self):
CanvasTestCase.setUp(self)
self.sticker = stickers.Sticker(1234, "foobar", limited=True, maximum=10, cost=10)
stickers.add_sticker(self.sticker)
def tearDown(self):
CanvasTestCase.tearDown(self)
stickers.remove_sticker(self.sticker)
def test_sticker_kv_purchase_markers(self):
sticker = self.sticker
user = create_user()
assert user.kv.stickers.did_purchase(sticker) == False
user.kv.stickers.mark_sticker_purchased(sticker)
assert user.kv.stickers.did_purchase(sticker) == True
class TestEmail(CanvasTestCase):
def test_ses_blacklist_silently_fails(self):
def send_fail(messages):
raise BotoServerError(400, "Bad Request",
"""<ErrorResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<Error>
<Type>Sender</Type>
<Code>MessageRejected</Code>
<Message>Address blacklisted.</Message>
</Error>
<RequestId>a693e02d-00f2-11e1-9a52-ed3836840b28</RequestId>
</ErrorResponse>""")
with mocks.override_send_messages(send_fail):
send_email('[email protected]', '[email protected]', 'subjek', 'test', {})
def test_repeating_exception_bubbles_out(self):
def send_fail(messages):
raise Exception
with self.assertRaises(Exception):
with mocks.override_send_messages(send_fail):
send_email('[email protected]', '[email protected]', 'subjek', 'test', {})
class TestTileData(CanvasTestCase):
def after_setUp(self):
self.user = create_user()
self.COMMENT_COUNT = 7
self.GROUP = create_group(name=Config['featured_groups'][0])
self.TODAY = datetime.datetime(year=2011, month=2, day=3)
with override_service('time', FakeTimeProvider):
#TODO refactor into tests_helpers and consolidate w/ other tests that do this (email_channel, models)
Services.time.t = time.mktime(self.TODAY.timetuple())
self.comments = [self.post_comment(reply_content=create_content().id, category=self.GROUP.name)
for _ in xrange(self.COMMENT_COUNT - 1)]
self.comments.append(self.post_comment(reply_content=create_content().id, category=self.GROUP.name,
parent_comment=self.comments[-1].id))
Services.time.step(60*60)
for cmt in self.comments:
self.api_post('/api/sticker/comment', {'type_id': '1', 'comment_id': cmt.id}, user=create_user())
Services.time.step()
Services.time.step(60*60)
cmt.update_score()
def _tiles(self):
return get_browse_tiles(self.user, Navigation(sort='hot', offset=0, category=Category.ALL))
def test_get_browse_tiles_without_dupes(self):
tiles = get_browse_tiles(create_user(), Navigation(sort='hot', offset=0, category=Category.ALL))
self.assertTrue(tiles)
tile_ids = [tile.comment.id for tile in tiles]
self.assertEqual(sorted(tile_ids), sorted(list(set(tile_ids))))
def test_tiles_exist(self):
self.assertTrue(self._tiles())
def test_get_browse_tiles_with_hidden_comments(self):
for cmt in self.comments:
self.user.redis.hidden_comments.hide_comment(cmt)
self.assertFalse(self._tiles())
def test_get_browse_tiles_with_hidden_threads(self):
for cmt in self.comments:
self.user.redis.hidden_threads.hide_thread(cmt)
self.assertFalse(self._tiles())
def test_logged_out_best_everything_returns_enough_comments(self):
with override_service('time', FakeTimeProvider):
Services.time.t = time.mktime(self.TODAY.timetuple())
for category in [Category.ALL] + list(Category.objects.all()):
category.merge_top_scores()
cmts = get_front_comments(AnonymousUser(), Navigation(sort='best',
offset=0,
year=2011,
category=Category.ALL))
self.assertEqual(len(cmts), self.COMMENT_COUNT)
class TestWelcomeEmailRecipients(CanvasTestCase):
def test_already_received(self):
with override_service('time', FakeTimeProvider):
# Create dummy first, so count of users and count of recipients is unequal.
create_user()
Services.time.step(60*60*48)
user = create_user()
self.assertFalse(user in send_24h_email.recipients())
Services.time.step(60*60*48)
WelcomeEmailRecipient.objects.create(recipient=user)
recipients = send_24h_email.recipients()
self.assertFalse(user in recipients)
self.assertFalse(recipients)
    def test_not_yet_received(self):
with override_service('time', FakeTimeProvider):
user = create_user()
Services.time.step(60*60*24)
recipients = send_24h_email.recipients()
self.assertTrue(user in recipients)
def test_send_email_happens_once_per_recipient(self):
with override_service('time', FakeTimeProvider):
user = create_staff()
Services.time.step(60*60*24)
(recipient,) = send_24h_email.recipients()
self.assertEqual(recipient, user)
with override_service('metrics', FakeMetrics):
def send():
for user in send_24h_email.recipients():
send_24h_email.send_welcome_email(user)
self.assertEqual(0, len(Services.metrics.email_sent.records))
send()
self.assertEqual(1, len(Services.metrics.email_sent.records), "The digest email wasn't sent.")
send()
self.assertEqual(1, len(Services.metrics.email_sent.records), "The email was sent twice.")
def test_really_old_users_dont_get_it(self):
with override_service('time', FakeTimeProvider):
user = create_user()
Services.time.step(60*60*24)
self.assertTrue(user in send_24h_email.recipients())
Services.time.step(60*60*24*30) # a month later.
self.assertFalse(user in send_24h_email.recipients())
class TestFlaggedData(CanvasTestCase):
def after_setUp(self):
self.author = create_user()
self.cmt = self.post_comment(reply_content=create_content().id, user=self.author)
self.api_post('/api/comment/flag', {'comment_id': self.cmt.id})
def test_real_username(self):
(cmt,) = flagged_comments()
self.assertEqual(self.author.username, cmt.to_client()['real_username'])
self.assertEqual(self.author.username, cmt.real_username)
|
|
import random
import datetime
from decimal import Decimal
from dateutil.relativedelta import relativedelta
import pycountry
import trytond.tests.test_tryton
from trytond.tests.test_tryton import POOL, USER, CONTEXT
from nereid.testing import NereidTestCase
from trytond.transaction import Transaction
from trytond.config import config
config.set('database', 'path', '/tmp')
class BaseTestCase(NereidTestCase):
"""
    Base test case for nereid webshop
"""
def setUp(self):
trytond.tests.test_tryton.install_module('nereid_webshop')
self.FiscalYear = POOL.get('account.fiscalyear')
self.Account = POOL.get('account.account')
self.PaymentTerm = POOL.get('account.invoice.payment_term')
self.Currency = POOL.get('currency.currency')
self.Company = POOL.get('company.company')
self.Party = POOL.get('party.party')
self.Sale = POOL.get('sale.sale')
self.Cart = POOL.get('nereid.cart')
self.Product = POOL.get('product.product')
self.ProductTemplate = POOL.get('product.template')
self.Language = POOL.get('ir.lang')
self.NereidWebsite = POOL.get('nereid.website')
self.SaleChannel = POOL.get('sale.channel')
self.Uom = POOL.get('product.uom')
self.Country = POOL.get('country.country')
self.Subdivision = POOL.get('country.subdivision')
self.Currency = POOL.get('currency.currency')
self.NereidUser = POOL.get('nereid.user')
self.User = POOL.get('res.user')
self.PriceList = POOL.get('product.price_list')
self.Location = POOL.get('stock.location')
self.Party = POOL.get('party.party')
self.Locale = POOL.get('nereid.website.locale')
self.Tax = POOL.get('account.tax')
self.Node = POOL.get('product.tree_node')
self.ArticleCategory = POOL.get('nereid.cms.article.category')
self.Article = POOL.get('nereid.cms.article')
self.Category = POOL.get('product.category')
self.StaticFolder = POOL.get('nereid.static.folder')
self.StaticFile = POOL.get('nereid.static.file')
self.SaleConfig = POOL.get('sale.configuration')
self.ProductNodeRelationship = POOL.get(
'product.product-product.tree_node'
)
self.MenuItem = POOL.get('nereid.cms.menuitem')
self.templates = {
'shopping-cart.jinja':
'Cart:{{ cart.id }},{{get_cart_size()|round|int}},'
'{{cart.sale.total_amount}}',
'product.jinja':
'{{ product.name }}',
'catalog/gift-card.html':
'{{ product.id }}',
}
def _get_account_by_kind(self, kind, company=None, silent=True):
"""Returns an account with given spec
:param kind: receivable/payable/expense/revenue
        :param silent: don't raise an error if the account is not found
"""
if company is None:
company, = self.Company.search([], limit=1)
accounts = self.Account.search([
('kind', '=', kind),
('company', '=', company)
], limit=1)
if not accounts and not silent:
raise Exception("Account not found")
return accounts[0] if accounts else False
def _create_product_category(self, name, vlist):
"""
        Create a product category.
        The name is mandatory; any other field values are passed in through
        the dictionaries of vlist.
:param name: Name of the product category
:param vlist: List of dictionaries of values to create
"""
for values in vlist:
values['name'] = name
return self.Category.create(vlist)
def _create_product_template(
self, name, vlist, uri, uom=u'Unit', displayed_on_eshop=True,
list_price=Decimal('10'), cost_price=Decimal('5')
):
"""
        Create a product template with products and return the created template
:param name: Name of the product
:param vlist: List of dictionaries of values to create
:param uri: uri of product template
:param uom: Note it is the name of UOM (not symbol or code)
:param displayed_on_eshop: Boolean field to display product
on shop or not
"""
        # Pick a random single-letter product code. Note that _code_list is
        # rebuilt on every call, so the loop below cannot actually guarantee
        # uniqueness across separate invocations.
        _code_list = []
        code = random.choice('ABCDEFGHIJK')
        while code in _code_list:
            code = random.choice('ABCDEFGHIJK')
        _code_list.append(code)
for values in vlist:
values['name'] = name
values['default_uom'], = self.Uom.search(
[('name', '=', uom)], limit=1
)
values['sale_uom'], = self.Uom.search(
[('name', '=', uom)], limit=1
)
values['products'] = [
('create', [{
'uri': uri,
'displayed_on_eshop': displayed_on_eshop,
'code': code,
'list_price': list_price,
'cost_price': cost_price,
}])
]
return self.ProductTemplate.create(vlist)[0]
def create_test_products(self):
# Create product templates with products
self._create_product_template(
'product 1',
[{
'category': self.category.id,
'type': 'goods',
'salable': True,
'account_expense': self._get_account_by_kind('expense').id,
'account_revenue': self._get_account_by_kind('revenue').id,
}],
uri='product-1',
list_price=Decimal('10'),
cost_price=Decimal('5'),
)
self._create_product_template(
'product 2',
[{
'category': self.category2.id,
'type': 'goods',
'salable': True,
'account_expense': self._get_account_by_kind('expense').id,
'account_revenue': self._get_account_by_kind('revenue').id,
}],
uri='product-2',
list_price=Decimal('20'),
cost_price=Decimal('5'),
)
self._create_product_template(
'product 3',
[{
'category': self.category3.id,
'type': 'goods',
'account_expense': self._get_account_by_kind('expense').id,
'account_revenue': self._get_account_by_kind('revenue').id,
}],
uri='product-3',
list_price=Decimal('30'),
cost_price=Decimal('5'),
)
self._create_product_template(
'product 4',
[{
'category': self.category3.id,
'type': 'goods',
'account_expense': self._get_account_by_kind('expense').id,
'account_revenue': self._get_account_by_kind('revenue').id,
}],
uri='product-4',
displayed_on_eshop=False,
list_price=Decimal('30'),
cost_price=Decimal('5'),
)
def _create_auth_net_gateway_for_site(self, method='credit_card'):
"""
A helper function that creates the authorize.net gateway and assigns
it to the websites.
"""
PaymentGateway = POOL.get('payment_gateway.gateway')
NereidWebsite = POOL.get('nereid.website')
Journal = POOL.get('account.journal')
cash_journal, = Journal.search([
('name', '=', 'Cash')
])
self.account_cash, = self.Account.search([
('kind', '=', 'other'),
('name', '=', 'Main Cash'),
('company', '=', self.company.id)
])
cash_journal.debit_account = self.account_cash
cash_journal.credit_account = self.account_cash
cash_journal.save()
with Transaction().set_context({'use_dummy': True}):
            gateway = PaymentGateway(
name='Authorize.net',
journal=cash_journal,
provider='dummy',
method=method,
authorize_net_login='327deWY74422',
authorize_net_transaction_key='32jF65cTxja88ZA2',
)
            gateway.save()
websites = NereidWebsite.search([])
NereidWebsite.write(websites, {
'accept_credit_card': True,
'save_payment_profile': True,
            'credit_card_gateway': gateway.id,
})
def _create_fiscal_year(self, date=None, company=None):
"""
        Creates a fiscal year and required sequences
"""
Sequence = POOL.get('ir.sequence')
SequenceStrict = POOL.get('ir.sequence.strict')
if date is None:
date = datetime.date.today()
if company is None:
company, = self.Company.search([], limit=1)
invoice_sequence, = SequenceStrict.create([{
'name': '%s' % date.year,
'code': 'account.invoice',
'company': company,
}])
fiscal_year, = self.FiscalYear.create([{
'name': '%s' % date.year,
'start_date': date + relativedelta(month=1, day=1),
'end_date': date + relativedelta(month=12, day=31),
'company': company,
'post_move_sequence': Sequence.create([{
'name': '%s' % date.year,
'code': 'account.move',
'company': company,
}])[0],
'out_invoice_sequence': invoice_sequence,
'in_invoice_sequence': invoice_sequence,
'out_credit_note_sequence': invoice_sequence,
'in_credit_note_sequence': invoice_sequence,
}])
self.FiscalYear.create_period([fiscal_year])
return fiscal_year
def _create_coa_minimal(self, company):
"""Create a minimal chart of accounts
"""
AccountTemplate = POOL.get('account.account.template')
account_create_chart = POOL.get(
'account.create_chart', type="wizard")
account_template, = AccountTemplate.search(
[('parent', '=', None)]
)
session_id, _, _ = account_create_chart.create()
create_chart = account_create_chart(session_id)
create_chart.account.account_template = account_template
create_chart.account.company = company
create_chart.transition_create_account()
receivable, = self.Account.search([
('kind', '=', 'receivable'),
('company', '=', company),
])
payable, = self.Account.search([
('kind', '=', 'payable'),
('company', '=', company),
])
create_chart.properties.company = company
create_chart.properties.account_receivable = receivable
create_chart.properties.account_payable = payable
create_chart.transition_create_properties()
def _create_payment_term(self):
"""Create a simple payment term with all advance
"""
return self.PaymentTerm.create([{
'name': 'Direct',
'lines': [('create', [{'type': 'remainder'}])]
}])
def _create_countries(self, count=5):
"""
Create some sample countries and subdivisions
"""
for country in list(pycountry.countries)[0:count]:
countries = self.Country.create([{
'name': country.name,
'code': country.alpha2,
}])
try:
divisions = pycountry.subdivisions.get(
country_code=country.alpha2
)
except KeyError:
pass
else:
for subdivision in list(divisions)[0:count]:
self.Subdivision.create([{
'country': countries[0].id,
'name': subdivision.name,
'code': subdivision.code,
'type': subdivision.type.lower(),
}])
def _create_pricelists(self):
"""
Create the pricelists
"""
# Setup the pricelists
self.party_pl_margin = Decimal('1.10')
self.guest_pl_margin = Decimal('1.20')
user_price_list, = self.PriceList.create([{
'name': 'PL 1',
'company': self.company.id,
'lines': [
('create', [{
'formula': 'unit_price * %s' % self.party_pl_margin
}])
],
}])
guest_price_list, = self.PriceList.create([{
'name': 'PL 2',
'company': self.company.id,
'lines': [
('create', [{
'formula': 'unit_price * %s' % self.guest_pl_margin
}])
],
}])
return guest_price_list.id, user_price_list.id
def setup_defaults(self):
"""
Setup the defaults
"""
with Transaction().set_context(company=None):
self.usd, = self.Currency.create([{
'name': 'US Dollar',
'code': 'USD',
'symbol': '$',
}])
self.party, = self.Party.create([{
'name': 'Openlabs',
}])
self.company, = self.Company.create([{
'party': self.party.id,
'currency': self.usd
}])
self.User.write(
[self.User(USER)], {
'main_company': self.company.id,
'company': self.company.id,
}
)
CONTEXT.update(self.User.get_preferences(context_only=True))
# Create Fiscal Year
self._create_fiscal_year(company=self.company.id)
# Create Chart of Accounts
self._create_coa_minimal(company=self.company.id)
# Create a payment term
payment_term, = self._create_payment_term()
channel_price_list, user_price_list = self._create_pricelists()
party1, = self.Party.create([{
'name': 'Guest User',
}])
party2, = self.Party.create([{
'name': 'Registered User',
'sale_price_list': user_price_list,
}])
self.party2 = party2
party3, = self.Party.create([{
'name': 'Registered User 2',
}])
sale_config = self.SaleConfig(1)
sale_config.payment_authorize_on = 'manual'
sale_config.payment_capture_on = 'sale_process'
sale_config.gift_card_method = 'order'
sale_config.save()
# Create users and assign the pricelists to them
self.guest_user, = self.NereidUser.create([{
'party': party1.id,
'display_name': 'Guest User',
'email': '[email protected]',
'password': 'password',
'company': self.company.id,
}])
self.registered_user, = self.NereidUser.create([{
'party': party2.id,
'display_name': 'Registered User',
'email': '[email protected]',
'password': 'password',
'company': self.company.id,
}])
self.registered_user2, = self.NereidUser.create([{
'party': party3.id,
'display_name': 'Registered User 2',
'email': '[email protected]',
'password': 'password2',
'company': self.company.id,
}])
self._create_countries()
self.available_countries = self.Country.search([], limit=5)
warehouse, = self.Location.search([
('type', '=', 'warehouse')
], limit=1)
location, = self.Location.search([
('type', '=', 'storage')
], limit=1)
en_us, = self.Language.search([('code', '=', 'en_US')])
self.locale_en_us, = self.Locale.create([{
'code': 'en_US',
'language': en_us.id,
'currency': self.usd.id,
}])
self.sale_tax, = self.Tax.create([{
'name': 'Sales Tax',
'description': 'Sales Tax',
'type': 'percentage',
'rate': Decimal('0.05'), # Rate 5%
'company': self.company.id,
'invoice_account': self._get_account_by_kind('other').id,
'credit_note_account': self._get_account_by_kind('other').id,
}])
self.channel, = self.SaleChannel.create([{
'name': 'Default Channel',
'price_list': channel_price_list,
'warehouse': warehouse,
'payment_term': payment_term,
'company': self.company.id,
'currency': self.company.currency.id,
'invoice_method': 'order',
'shipment_method': 'order',
'source': 'webshop',
'create_users': [('add', [USER])],
}])
self.User.set_preferences({'current_channel': self.channel})
self.User.write(
[self.User(USER)], {
'main_company': self.company.id,
'company': self.company.id,
'current_channel': self.channel,
}
)
self.default_node, = self.Node.create([{
'name': 'root',
'slug': 'root',
}])
self.default_menuitem, = self.MenuItem.create([{
'type_': 'view',
'title': 'Test Title'
}])
self.NereidWebsite.create([{
'name': 'localhost',
'channel': self.channel,
'company': self.company.id,
'application_user': USER,
'default_locale': self.locale_en_us.id,
'guest_user': self.guest_user,
'countries': [('add', self.available_countries)],
'currencies': [('add', [self.usd.id])],
'homepage_menu': self.default_menuitem.id,
}])
# Create an article category
article_categ, = self.ArticleCategory.create([{
'title': 'Test Categ',
'unique_name': 'test-categ',
}])
self.Article.create([{
'title': 'Test Article',
'uri': 'test-article',
'content': 'Test Content',
'sequence': 10,
'categories': [('add', [article_categ.id])],
}])
# Product categories
self.category, = self._create_product_category('categ1', [{}])
self.category2, = self._create_product_category('categ2', [{}])
self.category3, = self._create_product_category('categ3', [{}])
self.Account.write(
self.Account.search([]), {'party_required': True}
)
def login(self, client, username, password, assert_=True):
"""
Login method.
:param client: Instance of the test client
:param username: The username, usually email
:param password: The password to login
        :param assert_: Boolean value indicating whether the login must
                        succeed. If the login fails, an assertion error
                        is raised.
"""
rv = client.post(
'/login', data={
'email': username,
'password': password,
}
)
if assert_:
self.assertEquals(rv.status_code, 302)
return rv
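# A minimal sketch of how a concrete test case could build on BaseTestCase
# (hypothetical route and assertions; DB_NAME would come from
# trytond.tests.test_tryton and get_app() is provided by NereidTestCase):
#
#     from trytond.tests.test_tryton import DB_NAME
#
#     class TestCatalog(BaseTestCase):
#         def test_0010_product_page(self):
#             with Transaction().start(DB_NAME, USER, context=CONTEXT):
#                 self.setup_defaults()
#                 self.create_test_products()
#                 app = self.get_app()
#                 with app.test_client() as c:
#                     self.login(c, '[email protected]', 'password')
#                     rv = c.get('/product/product-1')
#                     self.assertEqual(rv.status_code, 200)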
|
|
############################################################
# Copyright 2010 Sandia Corporation.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
############################################################
# Contact: Philippe Pebay, Sandia National Laboratories, [email protected]
############################################################
############################################################
from vtk import *
import sys
import getopt
############################################################
############################################################
# Global variable for convenience
verbosity = 0
############################################################
############################################################
# Usage function
def Usage( outModelPrefix, outDataName ):
print "Usage:"
print "\t [-h] Help: print this message and exit"
print "\t -d <filename> name of CSV input data file"
print "\t [-c <filename>] name of CSV file specifying columns of interest. Default: all columns are of interest"
print "\t -e <engine> Type of statistics engine. Available engines are:"
print "\t descriptive"
print "\t order"
print "\t contingency"
print "\t correlative"
print "\t multicorrelative"
print "\t pca"
print "\t kmeans"
print "\t [-o <bitcode>] Engine options bitcode. Default is 0. Available bits are:"
print "\t 1st bit: assess"
print "\t 2nd bit: test"
print "\t [-m <prefix>] prefix of CSV input model file(s). Default: calculate model from scratch"
print "\t [-p <number>] number of primary tables in input model (only used for order statistics)"
print "\t [-u] update input model (if data are provided as well). NB: update happens before assessment"
print "\t [-s <prefix>] prefix of CSV output model (statistics) file(s)"
print "\t [-a <filename>] name of CSV output data (annotated) file"
print "\t [-t <filename>] name of CSV statistical test results file"
print "\t [-v] Increase verbosity (from no flag = silent to -vvv = print all tables)"
sys.exit( 1 )
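# Example invocation (hypothetical file names, shown for illustration only):
#
#     python <this_script>.py -d measurements.csv -e descriptive -o 1 \
#         -s model -a annotated.csv -v
#
# computes descriptive statistics on measurements.csv, assesses the data
# against the learned model (1st option bit set), writes the model tables to
# model-0.csv, model-1.csv, ..., and writes the annotated data to
# annotated.csv.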
############################################################
############################################################
# Parse command line
def ParseCommandLine():
# Declare use of global variable
global verbosity
# Default values
options = 0
inDataName = ""
inModelPrefix = ""
inModelTables = 0
updateModel = False
haruspexName = ""
outModelPrefix = ""
outDataName = ""
outTestName = ""
columnsListName =""
# Try to hash command line with respect to allowable flags
try:
opts,args = getopt.getopt(sys.argv[1:], 'hd:e:o:m:p:us:a:t:c:v')
except getopt.GetoptError:
Usage( outModelPrefix, outDataName )
sys.exit( 1 )
# First verify that the helper has not been requested (supersedes everything else)
# NB: handled first and separately so default values cannot be altered in helper message
for o,a in opts:
if o == "-h":
Usage( outModelPrefix, outDataName )
# Parse arguments and assign corresponding variables
for o,a in opts:
if o == "-d":
inDataName = a
elif o == "-e":
haruspexName = a
elif o == "-o":
options = a
elif o == "-m":
inModelPrefix = a
elif o == "-p":
inModelTables = a
elif o == "-u":
updateModel = True
elif o == "-s":
outModelPrefix = a
elif o == "-a":
outDataName = a
elif o == "-t":
outTestName = a
elif o == "-c":
columnsListName = a
elif o == "-v":
verbosity += 1
if not inDataName:
print "ERROR: a data file name required!"
sys.exit( 1 )
if not haruspexName:
print "ERROR: a statistics engine name is required!"
sys.exit( 1 )
if verbosity > 0:
print "# Parsed command line:"
print " Type of statistics:", haruspexName
if columnsListName != "":
print " Columns of interest in file:", columnsListName
else:
print " Columns of interest: all"
print " Input data file:", inDataName
if inModelPrefix != "":
print " Input model file prefix:", inModelPrefix
if inModelTables > 0:
print " Specified input model tables :", inModelTables
else:
print " No input model"
print " Output data file:", outDataName
print " Output model file prefix:", outModelPrefix
print
return [ inDataName, \
inModelPrefix, \
inModelTables, \
updateModel, \
columnsListName, \
haruspexName, \
options, \
outDataName, \
outTestName, \
outModelPrefix ]
############################################################
############################################################
# Turn haruspex name into vtkStatistics object and ancillary parameters
def InstantiateStatistics( haruspexName ):
# Declare use of global variable
global verbosity
if haruspexName == "descriptive":
haruspex = vtkDescriptiveStatistics()
elif haruspexName == "order":
haruspex = vtkOrderStatistics()
elif haruspexName == "contingency":
haruspex = vtkContingencyStatistics()
elif haruspexName == "correlative":
haruspex = vtkCorrelativeStatistics()
elif haruspexName == "multicorrelative":
haruspex = vtkMultiCorrelativeStatistics()
elif haruspexName == "pca":
haruspex = vtkPCAStatistics()
elif haruspexName == "kmeans":
haruspex = vtkKMeansStatistics()
else:
print "ERROR: Invalid statistics engine:", haruspexName
sys.exit( 1 )
if verbosity > 0:
print "# Instantiated a", haruspex.GetClassName(), "object"
print
return haruspex
############################################################
############################################################
# Read input CSV model table as input port
def ReadInModelTable( inModelPrefix, tabIndex ):
# Declare use of global variable
global verbosity
if verbosity > 0:
print "# Reading input model table", tabIndex
# Set CSV reader parameters
inTableReader = vtkDelimitedTextReader()
inTableReader.SetFieldDelimiterCharacters(",")
inTableReader.SetHaveHeaders( True )
inTableReader.SetDetectNumericColumns( True )
inTableReader.SetFileName( inModelPrefix + "-" + str( tabIndex ) + ".csv" )
inTableReader.Update()
if verbosity > 0:
table = inTableReader.GetOutput()
print " Number of columns:", table.GetNumberOfColumns()
print " Number of rows:", table.GetNumberOfRows()
if verbosity > 1:
inTableReader.GetOutput().Dump( 16 )
print
return inTableReader
############################################################
############################################################
# Read input CSV data as input port
def ReadInData( inDataName ):
# Declare use of global variable
global verbosity
if verbosity > 0:
print "# Reading input data"
# Set CSV reader parameters
inDataReader = vtkDelimitedTextReader()
inDataReader.SetFieldDelimiterCharacters(",")
inDataReader.SetHaveHeaders( True )
inDataReader.SetDetectNumericColumns( True )
inDataReader.SetFileName( inDataName )
inDataReader.Update()
if verbosity > 0:
table = inDataReader.GetOutput()
print " Number of columns:", table.GetNumberOfColumns()
print " Number of rows:", table.GetNumberOfRows()
print
if verbosity > 2:
print "# Input data:"
inDataReader.GetOutput().Dump( 16 )
print
return inDataReader
############################################################
############################################################
# Read list of columns of interest
def ReadColumnsList( columnsListName ):
# Declare use of global variable
global verbosity
if verbosity > 0:
print "# Reading list of columns of interest:"
# Set CSV reader parameters
columnsListReader = vtkDelimitedTextReader()
columnsListReader.SetFieldDelimiterCharacters(",")
columnsListReader.SetHaveHeaders( False )
columnsListReader.SetDetectNumericColumns( True )
columnsListReader.SetFileName( columnsListName )
columnsListReader.Update()
# Figure number of columns of interest
table = columnsListReader.GetOutput()
n = table.GetNumberOfColumns()
if verbosity > 0:
print " Number of columns of interest:", n
    # Now construct the list of columns of interest
columnsList = []
for i in range( 0, n ):
columnsList.append( table.GetColumn( i ).GetValue( 0 ) )
if verbosity > 1:
print " Columns of interest are:", columnsList
if verbosity > 0:
print
return columnsList
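# NB: the columns-of-interest file read above is expected to contain a single
# header-less CSV row of 0-based column indices into the data file (not
# column names); e.g. a file whose only line is "0,2,5" selects the first,
# third, and sixth data columns, which CalculateStatistics() later resolves
# to names via GetColumnName().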
############################################################
############################################################
# Write table from haruspex output port (i.e., for data or tests)
def WriteOutTable( haruspex, outPort, outFileName, outPortName, threshold ):
# Declare use of global variable
global verbosity
if outFileName == "":
if verbosity > 0:
print "# No output table of", outPortName, "required"
print
return
if verbosity > 0:
print "# Saving output table of", outPortName
# Set CSV writer parameters
outTableWriter = vtkDelimitedTextWriter()
outTableWriter.SetFieldDelimiter(",")
outTableWriter.SetFileName( outFileName )
outTableWriter.SetInputConnection( haruspex.GetOutputPort( outPort ) )
outTableWriter.Update()
if verbosity > 0:
print " Wrote", outPortName
if verbosity > threshold:
haruspex.GetOutput( outPort ).Dump( 16 )
print
############################################################
############################################################
# Write haruspex output model
def WriteOutModel( haruspex, outModelPrefix ):
# Declare use of global variable
global verbosity
if outModelPrefix == "":
if verbosity > 0:
print "# No output model (statistics) required"
print
return
if verbosity > 0:
print "# Saving output model (statistics):"
# Set CSV writer parameters
outModelWriter = vtkDelimitedTextWriter()
outModelWriter.SetFieldDelimiter(",")
# Verify that model is a vtkMultiBlockDataSet, error out otherwise
outModelType = haruspex.GetOutputDataObject( 1 ).GetClassName()
if outModelType != "vtkMultiBlockDataSet":
print "ERROR: unsupported type of output model!"
sys.exit( 1 )
# Must iterate over all blocks of the vtkMultiBlockDataSet
outModel = haruspex.GetOutputDataObject( 1 )
n = outModel.GetNumberOfBlocks()
for i in range( 0, n ):
# Straightforward CSV file dump of a vtkTable
outModelName = outModelPrefix + "-" + str( i )+ ".csv"
outModelWriter.SetFileName( outModelName )
table = outModel.GetBlock( i )
outModelWriter.SetInput( table )
outModelWriter.Update()
if verbosity > 0:
print " Wrote", outModelName
if verbosity > 1:
table.Dump( 16 )
print
############################################################
############################################################
# Calculate statistics
def CalculateStatistics( inDataReader, inModelReader, nPrimaryTables, updateModel, columnsList, haruspex, options ):
# Declare use of global variable
global verbosity
if verbosity > 0:
print "# Calculating statistics:"
# Output port of data reader becomes input connection of haruspex
haruspex.AddInputConnection( inDataReader.GetOutputPort() )
# Get the output table of the data reader, which becomes the input data
inData = inDataReader.GetOutput()
# Figure number of columns of interest. If no list was provided, use them all
if columnsList == []:
columnsList = range( 0, inData.GetNumberOfColumns() )
n = len( columnsList )
# Generate list of columns of interest, depending on number of variables
if haruspex.IsA( "vtkDescriptiveStatistics" ) or haruspex.IsA( "vtkOrderStatistics" ):
        # Univariate case: one request for each column
for i in range( 0, n ):
colName = inData.GetColumnName( columnsList[i] )
if verbosity > 0:
print " Requesting column", colName
haruspex.AddColumn( colName )
elif haruspex.IsA( "vtkCorrelativeStatistics" ) or haruspex.IsA( "vtkContingencyStatistics" ):
# Bivariate case: generate all possible pairs
for i in range( 0, n ):
colNameX = inData.GetColumnName( columnsList[i] )
for j in range( i+1, n ):
colNameY = inData.GetColumnName( columnsList[j] )
if verbosity > 0:
print " Requesting column pair (", colNameX, ",", colNameY, ")"
haruspex.AddColumnPair( colNameX, colNameY )
else:
# Multivariate case: generate single request containing all columns
for i in range( 0, n ):
colName = inData.GetColumnName( columnsList[i] )
haruspex.SetColumnStatus( colName, 1 )
if verbosity > 0:
print " Adding column", colName, "to the request"
# Complete column selection request
haruspex.RequestSelectedColumns()
# Figure which options were requested
if int( options ) % 2:
assessOption = True
if verbosity > 0:
print " Assess option is on"
else:
assessOption = False
if verbosity > 0:
print " Assess option is off"
options = int( options ) >> 1
if int( options ) % 2:
haruspex.SetTestOption( True )
if verbosity > 0:
print " Test option is on"
else:
haruspex.SetTestOption( False )
if verbosity > 0:
print " Test option is off"
if verbosity > 0:
print
# If an input model was provided, then update it first, otherwise run in a single pass
if inModelReader == None:
# No model reader: then Learn, Derive, and possibly Assess in a single pass
haruspex.SetLearnOption( True )
haruspex.SetDeriveOption( True )
haruspex.SetAssessOption( assessOption )
haruspex.Update()
else:
# Then create vtkMultiBlockDataSet with correspondingly many blocks
inModel = vtkMultiBlockDataSet()
inModel.SetNumberOfBlocks( nPrimaryTables )
# Now iterate over all readers to obtain tables
for t in range( 0, nPrimaryTables ):
inTableReader = inModelReader[t]
inTable = inTableReader.GetOutput()
# Handle special case of primary tables of order statistics
if ( haruspex.GetClassName() == "vtkOrderStatistics" ):
if verbosity > 0:
if ( t == 0 ):
if ( nPrimaryTables > 1 ):
print "# Converting cardinality column of", nPrimaryTables, "input histograms"
else:
print "# Converting cardinality column of 1 input histogram"
# Create a programmable filter whose input is the histogram
convertOrderTab = vtkProgrammableFilter()
convertOrderTab.SetInput( inTable )
# Define table converter callback for programmable filter
def ConvertOrderTableCallback():
readTable = convertOrderTab.GetInput()
convTable = convertOrderTab.GetOutput()
# Create cardinality column with appropriate type
nRow = readTable.GetNumberOfRows()
vCol = readTable.GetColumnByName( "Value" )
convTable.AddColumn( vCol )
cCol = vtkIdTypeArray()
cCol.SetName( "Cardinality" )
cCol.SetNumberOfValues( nRow )
convTable.AddColumn( cCol )
# Loop over all input rows and create output rows
for r in range( 0, nRow ):
# Retrieve cardinalities and save them as vtkIdTypes
c = readTable.GetValueByName( r, "Cardinality" ).ToInt()
cCol.SetValue( r, c )
                # Set callback and run the programmable filter
convertOrderTab.SetExecuteMethod( ConvertOrderTableCallback )
convertOrderTab.Update()
# Retrieve converted table from filter output
colName = inData.GetColumnName( columnsList[t] )
inModel.GetMetaData( t ).Set( vtkCompositeDataSet.NAME(), colName )
inTable = convertOrderTab.GetOutput()
if verbosity > 0:
print " Variable", colName, "converted"
if verbosity > 1:
inTable.Dump( 16 )
# Handle special case of second table of contingency statistics
if ( t > 0 and haruspex.GetClassName() == "vtkContingencyStatistics" ):
if verbosity > 0:
print "# Converting input contingency table to appropriate column types"
# Create a programmable filter whose input is the contingency table
convertContingencyTab = vtkProgrammableFilter()
convertContingencyTab.SetInput( inTable )
# Define table converter callback for programmable filter
def ConvertContingencyTableCallback():
readTable = convertContingencyTab.GetInput()
convTable = convertContingencyTab.GetOutput()
# Create columns with appropriate names and formats
kCol = vtkIdTypeArray()
kCol.SetName( "Key" )
convTable.AddColumn( kCol )
xCol = vtkStringArray()
xCol.SetName( "x" )
convTable.AddColumn( xCol )
yCol = vtkStringArray()
yCol.SetName( "y" )
convTable.AddColumn( yCol )
cCol = vtkIdTypeArray()
cCol.SetName( "Cardinality" )
convTable.AddColumn( cCol )
# Loop over all input rows and create output rows
nRow = readTable.GetNumberOfRows()
row = vtkVariantArray()
row.SetNumberOfValues( 4 )
for r in range( 0, nRow ):
# Retrieve primary statistics and convert to correct type
k = readTable.GetValueByName( r, "Key" ).ToInt()
row.SetValue( 0, k )
x = readTable.GetValueByName( r, "x" ).ToString()
row.SetValue( 1, x )
y = readTable.GetValueByName( r, "y" ).ToString()
row.SetValue( 2, y )
c = readTable.GetValueByName( r, "Cardinality" ).ToInt()
row.SetValue( 3, c )
convTable.InsertNextRow( row )
                # Set callback and run the programmable filter
convertContingencyTab.SetExecuteMethod( ConvertContingencyTableCallback )
convertContingencyTab.Update()
# Retrieve converted table from filter output
inTable = convertContingencyTab.GetOutput()
if verbosity > 1:
inTable.Dump( 16 )
# Set retrieved table to corresponding model block
inModel.SetBlock( t, inTable )
# If model update is required, then learn new model and aggregate, otherwise assess directly
if updateModel == True:
            # Store the input model for subsequent aggregation
collection = vtkDataObjectCollection()
collection.AddItem( inModel )
# Then learn a new primary model (do not derive nor assess)
haruspex.SetLearnOption( True )
haruspex.SetDeriveOption( False )
haruspex.SetAssessOption( False )
haruspex.Update()
# Aggregate old and new models
collection.AddItem( haruspex.GetOutputDataObject( 1 ) )
aggregated = vtkMultiBlockDataSet()
haruspex.Aggregate( collection, aggregated )
# Finally, derive and possibly assess using the aggregated model (do not learn)
haruspex.SetInput( 2, aggregated )
haruspex.SetLearnOption( False )
haruspex.SetDeriveOption( True )
haruspex.SetAssessOption( assessOption )
haruspex.Update()
else:
# Only derive and possibly assess using the input model (do not aggregate)
haruspex.SetInput( 2, inModel )
haruspex.SetLearnOption( False )
haruspex.SetDeriveOption( True )
haruspex.SetAssessOption( assessOption )
haruspex.Update()
print
############################################################
############################################################
# Main function
def main():
# Parse command line
[ inDataName, \
inModelPrefix, \
inModelTables, \
updateModel, \
columnsListName, \
haruspexName, \
options, \
outDataName, \
outTestName, \
outModelPrefix ] = ParseCommandLine()
# Verify that haruspex name makes sense and if so instantiate accordingly
haruspex = InstantiateStatistics( haruspexName )
# Set input data reader
inDataReader = ReadInData( inDataName )
# Set input model readers if prefix was provided
if inModelPrefix != "":
inModelReader = []
nPrimaryTables = haruspex.GetNumberOfPrimaryTables()
# Handle special case of variable number of tables which must be specified
if nPrimaryTables == -1:
nPrimaryTables = int( inModelTables )
if ( haruspexName == "order" and nPrimaryTables < 1 ):
print "ERROR: a number of primary tables must be given for order statistics"
sys.exit( 1 )
# Now loop over all primary tables
for t in range( 0, nPrimaryTables ):
tableReader = ReadInModelTable( inModelPrefix, t )
inModelReader.append( tableReader )
else:
inModelReader = None
nPrimaryTables = 0
# Read list of columns of interest
if columnsListName:
columnsList = ReadColumnsList( columnsListName )
else:
columnsList = []
# Calculate statistics
CalculateStatistics( inDataReader, inModelReader, nPrimaryTables, updateModel, columnsList, haruspex, options )
# Save output (annotated) data
WriteOutTable( haruspex, 0, outDataName, "annotated data", 2 )
# Save output of statistical tests
WriteOutTable( haruspex, 2, outTestName, "statistical test results", 1 )
# Save output model (statistics)
WriteOutModel( haruspex, outModelPrefix )
############################################################
############################################################
if __name__ == "__main__":
main()
############################################################
|
|
from operator import attrgetter
from sqlalchemy import Column, Date, ForeignKey, Integer, Table, Unicode
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import backref, reconstructor, relationship
from sqlalchemy.orm.collections import MappedCollection
from sqlalchemy.util import OrderedDict
from cardboard.db import Base, Session
card_ability = Table("card_ability", Base.metadata,
Column("card", Unicode, ForeignKey("card.name"), primary_key=True),
Column("ability", Integer, ForeignKey("ability.id"), primary_key=True),
)
card_type = Table("card_type", Base.metadata,
Column("card", Unicode, ForeignKey("card.name"), primary_key=True),
Column("type", Unicode, ForeignKey("type.name"), primary_key=True),
)
card_subtype = Table("card_subtype", Base.metadata,
Column("card", Unicode, ForeignKey("card.name"), primary_key=True),
Column("subtype", Unicode, ForeignKey("subtype.name"), primary_key=True),
)
card_supertype = Table("card_supertype", Base.metadata,
Column("card", Unicode, ForeignKey("card.name"), primary_key=True),
Column(
"supertype", Unicode, ForeignKey("supertype.name"), primary_key=True
),
)
class OrderedMappedCollection(OrderedDict, MappedCollection):
def __init__(self, keyfunc, *args, **kwargs):
MappedCollection.__init__(self, keyfunc=keyfunc)
OrderedDict.__init__(self, *args, **kwargs)
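# NB: combines OrderedDict's ordered iteration with MappedCollection's keyed
# access, so that the set_appearances and card_appearances collections below
# stay keyed by set/card while preserving the order in which members are
# added or loaded.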
class Ability(Base):
id = Column(Integer, primary_key=True)
description = Column(Unicode, unique=True)
def __repr__(self):
elip = " ... " if len(self.description) > 50 else ""
return "<Ability Model: {.description:.50}{}>".format(self, elip)
class Card(Base):
name = Column(Unicode, primary_key=True)
mana_cost = Column(Unicode)
type_objects = relationship(
"Type", secondary=card_type, collection_class=set, backref=backref(
"cards", lazy="dynamic", collection_class=set,
),
)
subtype_objects = relationship(
"Subtype", secondary=card_subtype, collection_class=set,
backref=backref("cards", lazy="dynamic", collection_class=set),
)
supertype_objects = relationship(
"Supertype", secondary=card_supertype, collection_class=set,
backref=backref("cards", lazy="dynamic", collection_class=set),
)
ability_objects = relationship("Ability",
backref=backref("cards", lazy="dynamic"), secondary=card_ability
)
types = association_proxy(
"type_objects", "name", creator=lambda name : Type(name=name)
)
subtypes = association_proxy(
"subtype_objects", "name", creator=lambda name : Subtype(name=name)
)
supertypes = association_proxy(
"supertype_objects", "name", creator=lambda name : Supertype(name=name)
)
abilities = association_proxy(
"ability_objects", "description",
creator=lambda description : Ability(description=description)
)
power = Column(Unicode(3))
toughness = Column(Unicode(3))
loyalty = Column(Integer)
sets = association_proxy(
"set_appearances", "rarity",
creator=lambda s, r : SetAppearance(set=s, rarity=r)
)
def __repr__(self):
return "<Card Model: {.name}>".format(self)
@property
def first_appeared_in(self):
# XXX: I don't know how to properly do the order_by using SQLAlchemy
# yet. See here for an example:
# https://groups.google.com/forum/#!topic/sqlalchemy/cQ_Y2gJWj28
return min(self.sets, key=attrgetter("released"))
@property
def last_appeared_in(self):
# XXX
return max(self.sets, key=attrgetter("released"))
class Set(Base):
code = Column(Unicode(5), primary_key=True)
name = Column(Unicode, nullable=False, unique=True)
released = Column(Date, nullable=False)
cards = association_proxy(
"card_appearances", "rarity",
creator=lambda c, r : SetAppearance(card=c, rarity=r),
)
def __repr__(self):
return "<Set Model: {.name}>".format(self)
@property
def new_cards(self):
"""
The cards that first appear in this set, and not in any earlier one.
"""
# XXX: Once this is worked out on the Card model there will probably be
# a better way to do it here too.
return {card for card in self.cards if card.first_appeared_in == self}
@property
def reprints(self):
"""
The cards that were reprinted from earlier sets.
"""
return {card for card in self.cards if card.first_appeared_in != self}
class SetAppearance(Base):
    card_name = Column(Unicode, ForeignKey("card.name"), primary_key=True)
    set_code = Column(Unicode(5), ForeignKey("set.code"), primary_key=True)
rarity = Column(Unicode(1), primary_key=True)
card = relationship(Card,
backref=backref(
"set_appearances",
cascade="all, delete-orphan",
collection_class=(
lambda : OrderedMappedCollection(attrgetter("set"))
),
)
)
set = relationship(Set,
backref=backref(
"card_appearances",
cascade="all, delete-orphan",
order_by=set_code,
collection_class=(
lambda : OrderedMappedCollection(attrgetter("card"))
),
)
)
def __repr__(self):
name = getattr(self.card, "name", None)
code = getattr(self.set, "code", None)
return "<{} ({}-{.rarity})>".format(name, code, self)
class Type(Base):
name = Column(Unicode, primary_key=True)
def __repr__(self):
return "<Type Model: {.name}>".format(self)
class Subtype(Base):
name = Column(Unicode, primary_key=True)
def __repr__(self):
return "<Subtype Model: {.name}>".format(self)
class Supertype(Base):
name = Column(Unicode, primary_key=True)
def __repr__(self):
return "<Supertype Model: {.name}>".format(self)
Base.metadata.create_all()
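# Minimal usage sketch (hypothetical card data; assumes Session imported from
# cardboard.db above is a configured session factory). It runs only when this
# module is executed directly, and shows how the association proxies let a
# Card be populated with plain strings instead of Type/Ability instances.
if __name__ == "__main__":
    session = Session()
    bolt = Card(name=u"Lightning Bolt", mana_cost=u"R")
    bolt.types.add(u"Instant")  # goes through the card_type association proxy
    bolt.abilities.append(
        u"Lightning Bolt deals 3 damage to target creature or player."
    )
    session.add(bolt)
    session.commit()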
|
|
from panda3d.core import *
from panda3d.direct import *
from direct.gui.DirectGui import *
from direct.showbase import DirectObject
from otp.avatar import AvatarPanel
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TTDialog
from otp.distributed import CentralLogger
IGNORE_SCALE = 0.06
STOP_IGNORE_SCALE = 0.04
class AvatarPanelBase(AvatarPanel.AvatarPanel):
def __init__(self, avatar, FriendsListPanel = None):
self.dialog = None
self.category = None
AvatarPanel.AvatarPanel.__init__(self, avatar, FriendsListPanel)
return
def getIgnoreButtonInfo(self):
if base.cr.avatarFriendsManager.checkIgnored(self.avId):
return (TTLocalizer.AvatarPanelStopIgnoring, self.handleStopIgnoring, STOP_IGNORE_SCALE)
else:
return (TTLocalizer.AvatarPanelIgnore, self.handleIgnore, IGNORE_SCALE)
def handleIgnore(self):
isAvatarFriend = base.cr.isFriend(self.avatar.doId)
isPlayerFriend = base.cr.playerFriendsManager.isAvatarOwnerPlayerFriend(self.avatar.doId)
isFriend = isAvatarFriend or isPlayerFriend
if isFriend:
self.dialog = TTDialog.TTGlobalDialog(
style=TTDialog.CancelOnly,
text=TTLocalizer.IgnorePanelAddFriendAvatar % self.avName,
text_wordwrap=18.5,
text_scale=0.06,
cancelButtonText=TTLocalizer.lCancel,
doneEvent='IgnoreBlocked',
command=self.freeLocalAvatar)
else:
self.dialog = TTDialog.TTGlobalDialog(
style=TTDialog.TwoChoice,
text=TTLocalizer.IgnorePanelAddIgnore % self.avName,
text_wordwrap=18.5,
text_scale=TTLocalizer.APBdialog,
okButtonText=TTLocalizer.AvatarPanelIgnore,
cancelButtonText=TTLocalizer.lCancel,
doneEvent='IgnoreConfirm',
command=self.handleIgnoreConfirm)
DirectLabel(
parent=self.dialog,
relief=None,
pos=(0, TTLocalizer.APBdirectLabelPosY, 0.125),
text=TTLocalizer.IgnorePanelTitle,
textMayChange=0,
text_scale=0.08)
self.dialog.show()
self.__acceptStoppedStateMsg()
self.requestStopped()
return
def handleStopIgnoring(self):
self.dialog = TTDialog.TTGlobalDialog(style=TTDialog.TwoChoice, text=TTLocalizer.IgnorePanelRemoveIgnore % self.avName, text_wordwrap=18.5, text_scale=0.06, okButtonText=TTLocalizer.AvatarPanelStopIgnoring, cancelButtonText=TTLocalizer.lCancel, buttonPadSF=4.0, doneEvent='StopIgnoringConfirm', command=self.handleStopIgnoringConfirm)
DirectLabel(parent=self.dialog, relief=None, pos=(0, TTLocalizer.APBdirectLabelPosY, 0.15), text=TTLocalizer.IgnorePanelTitle, textMayChange=0, text_scale=0.08)
self.dialog.show()
self.__acceptStoppedStateMsg()
self.requestStopped()
return
def handleIgnoreConfirm(self, value):
if value == -1:
self.freeLocalAvatar()
return
base.cr.avatarFriendsManager.addIgnore(self.avId)
self.dialog = TTDialog.TTGlobalDialog(style=TTDialog.Acknowledge, text=TTLocalizer.IgnorePanelIgnore % self.avName, text_wordwrap=18.5, text_scale=0.06, topPad=0.1, doneEvent='IgnoreComplete', command=self.handleDoneIgnoring)
DirectLabel(parent=self.dialog, relief=None, pos=(0, TTLocalizer.APBdirectLabelPosY, 0.15), text=TTLocalizer.IgnorePanelTitle, textMayChange=0, text_scale=0.08)
self.dialog.show()
self.__acceptStoppedStateMsg()
self.requestStopped()
return
def handleStopIgnoringConfirm(self, value):
if value == -1:
self.freeLocalAvatar()
return
base.cr.avatarFriendsManager.removeIgnore(self.avId)
self.dialog = TTDialog.TTGlobalDialog(style=TTDialog.Acknowledge, text=TTLocalizer.IgnorePanelEndIgnore % self.avName, text_wordwrap=18.5, text_scale=0.06, topPad=0.1, doneEvent='StopIgnoringComplete', command=self.handleDoneIgnoring)
DirectLabel(parent=self.dialog, relief=None, pos=(0, TTLocalizer.APBdirectLabelPosY, 0.15), text=TTLocalizer.IgnorePanelTitle, textMayChange=0, text_scale=0.08)
self.dialog.show()
self.__acceptStoppedStateMsg()
self.requestStopped()
return
def handleDoneIgnoring(self, value):
self.freeLocalAvatar()
def handleReport(self):
if base.cr.csm.hasReportedPlayer(self.avId):
self.alreadyReported()
else:
self.confirmReport()
def confirmReport(self):
if base.cr.isFriend(self.avId) or base.cr.playerFriendsManager.isPlayerFriend(self.avId):
string = TTLocalizer.ReportPanelBodyFriends
titlePos = 0.41
else:
string = TTLocalizer.ReportPanelBody
titlePos = 0.35
self.dialog = TTDialog.TTGlobalDialog(style=TTDialog.TwoChoice, text=string % self.avName, text_wordwrap=18.5, text_scale=0.06, okButtonText=TTLocalizer.AvatarPanelReport, cancelButtonText=TTLocalizer.lCancel, doneEvent='ReportConfirm', command=self.handleReportConfirm)
DirectLabel(parent=self.dialog, relief=None, pos=(0, 0, titlePos), text=TTLocalizer.ReportPanelTitle, textMayChange=0, text_scale=0.08)
self.dialog.show()
self.__acceptStoppedStateMsg()
self.requestStopped()
return
def handleReportConfirm(self, value):
self.cleanupDialog()
if value == 1:
self.chooseReportCategory()
else:
self.requestWalk()
def alreadyReported(self):
self.dialog = TTDialog.TTGlobalDialog(style=TTDialog.Acknowledge, text=TTLocalizer.ReportPanelAlreadyReported % self.avName, text_wordwrap=18.5, text_scale=0.06, topPad=0.1, doneEvent='AlreadyReported', command=self.handleAlreadyReported)
DirectLabel(parent=self.dialog, relief=None, pos=(0, 0, 0.2), text=TTLocalizer.ReportPanelTitle, textMayChange=0, text_scale=0.08)
self.dialog.show()
self.__acceptStoppedStateMsg()
self.requestStopped()
return
def handleAlreadyReported(self, value):
self.freeLocalAvatar()
def chooseReportCategory(self):
self.dialog = TTDialog.TTGlobalDialog(pos=(0, 0, 0.4), style=TTDialog.CancelOnly, text=TTLocalizer.ReportPanelCategoryBody % (self.avName, self.avName), text_wordwrap=18.5, text_scale=0.06, topPad=0.05, midPad=0.75, cancelButtonText=TTLocalizer.lCancel, doneEvent='ReportCategory', command=self.handleReportCategory)
DirectLabel(parent=self.dialog, relief=None, pos=(0, 0, 0.225), text=TTLocalizer.ReportPanelTitle, textMayChange=0, text_scale=0.08)
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
DirectButton(parent=self.dialog, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=(2.125, 1.0, 1.0), text=TTLocalizer.ReportPanelCategoryLanguage, text_scale=0.06, text_pos=(0, -0.0124), pos=(0, 0, -0.3), command=self.handleReportCategory, extraArgs=[0])
DirectButton(parent=self.dialog, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=(2.15, 1.0, 1.0), text=TTLocalizer.ReportPanelCategoryPii, text_scale=0.06, text_pos=(0, -0.0125), pos=(0, 0, -0.425), command=self.handleReportCategory, extraArgs=[1])
DirectButton(parent=self.dialog, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=(2.125, 1.0, 1.0), text=TTLocalizer.ReportPanelCategoryRude, text_scale=0.06, text_pos=(0, -0.0125), pos=(0, 0, -0.55), command=self.handleReportCategory, extraArgs=[2])
DirectButton(parent=self.dialog, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=(2.125, 1.0, 1.0), text=TTLocalizer.ReportPanelCategoryName, text_scale=0.06, text_pos=(0, -0.0125), pos=(0, 0, -0.675), command=self.handleReportCategory, extraArgs=[3])
DirectButton(parent=self.dialog, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=(2.125, 1.0, 1.0), text=TTLocalizer.ReportPanelCategoryHacking, text_scale=0.06, text_pos=(0, -0.0125), pos=(0, 0, -0.8), command=self.handleReportCategory, extraArgs=[4])
guiButton.removeNode()
self.dialog.show()
self.__acceptStoppedStateMsg()
self.requestStopped()
return
def handleReportCategory(self, value):
self.cleanupDialog()
if value >= 0:
self.category = value
self.confirmReportCategory(value)
else:
self.requestWalk()
def confirmReportCategory(self, category):
string = TTLocalizer.ReportPanelConfirmations[category]
string += '\n\n' + TTLocalizer.ReportPanelWarning
self.dialog = TTDialog.TTGlobalDialog(style=TTDialog.TwoChoice, text=string % self.avName, text_wordwrap=18.5, text_scale=0.06, topPad=0.1, okButtonText=TTLocalizer.AvatarPanelReport, cancelButtonText=TTLocalizer.lCancel, doneEvent='ReportConfirmCategory', command=self.handleReportCategoryConfirm)
DirectLabel(parent=self.dialog, relief=None, pos=(0, 0, 0.5), text=TTLocalizer.ReportPanelTitle, textMayChange=0, text_scale=0.08)
self.dialog.show()
self.__acceptStoppedStateMsg()
return
def handleReportCategoryConfirm(self, value):
self.cleanupDialog()
removed = 0
isPlayer = 0
if value > 0:
base.cr.csm.d_reportPlayer(self.avId, self.category)
if base.cr.isFriend(self.avId):
base.cr.removeFriend(self.avId)
removed = 1
if base.cr.playerFriendsManager.isPlayerFriend(self.playerId):
if self.playerId:
base.cr.playerFriendsManager.sendRequestRemove(self.playerId)
removed = 1
isPlayer = 1
self.reportComplete(removed, isPlayer)
else:
self.requestWalk()
def reportComplete(self, removed, isPlayer):
string = TTLocalizer.ReportPanelThanks
titlePos = 0.25
if removed:
if isPlayer:
string += ' ' + TTLocalizer.ReportPanelRemovedPlayerFriend % self.playerId
else:
string += ' ' + TTLocalizer.ReportPanelRemovedFriend % self.avName
titlePos = 0.3
self.dialog = TTDialog.TTGlobalDialog(style=TTDialog.Acknowledge, text=string, text_wordwrap=18.5, text_scale=0.06, topPad=0.1, doneEvent='ReportComplete', command=self.handleReportComplete)
DirectLabel(parent=self.dialog, relief=None, pos=(0, 0, titlePos), text=TTLocalizer.ReportPanelTitle, textMayChange=0, text_scale=0.08)
self.dialog.show()
self.__acceptStoppedStateMsg()
return
def handleReportComplete(self, value):
self.freeLocalAvatar()
def freeLocalAvatar(self, value = None):
self.cleanupDialog()
self.requestWalk()
def cleanupDialog(self):
if self.dialog:
self.dialog.ignore('exitingStoppedState')
self.dialog.cleanup()
self.dialog = None
return
def requestStopped(self):
if not base.cr.playGame.getPlace().fsm.getCurrentState().getName() == 'stickerBook':
if base.cr.playGame.getPlace().fsm.hasStateNamed('stopped'):
base.cr.playGame.getPlace().fsm.request('stopped')
else:
self.notify.warning('skipping request to stopped in %s' % base.cr.playGame.getPlace())
else:
self.cleanup()
def requestWalk(self):
if base.cr.playGame.getPlace().fsm.hasStateNamed('finalBattle'):
base.cr.playGame.getPlace().fsm.request('finalBattle')
elif base.cr.playGame.getPlace().fsm.hasStateNamed('walk'):
if base.cr.playGame.getPlace().getState() == 'stopped':
base.cr.playGame.getPlace().fsm.request('walk')
else:
self.notify.warning('skipping request to walk in %s' % base.cr.playGame.getPlace())
def __acceptStoppedStateMsg(self):
self.dialog.ignore('exitingStoppedState')
self.dialog.accept('exitingStoppedState', self.cleanupDialog)
|
|
########################################################################
# Copyright (c) 2001-2006 Ciranova, Inc. All Rights Reserved. #
# #
# Permission is hereby granted, free of charge, to any person #
# obtaining a copy of this software and associated documentation #
# ("Ciranova Open Code"), to use the Ciranova Open Code without #
# restriction, including without limitation the right to use, copy, #
# modify, merge, publish, distribute, sublicense, and sell copies of #
# the Ciranova Open Code, and to permit persons to whom the Ciranova #
# Open Code is furnished to do so, subject to the following #
# conditions: #
# #
# The above copyright notice and this permission notice must be #
# included in all copies and all distribution, redistribution, and #
# sublicensing of the Ciranova Open Code. THE CIRANOVA OPEN CODE IS #
# PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED #
# OR STATUTORY INCLUDING WITHOUT LIMITATION ANY WARRANTY OF #
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL CIRANOVA, INC. BE LIABLE FOR ANY #
# INDIRECT, PUNITIVE, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES #
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE CIRANOVA OPEN CODE #
# OR ANY USE OF THE CIRANOVA OPEN CODE, OR BE LIABLE FOR ANY CLAIM, #
# DAMAGES OR OTHER LIABILITY, HOWEVER IT ARISES AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN AN ACTION FOR CONTRACT, STRICT LIABILITY OR #
# TORT (INCLUDING NEGLIGENCE), OR OTHERWISE, ARISING FROM, OUT OF OR #
# IN CONNECTION WITH THE CIRANOVA OPEN CODE OR ANY USE OF THE #
# CIRANOVA OPEN CODE. The Ciranova Open Code is subject to U.S. #
# export control laws and may be subject to export or import #
# regulations in other countries, and all use of the Ciranova Open #
# Code must be in compliance with such laws and regulations. If any #
# license for the Ciranova Open Code is obtained pursuant to a #
# government contract, all use, distribution and/or copying by the #
# U.S. government shall be subject to this permission notice and any #
# applicable FAR provisions. #
########################################################################
########################################################################
#
# Mosfet.py
#
########################################################################
"""Module: Mosfet
This module implements a MosfetTemplate class for creating MOS
transistor PyCells.
MosfetTemplate provides the following capabilities:
- (float ) transistor width
- (float ) transistor length
- (integer) fingers, number of transistors
- (boolean) left diffusion contact
- (float ) left diffusion contact coverage
- (boolean) left transistor gate contact
- (float ) left transistor gate contact coverage
- (boolean) center diffusion contacts
- (float ) center diffusion contact coverage
- (boolean) center transistor gates contact
- (float ) center transistor gates contact coverage
- (boolean) right diffusion contact
- (float ) right diffusion contact coverage
- (boolean) right transistor gate contact
- (float ) right transistor gate contact coverage
- Stretch handles for contacts
- Stretch handles for gate w & l
- Auto-abutment
- Electrical connectivity, i.e. nets, pins, terminals.
Class variables:
- (string ) poly, Layer name
- (string ) diffusion, Layer name
- (string ) well, Layer name
- (string ) implant, Layer name
- (string ) contact, Layer name
- (string ) metal1, Layer name
Technology file requirements:
- (minEnclosure poly diffusion)
- (minEnclosure diffusion poly )
- (minSpacing contact poly )
- (minSpacing poly )
- (minWidth contact )
Additional requirements exist in Via module.
Module dependencies:
- cni.dlo, CiraNova PyCell APIs.
- Via, Contact PyCells
Exceptions:
- ValueError, for missing DRC rules in technology file.
EDA tool integration:
Stretch handles are specific features of layout editors.
Standard OpenAccess semantics do not exist. To support
stretch handles, we define a standard protocol, and create
customized interpreters for each layout editor. This
enables us to support stretch handles in multiple layout
editors without changes to the Python API or the PyCell
implementation.
Other notes:
[1] Dogbone configurations aren't implemented in this code.
For current processes, 90nm and below, the transistor
endcap to L-shaped source/drain diffusion spacing is
typically bigger. This type of conditional rule is
better modeled in upcoming API functions; hence, we
defer the implementation.
[2] Only creates pins for leftmost diffusion, rightmost diffusion,
and leftmost gate. Unclear what to do about the center gates
and diffusions, since this could be either a series or a
parallel structure.
"""
__revision__ = "$Id: Mosfet_vtl.py 134 2008-03-21 21:33:04Z [email protected] $"
__author__ = "Lyndon C. Lim"
from cni.dlo import (
Box,
Direction,
DloGen,
FailAction,
Grouping,
Instance,
Layer,
Location,
ParamArray,
ParamSpecArray,
Pin,
Point,
RangeConstraint,
Rect,
Term,
TermType,
Text,
)
from cni.integ.common import (
stretchHandle,
autoAbutment,
)
import traceback
from Via import (
ViaInstance,
)
class Dictionary:
pass
#### Layer rules in Santana.tech must be kept up-to-date for this to run correctly!
class MosfetTemplate( DloGen):
"""Defines a MosfetTemplate class.
"""
poly = "poly"
diffusion = "active"
well = "nwell or pwell"
implant = "pimplant"
contact = "contact"
metal1 = "metal1"
@classmethod
def defineParamSpecs(cls, specs):
"""Define the PyCell parameters. The order of invocation of
specs() becomes the order on the form.
Arguments:
specs - (ParamSpecArray) PyCell parameters
"""
oxide = "thin"
tranType = {"pimplant":"pmos_vtl", "nimplant":"nmos_vtl"}[cls.implant]
l = specs.tech.getMosfetParams( tranType, oxide, "minLength")
# No dogbone allowed.
w = specs.tech.getPhysicalRule( "minWidth", specs.tech.getLayer(cls.contact)) + \
2.0 * specs.tech.getPhysicalRule( "minEnclosure", specs.tech.getLayer(cls.diffusion), specs.tech.getLayer(cls.contact))
w = max( w, specs.tech.getMosfetParams( tranType, oxide, "minWidth"))
specs( "w", w, constraint = RangeConstraint( w, 1000*w, FailAction.USE_DEFAULT))
specs( "l", l, constraint = RangeConstraint( l, 1000*l, FailAction.USE_DEFAULT))
specs( "fingers", 1),
parameters = (
("diffContactLeft", True ),
("diffContactLeftCov", 1.0 ),
("gateContactLeft", False ),
("gateContactLeftCov", 1.0 ),
("diffContactCenter", False ),
("diffContactCenterCov", 1.0 ),
("gateContactCenter", False ),
("gateContactCenterCov", 1.0 ),
("diffContactRight", True ),
("diffContactRightCov", 1.0 ),
("gateContactRight", False ),
("gateContactRightCov", 1.0 ),
)
rangeConstraint = RangeConstraint(0.0, 1.0, FailAction.USE_DEFAULT)
for parameter in parameters:
if isinstance( parameter[1], float):
specs( parameter[0], parameter[1], constraint=rangeConstraint)
else:
specs( parameter[0], parameter[1])
def setupParams( self, params):
"""Process PyCell parameters, prior to geometric construction.
Decisions about process rules and PyCell-specific behaviors
should be confined to this method.
Create most useful format for variables to be used in later
methods.
Arguments:
params - (ParamArray) PyCell parameters
"""
for key in params:
self.__dict__[key] = params[ key]
for key in (
"diffContactLeftCov",
"gateContactLeftCov",
"diffContactCenterCov",
"gateContactCenterCov",
"diffContactRightCov",
"gateContactRightCov" ):
# Contact coverage parameters are 0.0 - 1.0
self.__dict__[key] = min( max( self.__dict__[key], 0), 1.0)
# Convert to process layer names
if self.implant == "pimplant":
self.encLayers = [ self.tech.getLayer( "nwell")]
self.well = self.tech.getLayer( "nwell")
else:
self.encLayers = [ self.tech.getLayer( "pwell")]
self.well = self.tech.getLayer( "pwell")
self.poly = self.tech.getLayer( self.poly )
self.diffusion = self.tech.getLayer( self.diffusion )
self.implant = self.tech.getLayer( self.implant )
self.contact = self.tech.getLayer( self.contact )
self.metal1 = self.tech.getLayer( self.metal1 )
# Implant not an enclosing layer in our kit
# self.encLayers.append( self.implant)
self.instance = 0 # counter for instance names
# Get process design rule information
self.Endcap = self.tech.getPhysicalRule( "minEnclosure", self.poly, self.diffusion)
self.ContSpacePoly = self.tech.getPhysicalRule( "minSpacing", self.contact, self.poly)
self.DiffSpace = self.tech.getPhysicalRule( "minSpacing", self.diffusion)
self.GateSpace = self.tech.getPhysicalRule( "minSpacing", self.poly)
self.ContWidth = self.tech.getPhysicalRule( "minWidth", self.contact)
self.grid = self.tech.getGridResolution()
self.gridX2 = self.grid * 2.0
self.gridd2 = self.grid / 2.0
self.w = round(self.w / self.gridX2) * self.gridX2
self.l = round(self.l / self.gridX2) * self.gridX2
self.lDiv2 = self.l / 2.0
self.GatePitch = self.GateSpace + self.l
self.GatePitchDiv2 = (self.GateSpace + self.l) / 2.0
self.GateSpaceDiv2 = self.GateSpace / 2.0
self.ContGatePitch = self.ContSpacePoly + self.lDiv2 + (self.ContWidth / 2.0)
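# Illustrative sanity check of the derived pitches (hypothetical rule values,
# not actual kit numbers): with GateSpace=0.14, l=0.18, ContSpacePoly=0.05 and
# ContWidth=0.12, GatePitch = 0.14 + 0.18 = 0.32, GatePitchDiv2 = 0.16 and
# ContGatePitch = 0.05 + 0.09 + 0.06 = 0.20.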
def genTopology( self):
"""Define topology (connectivity) for multi-device circuit PyCells.
"""
pass
def sizeDevices( self):
"""Define device sizes within multi-device circuit PyCells.
"""
pass
def createGate( self,
x=0,
y=0,
terminal=False):
"""Create the poly rectangle which represents the MOS
transistor gate.
Override this method to create custom gates.
Arguments:
x - (integer) x coordinate of gate center
y - (integer) y coordinate of lower diffusion edge
"""
left = x - self.lDiv2
right = x + self.lDiv2
# Create transistor gate
gateRect = Rect( self.poly,
Box( left, (y - self.Endcap),
right, (y + self.w + self.Endcap),
)
)
# Stretch handles for w & l
stretchHandle(
shape = gateRect,
name = ("stretch%d" % self.instance),
parameter = "w",
location = Location.UPPER_CENTER,
direction = Direction.NORTH_SOUTH,
display = ("w = %.2f" % self.w),
stretchType = "relative",
userScale = "1.0",
userSnap = "0.0025",
)
stretchHandle(
shape = gateRect,
name = ("stretch%d" % self.instance),
parameter = "l",
location = Location.CENTER_RIGHT,
direction = Direction.EAST_WEST,
display = ("l = %.2f" % self.l),
stretchType = "relative",
userScale = "1.0",
userSnap = "0.0025",
)
# Create weakly-connected pins
if terminal:
# Bottom gate pin
Pin(
"%sS%d" % (terminal, self.instance),
terminal,
Rect(
self.poly,
Box( left, (y - self.Endcap),
right, y,
)
)
)
# Top gate pin
Pin(
"%sN%d" % (terminal, self.instance),
terminal,
Rect(
self.poly,
Box( left, (y + self.w),
right, (y + self.w + self.Endcap),
)
)
)
self.instance += 1
return( gateRect)
def createGateCont( self,
gateRect=False,
coverage=1.0,
stretch=False,
terminal=False):
"""Create a gate contact by instantiating a poly contact PyCell.
Arguments:
gateRect - (PhysicalComponent) Gate rectangle for alignment.
coverage - (float ) Percentage of poly width to be covered
by contact
stretch - (string ) Name of stretch handle property for
gate contact
"""
gateCont = ViaInstance(
"pcont",
ParamArray(),
None,
"I%d" % self.instance,
)
self.place( gateCont, Direction.SOUTH, gateRect, 0)
width = self.l * coverage
gateCont.resize(
width = width,
via = self.contact,
metalLayer = self.poly,
)
# Create overlapping poly rectangle for stretch handle
polyRect = gateCont.promoteMetal( self.poly)
bbox = polyRect.getBBox()
width = max( width, bbox.getWidth()) / 2
center = bbox.getCenterX()
bbox.setLeft( center - width)
bbox.setRight( center + width)
polyRect.setBBox( bbox)
# Stretch handle for gate contact coverage
stretchHandle(
shape = polyRect,
name = ("stretch%d" % self.instance),
parameter = stretch,
location = Location.CENTER_RIGHT,
direction = Direction.EAST_WEST,
stretchType = "relative",
userScale = "1.0",
userSnap = "0.0025",
minVal = 0.0,
maxVal = 1.0,
)
# Create weakly-connected pins
if terminal:
Pin(
("%sC%d" % (terminal, self.instance)),
terminal,
Rect( self.poly, bbox)
)
self.instance += 1
return( gateCont)
def createSourceDrain( self,
diffusionType="full",
withContact=True,
x=0,
y=0,
coverage=1.0,
stretch=False,
terminal=False):
"""Create a source or drain diffusion.
Option to create diffusion contact instance.
Option to create matching diffusion terminal.
Option to create a stretch handle property.
Override this method to create custom contacts.
Arguments:
diffusionType - (string) "full", "left", "right"
withContact - (boolean) Create contact
x - (float ) x coordinate for center of contact
y - (float ) y coordinate for lower diffusion edge
coverage - (float ) Percentage of source/drain diffusion to
be covered by contact
stretch - (string ) Name of stretch handle property
"""
# Create source/drain contact
if withContact:
diffCont = ViaInstance(
"dcont",
ParamArray( origin="lowerCenter"),
None,
"I%d" % self.instance,
)
diffCont.setOrigin( Point(x, y-0.03))
height = self.w * coverage
diffCont.resize(
height = height,
via = self.contact,
metalLayer = self.diffusion,
)
# Create overlapping diffusion rectangle for stretch handle
diffRect = diffCont.promoteMetal( self.diffusion)
bbox = diffRect.getBBox()
height = max( height, bbox.getHeight())
bbox.setTop( bbox.getBottom() + height)
diffRect.setBBox( bbox)
# Stretch handle for diffusion contact coverage
stretchHandle(
shape = diffRect,
name = ("stretch%d" % self.instance),
parameter = stretch,
location = Location.UPPER_CENTER,
direction = Direction.NORTH_SOUTH,
stretchType = "relative",
userScale = "1.0",
userSnap = "0.0025",
minVal = 0.0,
maxVal = 1.0,
)
self.instance += 1
# Create source/drain diffusion
if withContact:
bbox = Box(
bbox.getLeft(), y,
bbox.getRight(), (y + self.w),
)
else:
if (diffusionType == "left"):
bbox = Box(
x, y,
(x + self.GateSpaceDiv2), (y + self.w),
)
elif (diffusionType == "right"):
bbox = Box(
(x - self.GateSpaceDiv2), y,
x, (y + self.w),
)
elif (diffusionType == "full"):
bbox = Box(
(x - self.GateSpaceDiv2), y,
(x + self.GateSpaceDiv2), (y + self.w),
)
else:
raise ValueError( "Unknown: diffusionType=%s" % diffusionType)
if terminal:
p0 = Pin(
terminal,
terminal,
Rect( self.diffusion, bbox)
)
pinRect = p0.getShapes()[0]
autoAbutment(
pinRect,
self.w,
[ Direction.WEST],
"cniMOS",
abut2PinEqual = [ { "spacing":0.0}, { "diffLeftStyle":"DiffHalf" }, { "diffLeftStyle":"DiffHalf" } ],
abut2PinBigger = [ { "spacing":0.0}, { "diffLeftStyle":"DiffEdgeAbut" }, { "diffLeftStyle":"DiffEdgeAbut" } ],
abut3PinBigger = [ { "spacing":0.0}, { "diffLeftStyle":"ContactEdgeAbut2"}, { "diffLeftStyle":"ContactEdgeAbut2"} ],
abut3PinEqual = [ { "spacing":0.0}, { "diffLeftStyle":"DiffAbut" }, { "diffLeftStyle":"ContactEdgeAbut2"} ],
abut2PinSmaller = [ { "spacing":0.0}, { "diffLeftStyle":"DiffEdgeAbut" }, { "diffLeftStyle":"DiffEdgeAbut" } ],
abut3PinSmaller = [ { "spacing":0.0}, { "diffLeftStyle":"DiffEdgeAbut" }, { "diffLeftStyle":"DiffEdgeAbut" } ],
noAbut = [ { "spacing":0.4}],
function = "cniAbut",
#shape = pinRect,
#abutDirection = diffusionType,
#abutClass = "cniMOS",
#abutFunction = "cniAbut",
#spacingRule = self.DiffSpace,
)
else:
pinRect = Rect( self.diffusion, bbox)
return( pinRect)
def genLayout( self):
"""Main body of geometric construction code. Create the
leftmost contact and transistor gate. Loop to create center
contacts and gates. Create the rightmost gate and contact.
Avoid modifying or overriding this method. PyCell-specific
behaviors and calculations should be kept out of this method.
"""
# obj is used to track the rightmost object, to calculate
# the diffusion coordinates.
# dbox is the bounding box of the underlying diffusion.
dbox = Dictionary()
dbox.bottom = 0
dbox.top = self.w
origin = Dictionary()
xCoord = 0
origin.y = 0
objectPitch = {
True:self.ContGatePitch,
False:self.GatePitchDiv2,
}
# Mark PyCell as containing stretch handles
self.props["cniStretch"] = "CiraNova"
# For integration with layout editors, save parameter
# settings in the submaster. They are not saved on the
# instance in the default case.
# For auto-abutment
self.props["diffContactLeft"] = self.diffContactLeft
self.props["diffContactRight"] = self.diffContactRight
# For stretch handles
self.props["w"] = self.w
self.props["l"] = self.l
# Create electrical terminals needed for pins
Term("G", TermType.INPUT)
Term("S", TermType.INPUT_OUTPUT)
Term("D", TermType.INPUT_OUTPUT)
# Create leftmost diffusion contact
obj = self.createSourceDrain(
diffusionType = "left",
withContact = self.diffContactLeft,
coverage = self.diffContactLeftCov,
stretch = "diffContactLeftCov",
terminal = "S",
x = xCoord,
)
dbox.left = obj.getBBox( self.diffusion).getLeft()
# Create leftmost gate w/optional gate contact
xCoord += objectPitch[self.diffContactLeft]# + 0.0025
obj = self.createGate( x=xCoord, terminal="G")
origin.x = obj.getBBox().left
if self.gateContactLeft:
self.createGateCont(
gateRect = obj,
coverage = self.gateContactLeftCov,
stretch = "gateContactLeftCov",
terminal = "G",
)
# Loop to create center gates and contacts
for i in range( self.fingers - 2):
# Create diffusion contact on left of gate
xCoord += objectPitch[self.diffContactCenter] + 0.0025
self.createSourceDrain(
diffusionType = "full",
withContact = self.diffContactCenter,
coverage = self.diffContactCenterCov,
stretch = "diffContactCenterCov",
x = xCoord,
)
# Create gate w/optional gate contact
if self.diffContactCenter:
xCoord += objectPitch[self.diffContactCenter] + 0.0025
else:
xCoord += objectPitch[self.diffContactCenter] - 0.0025
obj = self.createGate( x=xCoord, terminal="G")
if self.gateContactCenter:
self.createGateCont(
gateRect = obj,
coverage = self.gateContactCenterCov,
stretch = "gateContactCenterCov",
terminal = "G",
)
# Create rightmost gate w/optional gate contact
if self.fingers > 1:
if self.diffContactCenter:
xCoord += objectPitch[self.diffContactCenter] + 0.0025
else:
xCoord += objectPitch[self.diffContactCenter] - 0.0025
self.createSourceDrain(
diffusionType = "full",
withContact = self.diffContactCenter,
coverage = self.diffContactCenterCov,
stretch = "diffContactCenterCov",
x = xCoord,
)
xCoord += objectPitch[self.diffContactCenter] + 0.0025
obj = self.createGate( x=xCoord, terminal="G")
if self.gateContactRight:
self.createGateCont(
gateRect = obj,
coverage = self.gateContactRightCov,
stretch = "gateContactRightCov",
terminal = "G",
)
# Create rightmost diffusion contact
xCoord += objectPitch[self.diffContactRight]# + 0.0025
obj = self.createSourceDrain(
diffusionType = "right",
withContact = self.diffContactRight,
coverage = self.diffContactRightCov,
stretch = "diffContactRightCov",
x = xCoord,
terminal = "D",
)
dbox.right = obj.getBBox(self.diffusion).getRight()
# Create overall diffusion box
Rect(
self.diffusion,
Box( dbox.left, dbox.bottom, dbox.right, dbox.top)
)
# Create implant box, to overlap diffusion rather than whole cell
Rect(
self.implant,
Box( dbox.left, dbox.bottom, dbox.right, dbox.top)
)
Rect(
self.well,
Box( dbox.left - 0.055, dbox.bottom - 0.055, dbox.right + 0.055, dbox.top + 0.055 )
)
# Create other outline layers
all = Grouping( "all", self.getComps())
# Note: all.add( self.fgAddEnclosingRects( all, self.encLayers)) was not working; replaced by the explicit diffusion/implant/well rectangles above.
# Setting the origin is important.
# Avoid shifting of instance locations during auto-abutment.
# Correctly track mouse motion during stretching.
all.moveBy( -origin.x, -origin.y)
@classmethod
def unitTest( cls, paramsMaker, lib, cell, view, ignoreError=True):
"""Test single instance or specific method of the PyCell.
"""
# Note: Pass in paramMaker so parameters are constructed in
# the correct tech context (within the current DloGen).
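# A minimal usage sketch (names are illustrative; parameter values mirror
# the smalltest() examples further below):
#   Nmos_vtl.unitTest( lambda: ParamArray( w=0.6, l=0.18, fingers=1),
#       "MyPyCellLib", "UNITTEST_Nmos", "layout")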
def unitTestMethod( self):
"""Define how to build the unit test.
"""
# Get default parameters from specs, then update
# with explicitly supplied specs for unitTest.
specs = ParamSpecArray()
self.defineParamSpecs( specs)
params = ParamArray( specs)
params.update( paramsMaker())
print("")
print( "Creating design: %s" % repr(self))
print( " using technology: %s" % self.tech.id())
print( " by %s.generate(%r)" % (self.__class__.__name__, params))
specs.verify( params)
self.generate( params)
self.save()
try:
cls.withNewDlo( unitTestMethod, lib, cell, view)
except:
if ignoreError:
# Error messages go to debug log
print("")
print( "Exception caught.")
traceback.print_exc()
else:
raise
class Nmos_vtl( MosfetTemplate):
"""Define Nmos class to implement NMOS MOS transistors.
"""
implant = "nimplant"
class Pmos_vtl( MosfetTemplate):
"""Define Nmos class to implement PMOS MOS transistors.
"""
implant = "pimplant"
########################################################################
#
# End
#
########################################################################
###############################################################################
#
# Define self-tests
#
###############################################################################
if __name__ == "__main__":
def smalltest( self):
"""Create layout instances for quick development debugging.
"""
i = 0
x = 0
y = 0
param = ParamArray(
w = 0.6,
l = 0.18,
fingers = 1,
diffContactLeft = True,
diffContactLeftCov = 0.7,
gateContactLeft = False,
gateContactLeftCov = 0.7,
diffContactCenter = False,
diffContactCenterCov = 0.5,
gateContactCenter = False,
gateContactCenterCov = 0.5,
diffContactRight = False,
diffContactRightCov = 1.0,
gateContactRight = True,
gateContactRightCov = 1.0,
)
for master in [ "nmos_vtl", "pmos_vtl"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 4):
x += 10
else:
x = 0
y += 10
param = ParamArray(
w = 2.0,
l = 1.5,
fingers = 1,
diffContactLeft = True,
diffContactLeftCov = 0.3,
gateContactLeft = True,
gateContactLeftCov = 0.3,
diffContactCenter = True,
diffContactCenterCov = 0.5,
gateContactCenter = True,
gateContactCenterCov = 0.5,
diffContactRight = True,
diffContactRightCov = 0.7,
gateContactRight = True,
gateContactRightCov = 0.7,
)
for master in [ "nmos_vtl", "pmos_vtl"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 4):
x += 10
else:
x = 0
y += 10
param = ParamArray(
w = 2.0,
l = 1.5,
fingers = 2,
diffContactLeft = True,
diffContactLeftCov = 0.3,
gateContactLeft = True,
gateContactLeftCov = 0.3,
diffContactCenter = True,
diffContactCenterCov = 0.5,
gateContactCenter = True,
gateContactCenterCov = 0.5,
diffContactRight = True,
diffContactRightCov = 1.0,
gateContactRight = True,
gateContactRightCov = 1.0,
)
for master in [ "nmos_vtl", "pmos_vtl"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 4):
x += 10
else:
x = 0
y += 10
param = ParamArray(
w = 2.0,
l = 1.5,
fingers = 2,
diffContactLeft = False,
diffContactLeftCov = 1.0,
gateContactLeft = True,
gateContactLeftCov = 1.0,
diffContactCenter = False,
diffContactCenterCov = 0.5,
gateContactCenter = True,
gateContactCenterCov = 0.6,
diffContactRight = True,
diffContactRightCov = 0.4,
gateContactRight = False,
gateContactRightCov = 0.4,
)
for master in [ "nmos_vtl", "pmos_vtl"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 4):
x += 10
else:
x = 0
y += 20
self.save()
def bigtest( self):
"""Create layout instances for comprehensive testing, such as DRC or
regression testing.
"""
i = 0
x = 0
y = 0
for w in [ 0.09, 2.0]:
for l in [ 0.05, 1.0]:
for fingers in [ 1, 2]:
for diffContactLeftCov in [ 0.0, 0.33, 1.0]:
for gateContactLeftCov in [ 0.0, 0.33, 1.0]:
for diffContactCenterCov in [ 0.0, 0.33, 1.0]:
for gateContactCenterCov in [ 0.0, 0.33, 1.0]:
for diffContactRightCov in [ 0.0, 0.33, 1.0]:
for gateContactRightCov in [ 0.0, 0.33, 1.0]:
param = ParamArray(
w = w,
l = l,
fingers = fingers,
diffContactLeft = (not diffContactLeftCov),
diffContactLeftCov = diffContactLeftCov,
gateContactLeft = (not gateContactLeftCov),
gateContactLeftCov = gateContactLeftCov,
diffContactCenter = (not diffContactCenterCov),
diffContactCenterCov = diffContactCenterCov,
gateContactCenter = (not gateContactCenterCov),
gateContactCenterCov = gateContactCenterCov,
diffContactRight = (not diffContactRightCov),
diffContactRightCov = diffContactRightCov,
gateContactRight = (not gateContactRightCov),
gateContactRightCov = gateContactRightCov,
)
for master in [ "nmos_vtl", "pmos_vtl"]:
inst = Instance(("%s" % master), param, None, ("I%d" % i))
inst.setOrigin( Point( x,y))
i += 1
if (i % 100):
x += 20
else:
x = 0
y += 20
print("Total number of instances created: %d" % i)
self.save()
# TEST is defined externally from this file.
# For building the test cases, invoke like this:
# cnpy -c "TEST='SMALL';execfile('Mosfet.py')"
if "TEST" in vars():
if vars()["TEST"] == "SMALL":
MosfetTemplate.unitTest(lambda: ParamArray(), "MyPyCellLib", "UNITTEST_Mosfet", "layout")
DloGen.withNewDlo( smalltest, "MyPyCellLib", "SMALLTEST_Mosfet", "layout")
elif vars()["TEST"] == "BIG":
DloGen.withNewDlo( bigtest, "MyPyCellLib", "BIGTEST_Mosfet", "layout")
else:
DloGen.withNewDlo( smalltest, "MyPyCellLib", "SMALLTEST_Mosfet", "layout")
# end
|
|
# -*- coding: utf-8 -*-
import os, inspect
from compiler.error import AssemblerError, ParseError, ExtensionError
from compiler.executor import Executor
from compiler.tokenizer import Tokenizer, Token
from compiler.token import TokenType, SYMBOLS, KEYWORDS
from compiler.assembler import Assembler
from compiler.expression import Stack, Expression, ExpressionSolver
from compiler.instruction import Instruction, AsmExpressionContainer, JumpFlag
from compiler.memory import Memory
from compiler.utils import Utils
class ScriptCompiler(Executor):
def __init__(self, *, testing=False):
self.testing = testing
self.debug = False
self.mem = Memory()
self.jump_table = {}
self.solver = ExpressionSolver()
def compile_from_file(self, filename, *, debug=False):
path = os.path.abspath(filename)
ext = os.path.splitext(path)[1]
if ext != ".script":
raise ExtensionError("Unknown extension: \'{0}\'".format(ext))
with open(path, "r") as f:
return self.compile(f.read(), debug=debug)
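# Minimal usage sketch (assumes a 'program.script' source file exists):
#   compiler = ScriptCompiler(testing=True)
#   assembly = compiler.compile_from_file("program.script", debug=True)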
def compile(self, string, *, debug=False):
self.debug = debug
# Read file contents and interpret it
t = Tokenizer()
t.load(string)
self.tokens = t.tokenize()
print("\nTokens:")
for t in self.tokens: print(" {0}\t\t{1}".format(str(t.value), str(t.token)))
(exprs, asm) = self._parse(self.tokens)
a = Assembler(mem_size=100, testing=self.testing)
output = a.load(asm)
return output
def _print_expr_tree(self, exprs, prefix=""):
if len(exprs) == 0: return
idx = 0
curr = exprs[idx]
while curr is not None:
print("{0}{1}".format(prefix, curr))
if len(curr.expressions) != 0:
self._print_expr_tree(curr.expressions, prefix + "\t")
if idx + 1 < len(exprs):
idx += 1
curr = exprs[idx]
else:
curr = None
def _parse_expr_recursive(self, tokens):
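# Groups the flat token stream into Expression objects: ';' closes the
# current expression, '{' pushes a new block expression onto the stack and
# '}' pops it back into its parent. Sketch (token spellings depend on the
# tokenizer): "x = 1; if (x) { print(x); }" yields two top-level
# Expressions, the second holding the 'print(x)' Expression in .expressions.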
exprs = []
temp = []
expr_stack = Stack()
block_expr = None
in_block = False
level = 0
idx = 0
while idx < len(tokens):
# increment
t = tokens[idx]
idx += 1
# start parsing tokens
if t.token == TokenType.FuncStart: # {
# discard token
# increment level
level += 1
# init an expression on the stack
e = Expression(temp)
expr_stack.push(e)
temp = []
# set inblock to true
if not in_block: in_block = True
else: pass # already in a block
elif t.token == TokenType.FuncEnd: # }
# discard token
# increment level
level -= 1
if level > 0:
curr = expr_stack.pop()
prev = expr_stack.pop()
prev.expressions.append(curr)
expr_stack.push(prev)
elif level == 0:
in_block = False
curr = expr_stack.pop()
# we're now at the lowest level and there is no
# other block on the stack (...shouldn't be, at least).
exprs.append(curr)
else:
pass # error?
elif t.token == TokenType.SemiColon:
# discard token
# now turn temp list into an expression
e = Expression(temp)
temp = []
if in_block:
curr = expr_stack.pop()
curr.expressions.append(e)
expr_stack.push(curr)
else:
exprs.append(e)
else: # just add the token to the temp list
temp.append(t)
self._print_expr_tree(exprs) # debug
return exprs
def _handle_assignment(self, ex):
"""
if the identifier does not exist, create a reference,
solve the expression with the 'result_var' set to this identifier.
if the identifier exists, create a temp reference to add the
expression result into, then add the instructions to move the temp
result variable into the reference.
"""
identifier = str(ex.tokens[0].value)
# skip the identifier and the '=' char
relevant_tokens = ex.tokens[2:]
asm = AsmExpressionContainer(ex)
# reference does not exist
if not self.mem.has_reference(identifier):
if len(relevant_tokens) == 1 and relevant_tokens[0].value.isdigit():
# one token that is an int value
self.mem.add_reference(identifier, relevant_tokens[0].value)
elif len(relevant_tokens) == 1 and self.mem.has_reference(relevant_tokens[0].value):
# one token that is an identifier
self.mem.add_reference(identifier, self.mem.get_reference(relevant_tokens[0].value))
else:
# several tokens, let's solve it
self.mem.add_reference(identifier)
instructions = self.solver.gen_runtime_expression(relevant_tokens,
self.mem, result_var=identifier)
asm.merge(instructions)
# reference exists
else:
temp = Memory.gen_temp_name()
#self.mem.add_reference(temp)
if len(relevant_tokens) == 1 and relevant_tokens[0].value.isdigit():
# one token that is an int value
self.mem.add_reference(temp, relevant_tokens[0].value)
elif len(relevant_tokens) == 1 and self.mem.has_reference(relevant_tokens[0].value):
# one token that is an identifier
self.mem.add_reference(temp, self.mem.get_reference(relevant_tokens[0].value))
else:
# several tokens, let's solve it
self.mem.add_reference(temp)
instructions = self.solver.gen_runtime_expression(relevant_tokens,
self.mem, result_var=temp)
asm.merge(instructions)
# the 'temp' variable may be loaded in the
# AC, but just to be sure we do it again.
asm.add(Instruction("LDA", variable=temp, comment="variable 're-assignment'"))
asm.add(Instruction("STA", variable=identifier))
return asm
def _handle_if(self, ex):
# skip the identifier and the '=' char
relevant_tokens = ex.tokens[2:len(ex.tokens)-1]
asm = AsmExpressionContainer(ex)
result_var = ""
if len(relevant_tokens) == 1 and relevant_tokens[0].token == TokenType.Identifier \
and not relevant_tokens[0].value.isdigit():
# single token with a value, should be dynamic
#print("IT'S AN IDENTIFIER")
var_name = str(relevant_tokens[0].value)
result_var = var_name
#self.mem.add_reference(temp, self.mem.get_reference(relevant_tokens[0].value))
else:
temp = Memory.gen_temp_name()
#val = int(self.solver.solve_expr(ex.tokens[2:len(ex.tokens)-1], self.mem, None))
#ex.value = val
#var_name = add_mem_ref(val)
if len(relevant_tokens) == 1 and relevant_tokens[0].value.isdigit():
# one token that is an int value
self.mem.add_reference(temp, relevant_tokens[0].value)
elif len(relevant_tokens) == 1 and self.mem.has_reference(relevant_tokens[0].value):
# one token that is an identifier
#self.mem.add_reference(temp, self.mem.get_reference(relevant_tokens[0].value))
temp = relevant_tokens[0].value
else:
# several tokens, let's solve it
self.mem.add_reference(temp)
instructions = self.solver.gen_runtime_expression(relevant_tokens,
self.mem, result_var=temp)
asm.merge(instructions)
result_var = temp
asm.load(result_var)
#print("a.load(var_name); == " + var_name)
jp_name = Memory.gen_jump_name()
#asm.load(temp)
asm.add(Instruction("BRZ", jump=jp_name, comment="jump if zero"))
for e in ex.expressions:
ae = self._handle_expr(e)
if ae is not None:
asm.asm_expressions.append(ae)
for aa in asm.asm_expressions:
instrs = aa.get_instructions()
for i in instrs:
asm.add(i)
asm.add(JumpFlag(jp_name))
return asm
def _handle_func_call(self, ex):
# TODO: function lookup table with argument count and such,
# because right now all we have is "print" and "read"
identifier = str(ex.tokens[2].value)
a = AsmExpressionContainer(ex)
name = str(ex.tokens[0].value)
if name == "print":
# identifier is a constant
# so we just print it
if identifier.isdigit():
temp = Memory.gen_temp_name()
self.mem.add_reference(temp, identifier)
a.load(temp)
a.do_print()
else:
a.load(identifier)
a.do_print()
elif name == "read":
a.do_read()
if self.mem.has_reference(identifier):
temp = Memory.gen_temp_name()
self.mem.add_reference(temp)
a.add(Instruction("STA", variable=temp, comment="store input"))
a.add(Instruction("LDA", variable=temp, comment="variable 're-assignment'"))
a.add(Instruction("STA", variable=identifier))
else:
print("im so done with this shit")
return a
def _handle_expr(self, ex):
"""
evaluate an expression and generate assembly for it
"""
# returns true or false
def expr_matches(expr, tokens):
if len(expr.tokens) < len(tokens): return False
for idx, val in enumerate(tokens):
if str(val) != str(expr.tokens[idx].token):
return False
return True
match_assignment = lambda x: expr_matches(x, [TokenType.Identifier, TokenType.Equals])
match_condition = lambda x: expr_matches(x, [TokenType.Conditional, TokenType.LParen])
match_func = lambda x: expr_matches(x, [TokenType.Function, TokenType.LParen])
# VARIABLE ASSIGMENT
if match_assignment(ex):
asm = self._handle_assignment(ex)
return asm
elif match_condition(ex): # IF STATEMENT
asm = self._handle_if(ex)
return asm
elif match_func(ex):
asm = self._handle_func_call(ex)
return asm
return None
def _bind_jumps(self, instructions):
def find_jump(instructions, alias):
for idx, instr in enumerate(instructions):
if instr.is_jump_endpoint:
for j in instr.jumps:
if alias == j.alias:
return (idx, instr)
return (None, None)
for inst in instructions:
if inst.invalidate_jump_bindings:
need = inst.jump
(line_idx, jump_inst) = find_jump(instructions, need)
if line_idx is None:
print("Error: What the f-...this shouldnt happen...")
inst.set_adr(line_idx)
return instructions
def _merge_jumps(self, instructions):
copy = [i for i in instructions]
skip = 0
for idx, inst in enumerate(copy):
jumps = []
inc = 1
if skip != 0:
skip -= 1
continue
if isinstance(inst, JumpFlag):
# with the way we create the instructions,
# there will always be another Instruction
# after a jump command.
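# Sketch: [ Instruction("BRZ", jump="jp0"), JumpFlag("jp0"), Instruction("HLT") ]
# ends with the HLT instruction carrying the "jp0" jump endpoint.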
jumps.append(inst)
nxt = copy[idx + inc]
while isinstance(nxt, JumpFlag):
jumps.append(nxt)
inc += 1
skip += 1
nxt = copy[idx + inc]
# next is now an Instruction (hopefully)
if not isinstance(nxt, Instruction):
print("Error: Instance was not an Instruction")
for jp in jumps:
nxt.add_jump(jp)
# Delete all the JumpFlags from the copy list
copy = [c for c in copy if not isinstance(c, JumpFlag)]
return copy
def _parse(self, tokens):
exprs = self._parse_expr_recursive(tokens)
asm_list = [] # AsmExpression
for ex in exprs:
asm_expr = self._handle_expr(ex)
if Utils.check_none_critical(asm_expr):
Utils.debug("Compiler Error!: 'asm_expr' cannot be None.")
asm_list.append(asm_expr)
g = []
mem_asm = self.mem.gen_asm()
g.extend(mem_asm)
# get the rest of the instructions
for expr in asm_list:
g.extend(expr.get_instructions())
g.append(Instruction("HLT", comment="exit"))
print("\nDebug preview:\n")
for idx, gg in enumerate(g):
print(str(idx) + ": " + str(gg))
instructions = self._merge_jumps(g)
instructions = self.mem.bind_mem(instructions)
if instructions is None:
print("Critical Error!: Memory bindings.")
return None
instructions = self._bind_jumps(instructions)
if Utils.check_none_critical(instructions):
print("Critical Error!: Jump bindings.")
return None
assembly = "\n".join([a.asm() for a in instructions])
print("\nCompiled:\n")
for idx, gg in enumerate(instructions):
print(str(idx) + ": " + str(gg))
return [], assembly
|
|
# Copyright 2012-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy action implemenations"""
import sys
from cliff import command
from cliff import lister
from cliff import show
from keystoneauth1 import exceptions
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
import yaml
from congressclient.common import utils
def _format_rule(rule):
"""Break up rule string so it fits on screen."""
rule_split = jsonutils.dumps(rule).split(":-")
formatted_string = rule_split[0] + ":-\n"
for literal in rule_split[1].split("), "):
formatted_string += literal + '\n'
return formatted_string
def get_rule_id_from_name(client, parsed_args):
results = client.list_policy_rules(parsed_args.policy_name)['results']
rule_id = None
for result in results:
if result.get('name') == parsed_args.rule_id:
if rule_id is None:
rule_id = result.get('id')
else:
raise exceptions.Conflict(
"[Multiple rules with same name: %s]" %
parsed_args.rule_id)
if rule_id is None:
raise exceptions.NotFound(
"[No rule found with name: %s]" % parsed_args.rule_id)
return rule_id
class CreatePolicyRule(show.ShowOne):
"""Create a policy rule."""
log = logging.getLogger(__name__ + '.CreatePolicyRule')
def get_parser(self, prog_name):
parser = super(CreatePolicyRule, self).get_parser(prog_name)
parser.add_argument(
'policy_name',
metavar="<policy-name>",
help="Name or identifier of the policy")
parser.add_argument(
'rule',
metavar="<rule>",
help="Policy rule")
parser.add_argument(
'--name', dest="rule_name",
help="Name of the policy rule")
parser.add_argument(
'--comment', dest="comment",
help="Comment about policy rule")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
# set default max-width
if parsed_args.max_width == 0:
parsed_args.max_width = 80
client = self.app.client_manager.congressclient
body = {'rule': parsed_args.rule}
if parsed_args.rule_name:
body['name'] = parsed_args.rule_name
if parsed_args.comment:
body['comment'] = parsed_args.comment
data = client.create_policy_rule(parsed_args.policy_name, body)
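# ShowOne expects a (column_names, values) pair; zip(*sorted(...)) turns the
# response dict into exactly that, with columns in alphabetical order.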
return zip(*sorted(six.iteritems(data)))
class DeletePolicyRule(command.Command):
"""Delete a policy rule."""
log = logging.getLogger(__name__ + '.DeletePolicyRule')
def get_parser(self, prog_name):
parser = super(DeletePolicyRule, self).get_parser(prog_name)
parser.add_argument(
'policy_name',
metavar="<policy-name>",
help="Name of the policy to delete")
parser.add_argument(
'rule_id',
metavar="<rule-id/rule-name>",
help="ID/Name of the policy rule to delete")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
client = self.app.client_manager.congressclient
results = client.list_policy_rules(parsed_args.policy_name)
rule_id = utils.get_resource_id_from_name(
parsed_args.rule_id, results)
client.delete_policy_rule(parsed_args.policy_name, rule_id)
class ListPolicyRules(command.Command):
"""List policy rules."""
log = logging.getLogger(__name__ + '.ListPolicyRules')
def get_parser(self, prog_name):
parser = super(ListPolicyRules, self).get_parser(prog_name)
parser.add_argument(
'policy_name',
metavar="<policy-name>",
help="Name of the policy")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
client = self.app.client_manager.congressclient
results = client.list_policy_rules(parsed_args.policy_name)['results']
for result in results:
print("// ID: %s" % str(result['id']))
print("// Name: %s" % str(result.get('name')))
if result['comment'] != "None" and result['comment']:
print("// %s" % str(result['comment']))
print(result['rule'])
print('')
return 0
class SimulatePolicy(command.Command):
"""Show the result of simulation."""
log = logging.getLogger(__name__ + '.SimulatePolicy')
def get_parser(self, prog_name):
parser = super(SimulatePolicy, self).get_parser(prog_name)
parser.add_argument(
'policy',
metavar="<policy>",
help="Name of the policy")
parser.add_argument(
'query',
metavar="<query>",
help="String representing query (policy rule or literal)")
parser.add_argument(
'sequence',
metavar="<sequence>",
help="String representing sequence of updates/actions")
parser.add_argument(
'action_policy',
metavar="<action_policy>",
help="Name of the policy with actions",
default=None)
parser.add_argument(
'--delta',
action='store_true',
default=False,
help="Return difference in query caused by update sequence")
parser.add_argument(
'--trace',
action='store_true',
default=False,
help="Include trace describing computation")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
client = self.app.client_manager.congressclient
args = {}
args['query'] = parsed_args.query
args['sequence'] = parsed_args.sequence
if parsed_args.action_policy is not None:
args['action_policy'] = parsed_args.action_policy
if parsed_args.delta:
args['delta'] = parsed_args.delta
if parsed_args.trace:
args['trace'] = parsed_args.trace
body = {'query': parsed_args.query,
'sequence': parsed_args.sequence,
'action_policy': parsed_args.action_policy}
results = client.execute_policy_action(
policy_name=parsed_args.policy,
action="simulate",
trace=parsed_args.trace,
delta=parsed_args.delta,
body=body)
for result in results['result']:
print(result)
if 'trace' in results:
print(results['trace'])
return 0
class ListPolicyTables(lister.Lister):
"""List policy tables."""
log = logging.getLogger(__name__ + '.ListPolicyTables')
def get_parser(self, prog_name):
parser = super(ListPolicyTables, self).get_parser(prog_name)
parser.add_argument(
'policy_name',
metavar="<policy-name>",
help="Name of the policy")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
# set default max-width
if parsed_args.max_width == 0:
parsed_args.max_width = 80
client = self.app.client_manager.congressclient
data = client.list_policy_tables(parsed_args.policy_name)['results']
columns = ['id']
formatters = {'PolicyTables': utils.format_list}
return (columns,
(utils.get_dict_properties(s, columns,
formatters=formatters)
for s in data))
class ListPolicy(lister.Lister):
"""List Policy."""
log = logging.getLogger(__name__ + '.ListPolicy')
def get_parser(self, prog_name):
parser = super(ListPolicy, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
# set default max-width
if parsed_args.max_width == 0:
parsed_args.max_width = 80
client = self.app.client_manager.congressclient
data = client.list_policy()['results']
columns = ['id', 'name', 'owner_id', 'kind', 'description']
formatters = {'Policies': utils.format_list}
return (columns,
(utils.get_dict_properties(s, columns,
formatters=formatters)
for s in data))
class CreatePolicy(show.ShowOne):
"""Create a policy."""
log = logging.getLogger(__name__ + '.CreatePolicy')
def get_parser(self, prog_name):
parser = super(CreatePolicy, self).get_parser(prog_name)
parser.add_argument(
'policy_name',
metavar="<policy_name>",
help="Name of the policy")
parser.add_argument(
'--description',
metavar="<description>",
help="Policy description")
parser.add_argument(
'--abbreviation',
metavar="<abbreviation>",
help="Policy abbreviation (used in traces). The length of the "
"string must be equal to or less than 5 characters. Defaults "
"to the first five characters of policy_name if not set.")
parser.add_argument(
'--kind',
metavar="<kind>",
choices=['nonrecursive', 'database', 'action', 'materialized',
'z3'],
help="Kind of policy: "
"{nonrecursive, database, action, materialized, z3}")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
# set default max-width
if parsed_args.max_width == 0:
parsed_args.max_width = 80
client = self.app.client_manager.congressclient
body = {'name': parsed_args.policy_name,
'description': parsed_args.description,
'abbreviation': parsed_args.abbreviation,
'kind': parsed_args.kind}
data = client.create_policy(body)
return zip(*sorted(six.iteritems(data)))
class CreatePolicyFromFile(show.ShowOne):
"""Create a policy."""
log = logging.getLogger(__name__ + '.CreatePolicy')
def get_parser(self, prog_name):
parser = super(CreatePolicyFromFile, self).get_parser(prog_name)
parser.add_argument(
'policy_file_path',
metavar="<policy_file_path>",
help="Path to policy file")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
# set default max-width
if parsed_args.max_width == 0:
parsed_args.max_width = 80
client = self.app.client_manager.congressclient
with open(parsed_args.policy_file_path, "r") as stream:
policies = yaml.load_all(stream)
try:
body = next(policies)
except StopIteration:
raise Exception('No policy found in file.')
try:
next(policies)
raise Exception(
'More than one policy found in file. None imported.')
except StopIteration:
pass
data = client.create_policy(body)
def rule_dict_to_string(rules):
rule_str_list = [rule['rule'] for rule in rules]
return "\n".join(rule_str_list)
data['rules'] = rule_dict_to_string(data['rules'])
return zip(*sorted(six.iteritems(data)))
class DeletePolicy(command.Command):
"""Delete a policy."""
log = logging.getLogger(__name__ + '.DeletePolicy')
def get_parser(self, prog_name):
parser = super(DeletePolicy, self).get_parser(prog_name)
parser.add_argument(
'policy',
metavar="<policy>",
help="ID or name of the policy to delete")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
client = self.app.client_manager.congressclient
client.delete_policy(parsed_args.policy)
class ListPolicyRows(lister.Lister):
"""List policy rows."""
log = logging.getLogger(__name__ + '.ListPolicyRows')
def get_parser(self, prog_name):
parser = super(ListPolicyRows, self).get_parser(prog_name)
parser.add_argument(
'policy_name',
metavar="<policy-name>",
help="Name of the policy to show")
parser.add_argument(
'table',
metavar="<table>",
help="Table to get the policy rows from")
parser.add_argument(
'--trace',
action='store_true',
default=False,
help="Display explanation of result")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
# set default max-width
if parsed_args.max_width == 0:
parsed_args.max_width = 80
client = self.app.client_manager.congressclient
answer = client.list_policy_rows(parsed_args.policy_name,
parsed_args.table,
parsed_args.trace)
if 'trace' in answer:
sys.stdout.write(answer['trace'] + '\n')
results = answer['results']
columns = []
if results:
columns = ['Col%s' % (i)
for i in range(0, len(results[0]['data']))]
self.log.debug("Columns: " + str(columns))
return (columns, (x['data'] for x in results))
class ShowPolicyRule(show.ShowOne):
"""Show a policy rule."""
log = logging.getLogger(__name__ + '.ShowPolicyRule')
def get_parser(self, prog_name):
parser = super(ShowPolicyRule, self).get_parser(prog_name)
parser.add_argument(
'policy_name',
metavar="<policy-name>",
help="Name or identifier of the policy")
parser.add_argument(
'rule_id',
metavar="<rule-id/rule-name>",
help="Policy rule id or rule name")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
# set default max-width
if parsed_args.max_width == 0:
parsed_args.max_width = 80
client = self.app.client_manager.congressclient
results = client.list_policy_rules(parsed_args.policy_name)
rule_id = utils.get_resource_id_from_name(
parsed_args.rule_id, results)
data = client.show_policy_rule(parsed_args.policy_name, rule_id)
return zip(*sorted(six.iteritems(data)))
class ShowPolicyTable(show.ShowOne):
"""Show policy table properties."""
log = logging.getLogger(__name__ + '.ShowPolicyTable')
def get_parser(self, prog_name):
parser = super(ShowPolicyTable, self).get_parser(prog_name)
parser.add_argument(
'policy_name',
metavar='<policy-name>',
help="Name of policy")
parser.add_argument(
'table_id',
metavar='<table-id>',
help="Table id")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
# set default max-width
if parsed_args.max_width == 0:
parsed_args.max_width = 80
client = self.app.client_manager.congressclient
data = client.show_policy_table(parsed_args.policy_name,
parsed_args.table_id)
return zip(*sorted(six.iteritems(data)))
class ShowPolicy(show.ShowOne):
"""Show policy properties."""
log = logging.getLogger(__name__ + '.ShowPolicy')
def get_parser(self, prog_name):
parser = super(ShowPolicy, self).get_parser(prog_name)
parser.add_argument(
'policy_name',
metavar='<policy-name>',
help="Name of policy")
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
# set default max-width
if parsed_args.max_width == 0:
parsed_args.max_width = 80
client = self.app.client_manager.congressclient
results = client.list_policy()
policy_id = utils.get_resource_id_from_name(
parsed_args.policy_name, results)
data = client.show_policy(policy_id)
return zip(*sorted(six.iteritems(data)))
|
|
"""The test for the bayesian sensor platform."""
import unittest
from homeassistant.setup import setup_component
from homeassistant.components.bayesian import binary_sensor as bayesian
from tests.common import get_test_home_assistant
class TestBayesianBinarySensor(unittest.TestCase):
"""Test the threshold sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_sensor_numeric_state(self):
"""Test sensor on numeric state platform observations."""
config = {
"binary_sensor": {
"platform": "bayesian",
"name": "Test_Binary",
"observations": [
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored",
"below": 10,
"above": 5,
"prob_given_true": 0.6,
},
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored1",
"below": 7,
"above": 5,
"prob_given_true": 0.9,
"prob_given_false": 0.1,
},
],
"prior": 0.2,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.states.set("sensor.test_monitored", 4)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", 6)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 4)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 6)
self.hass.states.set("sensor.test_monitored1", 6)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert [
{"prob_false": 0.4, "prob_true": 0.6},
{"prob_false": 0.1, "prob_true": 0.9},
] == state.attributes.get("observations")
assert round(abs(0.77 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
self.hass.states.set("sensor.test_monitored", 6)
self.hass.states.set("sensor.test_monitored1", 0)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 4)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", 15)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.state == "off"
def test_sensor_state(self):
"""Test sensor on state platform observations."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.states.set("sensor.test_monitored", "on")
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert [{"prob_true": 0.8, "prob_false": 0.4}] == state.attributes.get(
"observations"
)
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
def test_threshold(self):
"""Test sensor on probabilty threshold limits."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "on",
"prob_given_true": 1.0,
}
],
"prior": 0.5,
"probability_threshold": 1.0,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert round(abs(1.0 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
def test_multiple_observations(self):
"""Test sensor with multiple observations of same entity."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "blue",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
},
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "red",
"prob_given_true": 0.2,
"prob_given_false": 0.4,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.states.set("sensor.test_monitored", "off")
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", "blue")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "blue")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert [{"prob_true": 0.8, "prob_false": 0.4}] == state.attributes.get(
"observations"
)
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
self.hass.states.set("sensor.test_monitored", "blue")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "red")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert round(abs(0.11 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
def test_probability_updates(self):
"""Test probability update function."""
prob_true = [0.3, 0.6, 0.8]
prob_false = [0.7, 0.4, 0.2]
prior = 0.5
for pt, pf in zip(prob_true, prob_false):
prior = bayesian.update_probability(prior, pt, pf)
assert round(abs(0.720000 - prior), 7) == 0
prob_true = [0.8, 0.3, 0.9]
prob_false = [0.6, 0.4, 0.2]
prior = 0.7
for pt, pf in zip(prob_true, prob_false):
prior = bayesian.update_probability(prior, pt, pf)
assert round(abs(0.9130434782608695 - prior), 7) == 0