#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for classes in iobase.py."""
# pytype: skip-file
from __future__ import absolute_import
import unittest
import mock
import apache_beam as beam
from apache_beam.io.concat_source import ConcatSource
from apache_beam.io.concat_source_test import RangeSource
from apache_beam.io import iobase
from apache_beam.io.iobase import SourceBundle
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class SDFBoundedSourceRestrictionProviderTest(unittest.TestCase):
def setUp(self):
self.initial_range_start = 0
self.initial_range_stop = 4
self.initial_range_source = RangeSource(
self.initial_range_start, self.initial_range_stop)
self.sdf_restriction_provider = (
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestrictionProvider(
self.initial_range_source, desired_chunk_size=2))
def test_initial_restriction(self):
unused_element = None
restriction = (
self.sdf_restriction_provider.initial_restriction(unused_element))
self.assertTrue(
isinstance(
restriction,
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestriction))
self.assertTrue(isinstance(restriction._source_bundle, SourceBundle))
self.assertEqual(
self.initial_range_start, restriction._source_bundle.start_position)
self.assertEqual(
self.initial_range_stop, restriction._source_bundle.stop_position)
self.assertTrue(isinstance(restriction._source_bundle.source, RangeSource))
self.assertEqual(restriction._range_tracker, None)
def test_create_tracker(self):
expected_start = 1
expected_stop = 3
source_bundle = SourceBundle(
expected_stop - expected_start,
RangeSource(1, 3),
expected_start,
expected_stop)
restriction_tracker = (
self.sdf_restriction_provider.create_tracker(
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestriction(
source_bundle)))
self.assertTrue(
isinstance(
restriction_tracker,
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestrictionTracker)
)
self.assertEqual(expected_start, restriction_tracker.start_pos())
self.assertEqual(expected_stop, restriction_tracker.stop_pos())
def test_simple_source_split(self):
unused_element = None
restriction = (
self.sdf_restriction_provider.initial_restriction(unused_element))
expect_splits = [(0, 2), (2, 4)]
split_bundles = list(
self.sdf_restriction_provider.split(unused_element, restriction))
self.assertTrue(
all([
isinstance(bundle._source_bundle, SourceBundle)
for bundle in split_bundles
]))
splits = ([(
bundle._source_bundle.start_position,
bundle._source_bundle.stop_position) for bundle in split_bundles])
self.assertEqual(expect_splits, list(splits))
def test_concat_source_split(self):
unused_element = None
initial_concat_source = ConcatSource([self.initial_range_source])
sdf_concat_restriction_provider = (
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestrictionProvider(
initial_concat_source, desired_chunk_size=2))
restriction = (
self.sdf_restriction_provider.initial_restriction(unused_element))
expect_splits = [(0, 2), (2, 4)]
split_bundles = list(
sdf_concat_restriction_provider.split(unused_element, restriction))
self.assertTrue(
all([
isinstance(bundle._source_bundle, SourceBundle)
for bundle in split_bundles
]))
splits = ([(
bundle._source_bundle.start_position,
bundle._source_bundle.stop_position) for bundle in split_bundles])
self.assertEqual(expect_splits, list(splits))
def test_restriction_size(self):
unused_element = None
restriction = (
self.sdf_restriction_provider.initial_restriction(unused_element))
split_1, split_2 = self.sdf_restriction_provider.split(unused_element,
restriction)
split_1_size = self.sdf_restriction_provider.restriction_size(
unused_element, split_1)
split_2_size = self.sdf_restriction_provider.restriction_size(
unused_element, split_2)
self.assertEqual(2, split_1_size)
self.assertEqual(2, split_2_size)
class SDFBoundedSourceRestrictionTrackerTest(unittest.TestCase):
def setUp(self):
self.initial_start_pos = 0
self.initial_stop_pos = 4
source_bundle = SourceBundle(
self.initial_stop_pos - self.initial_start_pos,
RangeSource(self.initial_start_pos, self.initial_stop_pos),
self.initial_start_pos,
self.initial_stop_pos)
self.sdf_restriction_tracker = (
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestrictionTracker(
iobase._SDFBoundedSourceWrapper._SDFBoundedSourceRestriction(
source_bundle)))
def test_current_restriction_before_split(self):
current_restriction = (self.sdf_restriction_tracker.current_restriction())
self.assertEqual(
self.initial_start_pos,
current_restriction._source_bundle.start_position)
self.assertEqual(
self.initial_stop_pos, current_restriction._source_bundle.stop_position)
self.assertEqual(
self.initial_start_pos,
current_restriction._range_tracker.start_position())
self.assertEqual(
self.initial_stop_pos,
current_restriction._range_tracker.stop_position())
def test_current_restriction_after_split(self):
fraction_of_remainder = 0.5
self.sdf_restriction_tracker.try_claim(1)
expected_restriction, _ = (
self.sdf_restriction_tracker.try_split(fraction_of_remainder))
current_restriction = self.sdf_restriction_tracker.current_restriction()
self.assertEqual(
expected_restriction._source_bundle, current_restriction._source_bundle)
self.assertTrue(current_restriction._range_tracker)
def test_try_split_at_remainder(self):
fraction_of_remainder = 0.4
expected_primary = (0, 2, 2.0)
expected_residual = (2, 4, 2.0)
self.sdf_restriction_tracker.try_claim(0)
actual_primary, actual_residual = (
self.sdf_restriction_tracker.try_split(fraction_of_remainder))
self.assertEqual(
expected_primary,
(
actual_primary._source_bundle.start_position,
actual_primary._source_bundle.stop_position,
actual_primary._source_bundle.weight))
self.assertEqual(
expected_residual,
(
actual_residual._source_bundle.start_position,
actual_residual._source_bundle.stop_position,
actual_residual._source_bundle.weight))
self.assertEqual(
actual_primary._source_bundle.weight,
self.sdf_restriction_tracker.current_restriction().weight())
class UseSdfBoundedSourcesTests(unittest.TestCase):
def _run_sdf_wrapper_pipeline(self, source, expected_values):
with beam.Pipeline() as p:
experiments = (p._options.view_as(DebugOptions).experiments or [])
# Set up the experiment option to enable use of SDFBoundedSourceWrapper
if 'beam_fn_api' not in experiments:
# Required so mocking below doesn't mock Create used in assert_that.
experiments.append('beam_fn_api')
p._options.view_as(DebugOptions).experiments = experiments
actual = p | beam.io.Read(source)
assert_that(actual, equal_to(expected_values))
@mock.patch('apache_beam.io.iobase._SDFBoundedSourceWrapper.expand')
def test_sdf_wrapper_overrides_read(self, sdf_wrapper_mock_expand):
def _fake_wrapper_expand(pbegin):
return pbegin | beam.Create(['fake'])
sdf_wrapper_mock_expand.side_effect = _fake_wrapper_expand
self._run_sdf_wrapper_pipeline(RangeSource(0, 4), ['fake'])
def test_sdf_wrap_range_source(self):
self._run_sdf_wrapper_pipeline(RangeSource(0, 4), [0, 1, 2, 3])
if __name__ == '__main__':
unittest.main()
|
|
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
class OAuthError(RuntimeError):
def __init__(self, message='OAuth error occurred.'):
self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# url escape
def escape(s):
# escape '/' too
return urllib.quote(s, safe='~')
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
return int(time.time())
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
return ''.join([str(random.randint(0, 9)) for i in range(length)])
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
# access tokens and request tokens
key = None
secret = None
'''
key = the token
secret = the token secret
'''
def __init__(self, key, secret):
self.key = key
self.secret = secret
def to_string(self):
return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret})
# return a token from something like:
# oauth_token_secret=digg&oauth_token=digg
def from_string(s):
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
return OAuthToken(key, secret)
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
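# Illustrative sketch (not part of the original module; Python 2, matching the
# rest of this file): an OAuthToken round-trips through its query-string form.
def _example_token_roundtrip():
    token = OAuthToken('request-key', 'request-secret')
    serialized = token.to_string()  # e.g. 'oauth_token=request-key&oauth_token_secret=request-secret' (order may vary)
    restored = OAuthToken.from_string(serialized)
    assert restored.key == token.key and restored.secret == token.secret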
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
'''
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
... any additional parameters, as defined by the Service Provider.
'''
parameters = None # oauth parameters
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')
# get any non-oauth parameters
def get_nonoauth_parameters(self):
parameters = {}
for k, v in self.parameters.iteritems():
# ignore oauth parameters
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
# serialize as a header for an HTTPAuth request
def to_header(self, realm=''):
auth_header = 'OAuth realm="%s"' % realm
# add the oauth parameters
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
# serialize as post data for a POST request
def to_postdata(self):
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()])
# serialize as a url for a GET request
def to_url(self):
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
# return a string that consists of all the parameters that need to be signed
def get_normalized_parameters(self):
params = self.parameters
try:
# exclude the signature if it exists
del params['oauth_signature']
except:
pass
key_values = params.items()
# sort lexicographically, first by key, then by value
key_values.sort()
# combine key value pairs in string and escape
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values])
# just uppercases the http method
def get_normalized_http_method(self):
return self.http_method.upper()
# parses the url and rebuilds it to be scheme://host/path
def get_normalized_http_url(self):
parts = urlparse.urlparse(self.http_url)
host = parts[1].lower()
if host.endswith(':80') or host.endswith(':443'):
host = host.split(':')[0]
url_string = '%s://%s%s' % (parts[0], host, parts[2]) # scheme, netloc, path
return url_string
# set the signature parameter to the result of build_signature
def sign_request(self, signature_method, consumer, token):
# set the signature method
self.set_parameter('oauth_signature_method', signature_method.get_name())
# set the signature
self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
# call the build signature method within the signature method
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
# combine multiple parameter sources
if parameters is None:
parameters = {}
# headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# check that the authorization header is OAuth
if auth_header.find('OAuth') > -1:
try:
# get the parameters from the header
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
# GET or POST query string
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
# util function: turn the Authorization: header into parameters; has to do some unescaping
def _split_header(header):
params = {}
parts = header[6:].split(',')
for param in parts:
# ignore realm parameter
if param.find('realm') > -1:
continue
# remove whitespace
param = param.strip()
# split key-value
param_parts = param.split('=', 1)
# remove quotes and unescape the value
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
# util function: turn url string into parameters, has to do some unescaping
# even empty values should be included
def _split_url_string(param_str):
parameters = cgi.parse_qs(param_str, keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
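# Illustrative sketch (not part of the original module; Python 2, matching the
# rest of this file): build an OAuthRequest and serialize it. Only oauth_*
# parameters end up in the Authorization header; all parameters go into post data.
def _example_request_serialization():
    request = OAuthRequest(
        http_method='GET',
        http_url='http://example.com/photos',
        parameters={'oauth_token': 'abc', 'size': 'original'})
    assert request.get_normalized_http_url() == 'http://example.com/photos'
    assert 'oauth_token=abc' in request.to_postdata()
    header = request.to_header(realm='http://example.com/')
    assert 'oauth_token="abc"' in header['Authorization']
    assert 'size' not in header['Authorization']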
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
timestamp_threshold = 300 # in seconds, five minutes
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, oauth_data_store):
self.data_store = oauth_data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
# process a request_token request
# returns the request token on success
def fetch_request_token(self, oauth_request):
try:
# get the request token for authorization
token = self._get_token(oauth_request, 'request')
except OAuthError:
# no token required for the initial token request
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
self._check_signature(oauth_request, consumer, None)
# fetch a new token
token = self.data_store.fetch_request_token(consumer)
return token
# process an access_token request
# returns the access token on success
def fetch_access_token(self, oauth_request):
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the request token
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token)
return new_token
# verify an api call, checks all the parameters
def verify_request(self, oauth_request):
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the access token
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
# authorize a request token
def authorize_token(self, token, user):
return self.data_store.authorize_request_token(token, user)
# get the callback url
def get_callback(self, oauth_request):
return oauth_request.get_parameter('oauth_callback')
# optional support for the authenticate header
def build_authenticate_header(self, realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# verify the correct version request for this server
def _get_version(self, oauth_request):
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
# figure out the signature with some defaults
def _get_signature_method(self, oauth_request):
try:
signature_method = oauth_request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# get the signature method object
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise OAuthError('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
if not consumer_key:
raise OAuthError('Invalid consumer key.')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
# try to find the token for the provided request token key
def _get_token(self, oauth_request, token_type='access'):
token_field = oauth_request.get_parameter('oauth_token')
consumer = self._get_consumer(oauth_request)
token = self.data_store.lookup_token(consumer, token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# validate the signature
valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
# verify that timestamp is recentish
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %d differ by more than the %d second threshold' % (timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
# verify that the nonce is uniqueish
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def access_resource(self, oauth_request):
# -> some protected resource
raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
def lookup_consumer(self, key):
# -> OAuthConsumer
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
# -> OAuthToken
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
# -> OAuthToken
raise NotImplementedError
def fetch_request_token(self, oauth_consumer):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token):
# -> OAuthToken
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
# -> OAuthToken
raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
def get_name(self):
# -> str
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
# -> str key, str raw
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
# -> str
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
# build the base signature string
key, raw = self.build_signature_base_string(oauth_request, consumer, token)
# hmac object
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # deprecated
hashed = hmac.new(key, raw, sha)
# calculate the digest base 64
return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
# concatenate the consumer key and secret
sig = escape(consumer.secret) + '&'
if token:
sig = sig + escape(token.secret)
return sig
def build_signature(self, oauth_request, consumer, token):
return self.build_signature_base_string(oauth_request, consumer, token)
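# Illustrative sketch (not part of the original module; Python 2, matching the
# rest of this file): end-to-end signing with the PLAINTEXT method, whose
# signature is simply escape(consumer_secret) + '&' + escape(token_secret).
def _example_plaintext_signing():
    consumer = OAuthConsumer('consumer-key', 'consumer-secret')
    token = OAuthToken('token-key', 'token-secret')
    request = OAuthRequest.from_consumer_and_token(
        consumer, token=token, http_method='GET',
        http_url='http://example.com/resource')
    request.sign_request(OAuthSignatureMethod_PLAINTEXT(), consumer, token)
    assert request.get_parameter('oauth_signature') == 'consumer-secret&token-secret'
    # OAuthSignatureMethod_HMAC_SHA1 is used the same way but yields a
    # base64-encoded HMAC digest instead of the plain secrets.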
|
|
import io
import itertools
import os
import sys
from collections import defaultdict
from copy import deepcopy
from pkgutil import get_data
from urllib.parse import urljoin
import lxml.html
import requests
from cf_units import Unit
from lxml import etree
from netCDF4 import Dataset, Dimension, Variable
from pkg_resources import resource_filename
# copied from paegan
# paegan may depend on these later
_possiblet = {
"time",
"TIME",
"Time",
"t",
"T",
"ocean_time",
"OCEAN_TIME",
"jd",
"JD",
"dn",
"DN",
"times",
"TIMES",
"Times",
"mt",
"MT",
"dt",
"DT",
}
_possiblez = {
"depth",
"DEPTH",
"depths",
"DEPTHS",
"height",
"HEIGHT",
"altitude",
"ALTITUDE",
"alt",
"ALT",
"Alt",
"Altitude",
"h",
"H",
"s_rho",
"S_RHO",
"s_w",
"S_W",
"z",
"Z",
"siglay",
"SIGLAY",
"siglev",
"SIGLEV",
"sigma",
"SIGMA",
"vertical",
"VERTICAL",
"lev",
"LEV",
"level",
"LEVEL",
}
_possiblex = {
"x",
"X",
"lon",
"LON",
"xlon",
"XLON",
"lonx",
"lonx",
"lon_u",
"LON_U",
"lon_v",
"LON_V",
"lonc",
"LONC",
"Lon",
"Longitude",
"longitude",
"LONGITUDE",
"lon_rho",
"LON_RHO",
"lon_psi",
"LON_PSI",
}
_possibley = {
"y",
"Y",
"lat",
"LAT",
"ylat",
"YLAT",
"laty",
"laty",
"lat_u",
"LAT_U",
"lat_v",
"LAT_V",
"latc",
"LATC",
"Lat",
"Latitude",
"latitude",
"LATITUDE",
"lat_rho",
"LAT_RHO",
"lat_psi",
"LAT_PSI",
}
_possibleaxis = _possiblet | _possiblez | _possiblex | _possibley
_possiblexunits = {
"degrees_east",
"degree_east",
"degrees_E",
"degree_E",
"degreesE",
"degreeE",
}
_possibleyunits = {
"degrees_north",
"degree_north",
"degrees_N",
"degree_N",
"degreesN",
"degreeN",
}
_possibletunits = {
"day",
"days",
"d",
"hour",
"hours",
"hr",
"hrs",
"h",
"year",
"years",
"minute",
"minutes",
"m",
"min",
"mins",
"second",
"seconds",
"s",
"sec",
"secs",
}
_possibleaxisunits = _possiblexunits | _possibleyunits | _possibletunits
class DotDict(dict):
"""
Subclass of dict that will recursively look up attributes with dot notation.
This is primarily for working with JSON-style data in a cleaner, JavaScript-like way.
Note that this will instantiate a number of child DotDicts when you first access attributes;
do not use in performance-critical parts of your code.
"""
def __dir__(self):
return list(self.__dict__.keys()) + list(self.keys())
def __getattr__(self, key):
"""Make attempts to lookup by nonexistent attributes also attempt key lookups."""
if key in self:
return self[key]
import dis
import sys
frame = sys._getframe(1)
if "\x00%c" % dis.opmap["STORE_ATTR"] in frame.f_code.co_code:
self[key] = DotDict()
return self[key]
raise AttributeError(key)
def __setattr__(self, key, value):
if key in dir(dict):
raise AttributeError("%s conflicts with builtin." % key)
if isinstance(value, dict):
self[key] = DotDict(value)
else:
self[key] = value
def copy(self):
return deepcopy(self)
def get_safe(self, qual_key, default=None):
"""
@brief Returns the value of the qualified key, such as "system.name", or None if it does not exist.
If default is given, returns the default instead. No exception is thrown.
"""
value = get_safe(self, qual_key)
if value is None:
value = default
return value
@classmethod
def fromkeys(cls, seq, value=None):
return DotDict(dict.fromkeys(seq, value))
def get_safe(dict_instance, keypath, default=None):
"""
Returns a value from within a nested dict structure, given a dot-separated
path expression such as "system.server.host" or a list of key entries
@retval Value if found, otherwise the default
"""
try:
obj = dict_instance
keylist = keypath if type(keypath) is list else keypath.split(".")
for key in keylist:
obj = obj[key]
return obj
except Exception:
return default
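# Illustrative sketch (not part of the original module): DotDict attribute
# access and get_safe path lookups side by side.
def _example_dotdict_and_get_safe():
    d = DotDict()
    d.config = {"host": "example.org", "port": 8080}  # nested dicts become DotDicts
    assert d.config.host == "example.org"
    assert d.get_safe("config.port") == 8080
    cfg = {"system": {"server": {"host": "db01"}}}
    assert get_safe(cfg, "system.server.host") == "db01"
    assert get_safe(cfg, ["system", "server", "host"]) == "db01"
    assert get_safe(cfg, "system.missing", default="n/a") == "n/a"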
class VariableReferenceError(Exception):
"""A variable to assign bad variable references to"""
def __init__(self, name: str, dataset: Dataset = None):
self.name = name
self.dataset_path = dataset.filepath() if dataset is not None else None
def __str__(self):
return (
f"Cannot find variable named {self.name} in dataset " f"{self}.dataset_path"
)
class NCGraph(object):
def __init__(
self, ds, name, nc_object, self_reference_variables, reference_map=None
):
self.ds = ds
self.name = name
self.coords = DotDict()
self.dims = DotDict()
self.grid_mapping = DotDict()
self.obj = nc_object
self.reference_variables = self_reference_variables
self.reference_map = reference_map or {}
self.reference_map[name] = self
if isinstance(nc_object, Dimension):
self._type = "dim"
elif isinstance(nc_object, Variable):
self._type = "var"
self.get_references()
else:
raise TypeError("unknown type %s" % repr(type(nc_object)))
def get_references(self):
for dim in self.obj.dimensions:
self.dims[dim] = self.get_dimension(dim)
if hasattr(self.obj, "coordinates"):
coords = self.obj.coordinates.split(" ")
for coord in coords:
self.coords[coord] = self.get_coordinate(coord)
if hasattr(self.obj, "grid_mapping"):
gm = self.obj.grid_mapping
self.grid_mapping[gm] = self.get_grid_mapping(gm)
def get_dimension(self, dim):
if dim in self.reference_map:
return self.reference_map[dim]
return NCGraph(
self.ds,
dim,
self.ds.dimensions[dim],
self.reference_variables,
self.reference_map,
)
def get_coordinate(self, coord):
if coord not in self.ds.variables:
return
if coord in self.reference_map:
if self.name == coord:
self.reference_variables.add(self.name)
return self.reference_map[coord]
return NCGraph(
self.ds,
coord,
self.ds.variables[coord],
self.reference_variables,
self.reference_map,
)
def get_grid_mapping(self, gm):
if gm not in self.ds.variables:
return
if gm in self.reference_map:
return self.reference_map[gm]
return NCGraph(
self.ds,
gm,
self.ds.variables[gm],
self.reference_variables,
self.reference_map,
)
def __getattr__(self, key):
if key in self.__dict__:
return self.__dict__[key]
return getattr(self.obj, key)
class StandardNameTable(object):
class NameEntry(object):
def __init__(self, entrynode):
self.canonical_units = self._get(entrynode, "canonical_units", True)
self.grib = self._get(entrynode, "grib")
self.amip = self._get(entrynode, "amip")
self.description = self._get(entrynode, "description")
def _get(self, entrynode, attrname, required=False):
vals = entrynode.xpath(attrname)
if len(vals) > 1:
raise Exception("Multiple attrs (%s) found" % attrname)
elif required and len(vals) == 0:
raise Exception("Required attr (%s) not found" % attrname)
return vals[0].text
def __init__(self, cached_location=None):
if cached_location:
with io.open(cached_location, "r", encoding="utf-8") as fp:
resource_text = fp.read()
elif os.environ.get("CF_STANDARD_NAME_TABLE") and os.path.exists(
os.environ["CF_STANDARD_NAME_TABLE"]
):
with io.open(
os.environ["CF_STANDARD_NAME_TABLE"], "r", encoding="utf-8"
) as fp:
resource_text = fp.read()
else:
resource_text = get_data(
"compliance_checker", "data/cf-standard-name-table.xml"
)
parser = etree.XMLParser(remove_blank_text=True)
self._root = etree.fromstring(resource_text, parser)
# generate and save a list of all standard names in file
self._names = [node.get("id") for node in self._root.iter("entry")]
self._aliases = [node.get("id") for node in self._root.iter("alias")]
self._version = self._root.xpath("version_number")[0].text
def __len__(self):
return len(self._names) + len(self._aliases)
def __getitem__(self, key):
if not (key in self._names or key in self._aliases):
raise KeyError("%s not found in standard name table" % key)
if key in self._aliases:
idx = self._aliases.index(key)
entryids = self._root.xpath("alias")[idx].xpath("entry_id")
if len(entryids) != 1:
raise Exception(
"Inconsistency in standard name table, could not lookup alias for %s"
% key
)
key = entryids[0].text
if key not in self._names:
raise KeyError("%s not found in standard name table" % key)
idx = self._names.index(key)
entry = self.NameEntry(self._root.xpath("entry")[idx])
return entry
def get(self, key, default=None):
"""
Returns the item for the key or returns the default if it does not exist
"""
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
return key in self._names or key in self._aliases
def __iter__(self):
return iter(itertools.chain(self._names, self._aliases))
def download_cf_standard_name_table(version, location=None):
"""
Downloads the specified CF standard name table version and saves it to file
:param str version: CF standard name table version number (e.g. 34)
:param str location: Path/filename to write downloaded xml file to
"""
if (
location is None
): # This case occurs when updating the packaged version from command line
location = resource_filename(
"compliance_checker", "data/cf-standard-name-table.xml"
)
if version == "latest":
tables_tree = lxml.html.parse("http://cfconventions.org/documents.html")
end_str = "cf-standard-name-table.xml"
xpath_expr = (
"//a[substring(@href, string-length(@href) - "
"string-length('{0}') +1) "
" = '{0}'][1]".format(end_str)
)
latest_vers = tables_tree.xpath(xpath_expr)[0]
url = urljoin("http://cfconventions.org", latest_vers.attrib["href"])
else:
url = "http://cfconventions.org/Data/cf-standard-names/{0}/src/cf-standard-name-table.xml".format(
version
)
r = requests.get(url, allow_redirects=True)
r.raise_for_status()
print(
"Downloading cf-standard-names table version {0} from: {1}".format(
version, url
),
file=sys.stderr,
)
with open(location, "wb") as f:
f.write(r.content)
def create_cached_data_dir():
"""
Returns the path to the data directory to download CF standard names.
Use $XDG_DATA_HOME.
"""
writable_directory = os.path.join(os.path.expanduser("~"), ".local", "share")
data_directory = os.path.join(
os.environ.get("XDG_DATA_HOME", writable_directory), "compliance-checker"
)
if not os.path.isdir(data_directory):
os.makedirs(data_directory)
return data_directory
def units_known(units):
try:
Unit(units)
except ValueError:
return False
return True
def units_convertible(units1, units2, reftimeistime=True):
"""Return True if a Unit representing the string units1 can be converted
to a Unit representing the string units2, else False."""
try:
u1 = Unit(units1)
u2 = Unit(units2)
except ValueError:
return False
return u1.is_convertible(u2)
def units_temporal(units):
try:
u = Unit(units)
except ValueError:
return False
return u.is_time_reference()
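# Illustrative sketch (not part of the original module; assumes cf_units and
# UDUNITS are installed): how the three unit helpers above behave.
def _example_unit_helpers():
    assert units_known("meters")
    assert units_convertible("km", "m")
    assert units_temporal("days since 1970-01-01")
    assert not units_temporal("meters")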
def map_axes(dim_vars, reverse_map=False):
"""
Returns axis name -> [dimension names], or, if reverse_map is True,
dimension name -> [axis name].
"""
ret_val = defaultdict(list)
axes = ["X", "Y", "Z", "T"]
for k, v in dim_vars.items():
axis = getattr(v, "axis", "")
if not axis:
continue
axis = axis.upper()
if axis in axes:
if reverse_map:
ret_val[k].append(axis)
else:
ret_val[axis].append(k)
return dict(ret_val)
def find_coord_vars(ncds):
"""
Finds all coordinate variables in a dataset.
A variable with the same name as a dimension is called a coordinate variable.
"""
coord_vars = []
for d in ncds.dimensions:
if d in ncds.variables and ncds.variables[d].dimensions == (d,):
coord_vars.append(ncds.variables[d])
return coord_vars
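# Illustrative sketch (not part of the original module; assumes netCDF4 is
# available): a variable sharing its dimension's name is a coordinate variable.
def _example_find_coord_vars():
    ds = Dataset("example.nc", "w", diskless=True)
    ds.createDimension("time", 3)
    ds.createVariable("time", "f8", ("time",))  # coordinate variable
    ds.createVariable("temp", "f8", ("time",))  # plain data variable
    assert [v.name for v in find_coord_vars(ds)] == ["time"]
    ds.close()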
def is_time_variable(varname, var):
"""
Identifies if a variable represents time
"""
satisfied = varname.lower() == "time"
satisfied |= getattr(var, "standard_name", "") == "time"
satisfied |= getattr(var, "axis", "") == "T"
satisfied |= units_convertible(
"seconds since 1900-01-01", getattr(var, "units", "")
)
return satisfied
def is_vertical_coordinate(var_name, var):
"""
Determines if a variable is a vertical coordinate variable
4.3
A vertical coordinate will be identifiable by: units of pressure; or the presence of the positive attribute with a
value of up or down (case insensitive). Optionally, the vertical type may be indicated additionally by providing
the standard_name attribute with an appropriate value, and/or the axis attribute with the value Z.
"""
# Known name
satisfied = var_name.lower() in _possiblez
satisfied |= getattr(var, "standard_name", "") in _possiblez
# Is the axis set to Z?
satisfied |= getattr(var, "axis", "").lower() == "z"
is_pressure = units_convertible(getattr(var, "units", "1"), "dbar")
# Pressure defined or positive defined
satisfied |= is_pressure
if not is_pressure:
satisfied |= getattr(var, "positive", "").lower() in ("up", "down")
return satisfied
def compare_unit_types(specified, reference):
"""
Compares two unit strings via UDUnits
:param str specified: The specified unit
:param str reference: The reference unit against which to compare
"""
msgs = []
err_flag = False
try:
specified_unit = Unit(specified)
except ValueError:
msgs.append(f"Specified conversion unit f{specified} may not be valid UDUnits")
err_flag = True
try:
reference_unit = Unit(reference)
except ValueError:
msgs.append(f"Specified conversion unit f{reference} may not be valid UDUnits")
err_flag = True
if err_flag:
return msgs
unit_convertible = specified_unit.is_convertible(reference_unit)
fail_msg = [f'Units "{specified}" are not convertible to "{reference}"']
return msgs if unit_convertible else fail_msg
def string_from_var_type(variable):
if isinstance(variable, str):
return variable[:]
elif variable.dtype.kind == "S":
strip_char = variable.fill_value or b"\x00"
return variable.tobytes().rstrip(strip_char).decode("utf-8")
else:
raise TypeError(
f"Variable '{variable.name} has non-string/character' "
f"dtype {variable.dtype}"
)
def reference_attr_variables(
dataset: Dataset, attributes_string: str, split_by: str = None
):
"""
Attempts to reference variables in the string, optionally splitting by
a string
"""
if attributes_string is None:
return None
elif split_by is None:
return dataset.variables.get(
attributes_string, VariableReferenceError(attributes_string)
)
else:
string_proc = attributes_string.split(split_by)
return [
dataset.variables.get(var_name, VariableReferenceError(var_name))
for var_name in string_proc
]
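# Illustrative sketch (not part of the original module; assumes netCDF4 is
# available): unresolved names come back as VariableReferenceError instances.
def _example_reference_attr_variables():
    ds = Dataset("refs.nc", "w", diskless=True)
    ds.createDimension("x", 1)
    ds.createVariable("lon", "f8", ("x",))
    refs = reference_attr_variables(ds, "lon missing_var", split_by=" ")
    assert refs[0] is ds.variables["lon"]
    assert isinstance(refs[1], VariableReferenceError)
    ds.close()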
|
|
"""Unit test for Switch objects."""
from unittest.mock import AsyncMock, Mock
from xknx import XKNX
from xknx.devices import Switch
from xknx.dpt import DPTBinary
from xknx.telegram import GroupAddress, Telegram
from xknx.telegram.apci import GroupValueRead, GroupValueResponse, GroupValueWrite
class TestSwitch:
"""Test class for Switch object."""
#
# SYNC
#
async def test_sync(self):
"""Test sync function / sending group reads to KNX bus."""
xknx = XKNX()
switch = Switch(
xknx, "TestOutlet", group_address_state="1/2/3", group_address="1/2/4"
)
await switch.sync()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"), payload=GroupValueRead()
)
async def test_sync_state_address(self):
"""Test sync function / sending group reads to KNX bus. Test with Switch with explicit state address."""
xknx = XKNX()
switch = Switch(
xknx, "TestOutlet", group_address="1/2/3", group_address_state="1/2/4"
)
await switch.sync()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/4"), payload=GroupValueRead()
)
#
# TEST PROCESS
#
async def test_process(self):
"""Test process / reading telegrams from telegram queue. Test if device was updated."""
xknx = XKNX()
callback_mock = AsyncMock()
switch1 = Switch(
xknx, "TestOutlet", group_address="1/2/3", device_updated_cb=callback_mock
)
switch2 = Switch(
xknx, "TestOutlet", group_address="1/2/3", device_updated_cb=callback_mock
)
assert switch1.state is None
assert switch2.state is None
callback_mock.assert_not_called()
telegram_on = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
telegram_off = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(0)),
)
await switch1.process(telegram_on)
assert switch1.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch1.process(telegram_off)
assert switch1.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
# test setting switch2 to False with first telegram
await switch2.process(telegram_off)
assert switch2.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch2.process(telegram_on)
assert switch2.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
async def test_process_state(self):
"""Test process / reading telegrams from telegram queue. Test if device was updated."""
xknx = XKNX()
callback_mock = AsyncMock()
switch1 = Switch(
xknx,
"TestOutlet",
group_address="1/2/3",
group_address_state="1/2/4",
device_updated_cb=callback_mock,
)
switch2 = Switch(
xknx,
"TestOutlet",
group_address="1/2/3",
group_address_state="1/2/4",
device_updated_cb=callback_mock,
)
assert switch1.state is None
assert switch2.state is None
callback_mock.assert_not_called()
telegram_on = Telegram(
destination_address=GroupAddress("1/2/4"),
payload=GroupValueResponse(DPTBinary(1)),
)
telegram_off = Telegram(
destination_address=GroupAddress("1/2/4"),
payload=GroupValueResponse(DPTBinary(0)),
)
await switch1.process(telegram_on)
assert switch1.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch1.process(telegram_off)
assert switch1.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
# test setting switch2 to False with first telegram
await switch2.process(telegram_off)
assert switch2.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch2.process(telegram_on)
assert switch2.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
async def test_process_invert(self):
"""Test process / reading telegrams from telegram queue with inverted switch."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3", invert=True)
assert switch.state is None
telegram_inv_on = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(0)),
)
telegram_inv_off = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
await switch.process(telegram_inv_on)
assert switch.state is True
await switch.process(telegram_inv_off)
assert switch.state is False
async def test_process_reset_after(self, time_travel):
"""Test process reset_after."""
xknx = XKNX()
reset_after_sec = 1
switch = Switch(
xknx, "TestInput", group_address="1/2/3", reset_after=reset_after_sec
)
telegram_on = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
await switch.process(telegram_on)
assert switch.state
assert xknx.telegrams.qsize() == 0
await time_travel(reset_after_sec)
assert xknx.telegrams.qsize() == 1
await switch.process(xknx.telegrams.get_nowait())
assert not switch.state
async def test_process_reset_after_cancel_existing(self, time_travel):
"""Test process reset_after cancels existing reset tasks."""
xknx = XKNX()
reset_after_sec = 0.01
switch = Switch(
xknx, "TestInput", group_address="1/2/3", reset_after=reset_after_sec
)
telegram_on = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueResponse(DPTBinary(1)),
)
await switch.process(telegram_on)
assert switch.state
assert xknx.telegrams.qsize() == 0
await time_travel(reset_after_sec / 2)
# half way through the reset timer
await switch.process(telegram_on)
assert switch.state
await time_travel(reset_after_sec / 2)
assert xknx.telegrams.qsize() == 0
async def test_process_callback(self):
"""Test process / reading telegrams from telegram queue. Test if callback was called."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3")
after_update_callback = Mock()
async def async_after_update_callback(device):
"""Async callback."""
after_update_callback(device)
switch.register_device_updated_cb(async_after_update_callback)
telegram = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
await switch.process(telegram)
after_update_callback.assert_called_with(switch)
#
# TEST RESPOND
#
async def test_respond_to_read(self):
"""Test respond_to_read function."""
xknx = XKNX()
responding = Switch(
xknx,
"TestSensor1",
group_address="1/1/1",
respond_to_read=True,
)
non_responding = Switch(
xknx,
"TestSensor2",
group_address="1/1/1",
respond_to_read=False,
)
responding_multiple = Switch(
xknx,
"TestSensor3",
group_address=["1/1/1", "3/3/3"],
group_address_state="2/2/2",
respond_to_read=True,
)
# set initial payload of Switch
responding.switch.value = True
non_responding.switch.value = True
responding_multiple.switch.value = True
read_telegram = Telegram(
destination_address=GroupAddress("1/1/1"), payload=GroupValueRead()
)
# verify no response when respond is False
await non_responding.process(read_telegram)
assert xknx.telegrams.qsize() == 0
# verify response when respond is True
await responding.process(read_telegram)
assert xknx.telegrams.qsize() == 1
response = xknx.telegrams.get_nowait()
assert response == Telegram(
destination_address=GroupAddress("1/1/1"),
payload=GroupValueResponse(DPTBinary(True)),
)
# verify that a multi-address switch responds only to a read on its first group_address
await responding_multiple.process(read_telegram)
assert xknx.telegrams.qsize() == 1
response = xknx.telegrams.get_nowait()
assert response == Telegram(
destination_address=GroupAddress("1/1/1"),
payload=GroupValueResponse(DPTBinary(True)),
)
await responding_multiple.process(
Telegram(
destination_address=GroupAddress("2/2/2"), payload=GroupValueRead()
)
)
await responding_multiple.process(
Telegram(
destination_address=GroupAddress("3/3/3"), payload=GroupValueRead()
)
)
assert xknx.telegrams.qsize() == 0
#
# TEST SET ON
#
async def test_set_on(self):
"""Test switching on switch."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3")
await switch.set_on()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
#
# TEST SET OFF
#
async def test_set_off(self):
"""Test switching off switch."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3")
await switch.set_off()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(0)),
)
#
# TEST SET INVERT
#
async def test_set_invert(self):
"""Test switching on/off inverted switch."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3", invert=True)
await switch.set_on()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(0)),
)
await switch.set_off()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
#
# TEST has_group_address
#
def test_has_group_address(self):
"""Test has_group_address."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3")
assert switch.has_group_address(GroupAddress("1/2/3"))
assert not switch.has_group_address(GroupAddress("2/2/2"))
#
# TEST passive group addresses
#
def test_has_group_address_passive(self):
"""Test has_group_address with passive group address."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address=["1/2/3", "4/4/4"])
assert switch.has_group_address(GroupAddress("1/2/3"))
assert switch.has_group_address(GroupAddress("4/4/4"))
assert not switch.has_group_address(GroupAddress("2/2/2"))
async def test_process_passive(self):
"""Test process / reading telegrams from telegram queue. Test if device was updated."""
xknx = XKNX()
callback_mock = AsyncMock()
switch1 = Switch(
xknx,
"TestOutlet",
group_address=["1/2/3", "4/4/4"],
group_address_state=["1/2/30", "5/5/5"],
device_updated_cb=callback_mock,
)
assert switch1.state is None
callback_mock.assert_not_called()
telegram_on_passive = Telegram(
destination_address=GroupAddress("4/4/4"),
payload=GroupValueWrite(DPTBinary(1)),
)
telegram_off_passive = Telegram(
destination_address=GroupAddress("5/5/5"),
payload=GroupValueWrite(DPTBinary(0)),
)
await switch1.process(telegram_on_passive)
assert switch1.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch1.process(telegram_off_passive)
assert switch1.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
|
|
from collections import Iterable
from difflib import get_close_matches
from numbers import Real
import re
from warnings import warn
import numpy as np
import h5py
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from . import HDF5_VERSION, HDF5_VERSION_MAJOR
from .data import K_BOLTZMANN, ATOMIC_SYMBOL, EV_PER_MEV, NATURAL_ABUNDANCE
from .ace import Table, get_table
from .angle_energy import AngleEnergy
from .function import Tabulated1D
from .correlated import CorrelatedAngleEnergy
from openmc.stats import Discrete, Tabular
_THERMAL_NAMES = {
'c_Al27': ('al', 'al27'),
'c_Be': ('be', 'be-metal'),
'c_BeO': ('beo',),
'c_Be_in_BeO': ('bebeo', 'be-o', 'be/o'),
'c_C6H6': ('benz', 'c6h6'),
'c_C_in_SiC': ('csic',),
'c_Ca_in_CaH2': ('cah',),
'c_D_in_D2O': ('dd2o', 'hwtr', 'hw'),
'c_Fe56': ('fe', 'fe56'),
'c_Graphite': ('graph', 'grph', 'gr'),
'c_H_in_CaH2': ('hcah2',),
'c_H_in_CH2': ('hch2', 'poly', 'pol'),
'c_H_in_CH4_liquid': ('lch4', 'lmeth'),
'c_H_in_CH4_solid': ('sch4', 'smeth'),
'c_H_in_H2O': ('hh2o', 'lwtr', 'lw'),
'c_H_in_H2O_solid': ('hice',),
'c_H_in_C5O2H8': ('lucite', 'c5o2h8'),
'c_H_in_YH2': ('hyh2',),
'c_H_in_ZrH': ('hzrh', 'h-zr', 'h/zr', 'hzr'),
'c_Mg24': ('mg', 'mg24'),
'c_O_in_BeO': ('obeo', 'o-be', 'o/be'),
'c_O_in_D2O': ('od2o',),
'c_O_in_H2O_ice': ('oice',),
'c_O_in_UO2': ('ouo2', 'o2-u', 'o2/u'),
'c_ortho_D': ('orthod', 'dortho'),
'c_ortho_H': ('orthoh', 'hortho'),
'c_Si_in_SiC': ('sisic',),
'c_SiO2_alpha': ('sio2', 'sio2a'),
'c_SiO2_beta': ('sio2b',),
'c_para_D': ('parad', 'dpara'),
'c_para_H': ('parah', 'hpara'),
'c_U_in_UO2': ('uuo2', 'u-o2', 'u/o2'),
'c_Y_in_YH2': ('yyh2',),
'c_Zr_in_ZrH': ('zrzrh', 'zr-h', 'zr/h')
}
def get_thermal_name(name):
"""Get proper S(a,b) table name, e.g. 'HH2O' -> 'c_H_in_H2O'
Parameters
----------
name : str
Name of an ACE thermal scattering table
Returns
-------
str
GND-format thermal scattering name
"""
if name in _THERMAL_NAMES:
return name
else:
for proper_name, names in _THERMAL_NAMES.items():
if name.lower() in names:
return proper_name
else:
# Make an educated guess?? This actually works well for
# JEFF-3.2 which stupidly uses names like lw00.32t,
# lw01.32t, etc. for different temperatures
for proper_name, names in _THERMAL_NAMES.items():
matches = get_close_matches(
name.lower(), names, cutoff=0.5)
if len(matches) > 0:
warn('Thermal scattering material "{}" is not recognized. '
'Assigning a name of {}.'.format(name, proper_name))
return proper_name
else:
# OK, we give up. Just use the ACE name.
warn('Thermal scattering material "{0}" is not recognized. '
'Assigning a name of c_{0}.'.format(name))
return 'c_' + name
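# Illustrative sketch (not part of the original module): ACE aliases map onto
# GND-style S(a,b) names, and already-correct names pass through unchanged.
def _example_get_thermal_name():
    assert get_thermal_name('lwtr') == 'c_H_in_H2O'        # exact alias match
    assert get_thermal_name('c_Graphite') == 'c_Graphite'  # already GND-style
    # Unrecognized names fall back to close matching or a plain 'c_' prefix,
    # emitting a warning either way.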
class CoherentElastic(EqualityMixin):
r"""Coherent elastic scattering data from a crystalline material
Parameters
----------
bragg_edges : Iterable of float
Bragg edge energies in eV
factors : Iterable of float
Partial sum of structure factors, :math:`\sum\limits_{i=1}^{E_i<E} S_i`
Attributes
----------
bragg_edges : Iterable of float
Bragg edge energies in eV
factors : Iterable of float
Partial sum of structure factors, :math:`\sum\limits_{i=1}^{E_i<E} S_i`
"""
def __init__(self, bragg_edges, factors):
self.bragg_edges = bragg_edges
self.factors = factors
def __call__(self, E):
if isinstance(E, Iterable):
E = np.asarray(E)
idx = np.searchsorted(self.bragg_edges, E)
return self.factors[idx] / E
def __len__(self):
return len(self.bragg_edges)
@property
def bragg_edges(self):
return self._bragg_edges
@property
def factors(self):
return self._factors
@bragg_edges.setter
def bragg_edges(self, bragg_edges):
cv.check_type('Bragg edges', bragg_edges, Iterable, Real)
self._bragg_edges = np.asarray(bragg_edges)
@factors.setter
def factors(self, factors):
cv.check_type('structure factor cumulative sums', factors,
Iterable, Real)
self._factors = np.asarray(factors)
def to_hdf5(self, group, name):
"""Write coherent elastic scattering to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
name : str
Name of the dataset to create
"""
dataset = group.create_dataset(name, data=np.vstack(
[self.bragg_edges, self.factors]))
dataset.attrs['type'] = np.string_('bragg')
@classmethod
def from_hdf5(cls, dataset):
"""Read coherent elastic scattering from an HDF5 dataset
Parameters
----------
dataset : h5py.Dataset
HDF5 dataset to read from
Returns
-------
openmc.data.CoherentElastic
Coherent elastic scattering cross section
"""
bragg_edges = dataset.value[0, :]
factors = dataset.value[1, :]
return cls(bragg_edges, factors)
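# Illustrative sketch (not part of the original module): the lookup performed
# by CoherentElastic.__call__ above, factors[searchsorted(bragg_edges, E)] / E.
def _example_coherent_elastic():
    ce = CoherentElastic(bragg_edges=[0.125, 0.25, 0.5], factors=[1.0, 2.0, 4.0])
    # searchsorted([0.125, 0.25, 0.5], 0.5) == 2, so the value is 4.0 / 0.5
    assert ce(0.5) == 8.0
    assert len(ce) == 3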
class ThermalScattering(EqualityMixin):
"""A ThermalScattering object contains thermal scattering data as represented by
an S(alpha, beta) table.
Parameters
----------
name : str
Name of the material using GND convention, e.g. c_H_in_H2O
atomic_weight_ratio : float
Atomic mass ratio of the target nuclide.
kTs : Iterable of float
List of temperatures of the target nuclide in the data set.
The temperatures have units of eV.
Attributes
----------
atomic_weight_ratio : float
Atomic mass ratio of the target nuclide.
elastic_xs : openmc.data.Tabulated1D or openmc.data.CoherentElastic
Elastic scattering cross section derived in the coherent or incoherent
approximation
inelastic_xs : openmc.data.Tabulated1D
Inelastic scattering cross section derived in the incoherent
approximation
name : str
Name of the material using GND convention, e.g. c_H_in_H2O
temperatures : Iterable of str
List of string representations of the temperatures of the target nuclide
in the data set. The temperatures are strings of the temperature,
rounded to the nearest integer; e.g., '294K'
kTs : Iterable of float
List of temperatures of the target nuclide in the data set.
The temperatures have units of eV.
nuclides : Iterable of str
Nuclide names that the thermal scattering data applies to
"""
def __init__(self, name, atomic_weight_ratio, kTs):
self.name = name
self.atomic_weight_ratio = atomic_weight_ratio
self.kTs = kTs
self.elastic_xs = {}
self.elastic_mu_out = {}
self.inelastic_xs = {}
self.inelastic_e_out = {}
self.inelastic_mu_out = {}
self.inelastic_dist = {}
self.secondary_mode = None
self.nuclides = []
def __repr__(self):
if hasattr(self, 'name'):
return "<Thermal Scattering Data: {0}>".format(self.name)
else:
return "<Thermal Scattering Data>"
@property
def temperatures(self):
return ["{}K".format(int(round(kT / K_BOLTZMANN))) for kT in self.kTs]
def export_to_hdf5(self, path, mode='a'):
"""Export table to an HDF5 file.
Parameters
----------
path : str
Path to write HDF5 file to
mode : {'r', 'r+', 'w', 'x', 'a'}
Mode that is used to open the HDF5 file. This is the second argument
to the :class:`h5py.File` constructor.
"""
# Open file and write version
f = h5py.File(path, mode, libver='latest')
f.attrs['version'] = np.array(HDF5_VERSION)
# Write basic data
g = f.create_group(self.name)
g.attrs['atomic_weight_ratio'] = self.atomic_weight_ratio
g.attrs['nuclides'] = np.array(self.nuclides, dtype='S')
g.attrs['secondary_mode'] = np.string_(self.secondary_mode)
ktg = g.create_group('kTs')
for i, temperature in enumerate(self.temperatures):
ktg.create_dataset(temperature, data=self.kTs[i])
for T in self.temperatures:
Tg = g.create_group(T)
# Write thermal elastic scattering
if self.elastic_xs:
elastic_group = Tg.create_group('elastic')
self.elastic_xs[T].to_hdf5(elastic_group, 'xs')
if self.elastic_mu_out:
elastic_group.create_dataset('mu_out',
data=self.elastic_mu_out[T])
# Write thermal inelastic scattering
if self.inelastic_xs:
inelastic_group = Tg.create_group('inelastic')
self.inelastic_xs[T].to_hdf5(inelastic_group, 'xs')
if self.secondary_mode in ('equal', 'skewed'):
inelastic_group.create_dataset('energy_out',
data=self.inelastic_e_out[T])
inelastic_group.create_dataset('mu_out',
data=self.inelastic_mu_out[T])
elif self.secondary_mode == 'continuous':
self.inelastic_dist[T].to_hdf5(inelastic_group)
f.close()
def add_temperature_from_ace(self, ace_or_filename, name=None):
"""Add data to the ThermalScattering object from an ACE file at a
different temperature.
Parameters
----------
ace_or_filename : openmc.data.ace.Table or str
ACE table to read from. If given as a string, it is assumed to be
the filename for the ACE file.
name : str
GND-conforming name of the material, e.g. c_H_in_H2O. If none is
passed, the appropriate name is guessed based on the name of the ACE
table.
Returns
-------
openmc.data.ThermalScattering
Thermal scattering data
"""
data = ThermalScattering.from_ace(ace_or_filename, name)
# Check if temperature already exists
strT = data.temperatures[0]
if strT in self.temperatures:
warn('S(a,b) data at T={} already exists.'.format(strT))
return
# Check that name matches
if data.name != self.name:
raise ValueError('Data provided for an incorrect material.')
# Add temperature
self.kTs += data.kTs
# Add inelastic cross section and distributions
if strT in data.inelastic_xs:
self.inelastic_xs[strT] = data.inelastic_xs[strT]
if strT in data.inelastic_e_out:
self.inelastic_e_out[strT] = data.inelastic_e_out[strT]
if strT in data.inelastic_mu_out:
self.inelastic_mu_out[strT] = data.inelastic_mu_out[strT]
if strT in data.inelastic_dist:
self.inelastic_dist[strT] = data.inelastic_dist[strT]
# Add elastic cross section and angular distribution
if strT in data.elastic_xs:
self.elastic_xs[strT] = data.elastic_xs[strT]
if strT in data.elastic_mu_out:
self.elastic_mu_out[strT] = data.elastic_mu_out[strT]
@classmethod
def from_hdf5(cls, group_or_filename):
"""Generate thermal scattering data from HDF5 group
Parameters
----------
group_or_filename : h5py.Group or str
HDF5 group containing interaction data. If given as a string, it is
assumed to be the filename for the HDF5 file, and the first group
is used to read from.
Returns
-------
openmc.data.ThermalScattering
Neutron thermal scattering data
"""
if isinstance(group_or_filename, h5py.Group):
group = group_or_filename
else:
h5file = h5py.File(group_or_filename, 'r')
# Make sure version matches
if 'version' in h5file.attrs:
major, minor = h5file.attrs['version']
if major != HDF5_VERSION_MAJOR:
raise IOError(
'HDF5 data format uses version {}.{} whereas your '
'installation of the OpenMC Python API expects version '
'{}.x.'.format(major, minor, HDF5_VERSION_MAJOR))
else:
raise IOError(
'HDF5 data does not indicate a version. Your installation of '
'the OpenMC Python API expects version {}.x data.'
.format(HDF5_VERSION_MAJOR))
group = list(h5file.values())[0]
name = group.name[1:]
atomic_weight_ratio = group.attrs['atomic_weight_ratio']
kTg = group['kTs']
kTs = []
for temp in kTg:
kTs.append(kTg[temp].value)
temperatures = [str(int(round(kT / K_BOLTZMANN))) + "K" for kT in kTs]
table = cls(name, atomic_weight_ratio, kTs)
table.nuclides = [nuc.decode() for nuc in group.attrs['nuclides']]
table.secondary_mode = group.attrs['secondary_mode'].decode()
# Read thermal elastic scattering
for T in temperatures:
Tgroup = group[T]
if 'elastic' in Tgroup:
elastic_group = Tgroup['elastic']
# Cross section
elastic_xs_type = elastic_group['xs'].attrs['type'].decode()
if elastic_xs_type == 'Tabulated1D':
table.elastic_xs[T] = \
Tabulated1D.from_hdf5(elastic_group['xs'])
elif elastic_xs_type == 'bragg':
table.elastic_xs[T] = \
CoherentElastic.from_hdf5(elastic_group['xs'])
# Angular distribution
if 'mu_out' in elastic_group:
table.elastic_mu_out[T] = \
elastic_group['mu_out'].value
# Read thermal inelastic scattering
if 'inelastic' in Tgroup:
inelastic_group = Tgroup['inelastic']
table.inelastic_xs[T] = \
Tabulated1D.from_hdf5(inelastic_group['xs'])
if table.secondary_mode in ('equal', 'skewed'):
table.inelastic_e_out[T] = \
inelastic_group['energy_out'].value
table.inelastic_mu_out[T] = \
inelastic_group['mu_out'].value
elif table.secondary_mode == 'continuous':
table.inelastic_dist[T] = \
AngleEnergy.from_hdf5(inelastic_group)
return table
@classmethod
def from_ace(cls, ace_or_filename, name=None):
"""Generate thermal scattering data from an ACE table
Parameters
----------
ace_or_filename : openmc.data.ace.Table or str
ACE table to read from. If given as a string, it is assumed to be
the filename for the ACE file.
name : str
GND-conforming name of the material, e.g. c_H_in_H2O. If none is
passed, the appropriate name is guessed based on the name of the ACE
table.
Returns
-------
openmc.data.ThermalScattering
Thermal scattering data
"""
if isinstance(ace_or_filename, Table):
ace = ace_or_filename
else:
ace = get_table(ace_or_filename)
# Get new name that is GND-consistent
ace_name, xs = ace.name.split('.')
name = get_thermal_name(ace_name)
# Assign temperature to the running list
kTs = [ace.temperature*EV_PER_MEV]
temperatures = [str(int(round(ace.temperature*EV_PER_MEV
/ K_BOLTZMANN))) + "K"]
table = cls(name, ace.atomic_weight_ratio, kTs)
# Incoherent inelastic scattering cross section
idx = ace.jxs[1]
n_energy = int(ace.xss[idx])
energy = ace.xss[idx+1 : idx+1+n_energy]*EV_PER_MEV
xs = ace.xss[idx+1+n_energy : idx+1+2*n_energy]
table.inelastic_xs[temperatures[0]] = Tabulated1D(energy, xs)
if ace.nxs[7] == 0:
table.secondary_mode = 'equal'
elif ace.nxs[7] == 1:
table.secondary_mode = 'skewed'
elif ace.nxs[7] == 2:
table.secondary_mode = 'continuous'
n_energy_out = ace.nxs[4]
if table.secondary_mode in ('equal', 'skewed'):
n_mu = ace.nxs[3]
idx = ace.jxs[3]
table.inelastic_e_out[temperatures[0]] = \
ace.xss[idx:idx + n_energy * n_energy_out * (n_mu + 2):
n_mu + 2]*EV_PER_MEV
table.inelastic_e_out[temperatures[0]].shape = \
(n_energy, n_energy_out)
table.inelastic_mu_out[temperatures[0]] = \
ace.xss[idx:idx + n_energy * n_energy_out * (n_mu + 2)]
table.inelastic_mu_out[temperatures[0]].shape = \
(n_energy, n_energy_out, n_mu+2)
table.inelastic_mu_out[temperatures[0]] = \
table.inelastic_mu_out[temperatures[0]][:, :, 1:]
else:
n_mu = ace.nxs[3] - 1
idx = ace.jxs[3]
locc = ace.xss[idx:idx + n_energy].astype(int)
n_energy_out = \
ace.xss[idx + n_energy:idx + 2 * n_energy].astype(int)
energy_out = []
mu_out = []
for i in range(n_energy):
idx = locc[i]
# Outgoing energy distribution for incoming energy i
e = ace.xss[idx + 1:idx + 1 + n_energy_out[i]*(n_mu + 3):
n_mu + 3]*EV_PER_MEV
p = ace.xss[idx + 2:idx + 2 + n_energy_out[i]*(n_mu + 3):
n_mu + 3]/EV_PER_MEV
c = ace.xss[idx + 3:idx + 3 + n_energy_out[i]*(n_mu + 3):
n_mu + 3]
eout_i = Tabular(e, p, 'linear-linear', ignore_negative=True)
eout_i.c = c
# Outgoing angle distribution for each
# (incoming, outgoing) energy pair
mu_i = []
for j in range(n_energy_out[i]):
mu = ace.xss[idx + 4:idx + 4 + n_mu]
p_mu = 1. / n_mu * np.ones(n_mu)
mu_ij = Discrete(mu, p_mu)
mu_ij.c = np.cumsum(p_mu)
mu_i.append(mu_ij)
idx += 3 + n_mu
energy_out.append(eout_i)
mu_out.append(mu_i)
# Create correlated angle-energy distribution
breakpoints = [n_energy]
interpolation = [2]
energy = table.inelastic_xs[temperatures[0]].x
table.inelastic_dist[temperatures[0]] = CorrelatedAngleEnergy(
breakpoints, interpolation, energy, energy_out, mu_out)
# Incoherent/coherent elastic scattering cross section
idx = ace.jxs[4]
n_mu = ace.nxs[6] + 1
if idx != 0:
n_energy = int(ace.xss[idx])
energy = ace.xss[idx + 1: idx + 1 + n_energy]*EV_PER_MEV
P = ace.xss[idx + 1 + n_energy: idx + 1 + 2 * n_energy]
if ace.nxs[5] == 4:
# Coherent elastic
table.elastic_xs[temperatures[0]] = CoherentElastic(
energy, P*EV_PER_MEV)
# Coherent elastic shouldn't have angular distributions listed
assert n_mu == 0
else:
# Incoherent elastic
table.elastic_xs[temperatures[0]] = Tabulated1D(energy, P)
# Angular distribution
assert n_mu > 0
idx = ace.jxs[6]
table.elastic_mu_out[temperatures[0]] = \
ace.xss[idx:idx + n_energy * n_mu]
table.elastic_mu_out[temperatures[0]].shape = \
(n_energy, n_mu)
# Get relevant nuclides -- NJOY only allows one to specify three
# nuclides that the S(a,b) table applies to. Thus, for all elements
# other than H and Fe, we automatically add all the naturally-occurring
# isotopes.
for zaid, awr in ace.pairs:
if zaid > 0:
Z, A = divmod(zaid, 1000)
element = ATOMIC_SYMBOL[Z]
if element in ['H', 'Fe']:
table.nuclides.append(element + str(A))
else:
if element + '0' not in table.nuclides:
table.nuclides.append(element + '0')
for isotope in sorted(NATURAL_ABUNDANCE):
if re.match(r'{}\d+'.format(element), isotope):
if isotope not in table.nuclides:
table.nuclides.append(isotope)
return table
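# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): round-trip a
# thermal scattering table from ACE to HDF5 using the methods defined above.
# The ACE and HDF5 file names are hypothetical placeholders.
if __name__ == '__main__':
    sab = ThermalScattering.from_ace('lwtr.20t')   # hypothetical ACE file
    sab.add_temperature_from_ace('lwtr.21t')       # add a second temperature
    sab.export_to_hdf5('c_H_in_H2O.h5', mode='w')
    roundtrip = ThermalScattering.from_hdf5('c_H_in_H2O.h5')
    print(roundtrip, roundtrip.temperatures, roundtrip.nuclides)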
|
|
#!/usr/bin/env python
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from pprint import pprint as pp
from f5_cccl.resource.ltm.pool import *
from mock import MagicMock
import pytest
bigip_pools_cfg = [
{'description': None,
'partition': 'Common',
'loadBalancingMode': 'round-robin',
'monitor': '/Common/http ',
'membersReference': {
'isSubcollection': True,
'items': [
{'ratio': 1,
'name': '172.16.0.100%0:8080',
'partition': 'Common',
'session': 'monitor-enabled',
'priorityGroup': 0,
'connectionLimit': 0,
'description': None},
{'ratio': 1,
'name': '172.16.0.101%0:8080',
'partition': 'Common',
'session': 'monitor-enabled',
'priorityGroup': 0,
'connectionLimit': 0,
'description': None}
]
},
'name': 'pool1',
'metadata': [{
'name': 'user_agent',
'persist': 'true',
'value': 'some-controller-v.1.4.0'
}]
},
{'description': None,
'partition': 'Common',
'loadBalancingMode': 'round-robin',
'monitor': '/Common/http ',
'name': 'pool1'
}
]
cccl_pools_cfg = [
{ "name": "pool0" },
{ "name": "pool1",
"members": [
{"address": "172.16.0.100%0", "port": 8080},
{"address": "172.16.0.101%0", "port": 8080}
],
"monitors": ["/Common/http"],
'metadata': [{
'name': 'user_agent',
'persist': 'true',
'value': 'some-controller-v.1.4.0'
}]
},
{ "name": "pool2",
"members": [
{"address": "192.168.0.100", "port": 80},
{"address": "192.168.0.101", "port": 80}
],
"monitors": []
},
{ "name": "pool3",
"members": [],
"description": "This is test pool 3",
"monitors": []
},
{ "name": "pool4",
"members": [],
"description": "This is test pool 4",
"monitors": ["/Common/http"]
},
{ "name": "pool1",
"members": [
{"address": "172.16.0.100", "port": 8080},
{"address": "172.16.0.102", "port": 8080}
],
"monitors": ["/Common/http"]
}
]
@pytest.fixture
def bigip():
bigip = MagicMock()
return bigip
@pytest.fixture
def bigip_pool0():
return bigip_pools_cfg[0]
@pytest.fixture
def bigip_pool1():
return bigip_pools_cfg[1]
@pytest.fixture
def cccl_pool0():
return cccl_pools_cfg[0]
@pytest.fixture
def cccl_pool1():
return cccl_pools_cfg[1]
@pytest.fixture
def cccl_pool2():
return cccl_pools_cfg[2]
@pytest.fixture
def cccl_pool3():
return cccl_pools_cfg[3]
@pytest.fixture
def cccl_pool5():
return cccl_pools_cfg[5]
@pytest.fixture
def bigip_members():
members_filename = (
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'./bigip-members.json'))
with open(members_filename) as fp:
json_data = fp.read()
json_data = json.loads(json_data)
members = [m for m in json_data['members']]
pp(json_data)
return members
def test_create_pool_minconfig(cccl_pool0):
pool = ApiPool(partition="Common", default_route_domain=0, **cccl_pool0)
assert pool.name == "pool0"
assert pool.partition == "Common"
assert pool.data['loadBalancingMode'] == "round-robin"
assert not pool.data['description']
assert len(pool) == 0
assert pool.data['monitor'] == "default"
def test_create_pool(cccl_pool1):
pool = ApiPool(partition="Common", default_route_domain=0, **cccl_pool1)
assert pool.name == "pool1"
assert pool.partition == "Common"
assert pool.data['loadBalancingMode'] == "round-robin"
assert not pool.data['description']
assert pool.data['monitor'] == "/Common/http"
assert 'metadata' in pool.data
assert len(pool) == 2
def test_create_pool_empty_lists(cccl_pool3):
pool = ApiPool(partition="Common", default_route_domain=0, **cccl_pool3)
assert pool.name == "pool3"
assert pool.partition == "Common"
assert pool.data['loadBalancingMode'] == "round-robin"
assert pool.data['description'] == "This is test pool 3"
assert pool.data['monitor'] == "default"
assert len(pool) == 0
def test_compare_equal_pools(cccl_pool0):
p1 = ApiPool(partition="Common", default_route_domain=0, **cccl_pool0)
p2 = ApiPool(partition="Common", default_route_domain=0, **cccl_pool0)
assert id(p1) != id(p2)
assert p1 == p2
def test_compare_pool_and_dict(cccl_pool0):
pool = ApiPool(partition="Common", default_route_domain=0, **cccl_pool0)
assert not pool == cccl_pool0
def test_get_uri_path(bigip, cccl_pool0):
pool = ApiPool(partition="Common", default_route_domain=0, **cccl_pool0)
assert pool._uri_path(bigip) == bigip.tm.ltm.pools.pool
def test_pool_hash(bigip, cccl_pool0):
pool = ApiPool(partition="Common", default_route_domain=0, **cccl_pool0)
assert hash(pool) == hash((pool.name, pool.partition))
def test_compare_bigip_cccl_pools(cccl_pool1, bigip_pool0):
bigip_pool = IcrPool(**bigip_pool0)
cccl_pool = ApiPool(partition="Common", default_route_domain=0, **cccl_pool1)
assert bigip_pool == cccl_pool
def test_create_bigip_pool_no_members(bigip_pool1):
bigip_pool = IcrPool(**bigip_pool1)
assert bigip_pool.data['membersReference']
assert bigip_pool.data['membersReference']['items'] == []
def test_compare_pools_unequal_members(bigip, cccl_pool1, cccl_pool2, cccl_pool5):
pool1 = ApiPool(partition="Common", default_route_domain=0, **cccl_pool1)
pool2 = ApiPool(partition="Common", default_route_domain=0, **cccl_pool2)
pool5 = ApiPool(partition="Common", default_route_domain=0, **cccl_pool5)
pool1_one_member_cfg = { "name": "pool1",
"members": [
{"address": "172.16.0.100", "port": 8080},
],
"monitors": ["/Common/http"]
}
pool1_one_member = ApiPool(partition="Common",
default_route_domain=0, **pool1_one_member_cfg)
pool2_with_monitor = { "name": "pool2",
"members": [
{"address": "192.168.0.100%2", "port": 80},
{"address": "192.168.0.101%2", "port": 80}
],
"monitors": ["/Common/http"]
}
pool2_with_monitor = ApiPool(partition="Common", default_route_domain=0, **pool2_with_monitor)
assert not pool1 == pool2
assert pool1 != pool2
assert not pool1_one_member == pool1
assert not pool2_with_monitor == pool2
assert not pool1 == pool5
assert pool1 != pool5
assert pool5 != pool1
def test_get_monitors(bigip):
pool = ApiPool(name="pool1", default_route_domain=0, partition="Common")
assert pool._get_monitors(None) == "default"
assert pool._get_monitors([]) == "default"
monitors = ["/Common/http", "/Common/my_tcp"]
assert pool._get_monitors(monitors) == "/Common/http and /Common/my_tcp"
monitors = ["", ""]
assert pool._get_monitors(monitors) == " and "
monitors = ["/Common/my_tcp", "/Common/http"]
assert pool._get_monitors(monitors) == "/Common/http and /Common/my_tcp"
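# Illustrative sketch (assumption, not an original test): because ApiPool
# hashes on (name, partition) and compares equal for identical configs, equal
# pools should deduplicate in a set. This only restates behavior exercised by
# test_pool_hash and test_compare_equal_pools above.
def test_pool_set_deduplication(cccl_pool0):
    p1 = ApiPool(partition="Common", default_route_domain=0, **cccl_pool0)
    p2 = ApiPool(partition="Common", default_route_domain=0, **cccl_pool0)
    assert len({p1, p2}) == 1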
|
|
from __future__ import print_function, division
from collections import defaultdict
from sympy.core.function import expand_log, count_ops
from sympy.core import sympify, Basic, Dummy, S, Add, Mul, Pow, expand_mul, factor_terms
from sympy.core.compatibility import ordered, default_sort_key, reduce
from sympy.core.numbers import Integer, Rational
from sympy.core.mul import prod, _keep_coeff
from sympy.core.rules import Transform
from sympy.functions import exp_polar, exp, log, root, polarify, unpolarify
from sympy.polys import lcm, gcd
from sympy.ntheory.factor_ import multiplicity
def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops):
"""
Reduces an expression by combining powers with similar bases and exponents.
Notes
=====
If deep is True then powsimp() will also simplify arguments of
functions. By default deep is set to False.
If force is True then bases will be combined without checking for
assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true
if x and y are both negative.
You can make powsimp() only combine bases or only combine exponents by
changing combine='base' or combine='exp'. By default, combine='all',
which does both. combine='base' will only combine::
    x**a * y**a  =>  (x*y)**a   as well as things like 2**(2*x) => 4**x
and combine='exp' will only combine::
    x**a * x**b  =>  x**(a + b)
combine='exp' will strictly only combine exponents in the way that used
to be automatic. Also use deep=True if you need the old behavior.
When combine='all', 'exp' is evaluated first. Consider the first
example below for when there could be an ambiguity relating to this.
This is done so things like the second example can be completely
combined. If you want 'base' combined first, do something like
powsimp(powsimp(expr, combine='base'), combine='exp').
Examples
========
>>> from sympy import powsimp, exp, log, symbols
>>> from sympy.abc import x, y, z, n
>>> powsimp(x**y*x**z*y**z, combine='all')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='exp')
x**(y + z)*y**z
>>> powsimp(x**y*x**z*y**z, combine='base', force=True)
x**y*(x*y)**z
>>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True)
(n*x)**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='exp')
n**(y + z)*x**(y + z)
>>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True)
(n*x)**y*(n*x)**z
>>> x, y = symbols('x y', positive=True)
>>> powsimp(log(exp(x)*exp(y)))
log(exp(x)*exp(y))
>>> powsimp(log(exp(x)*exp(y)), deep=True)
x + y
Radicals with Mul bases will be combined if combine='exp'
>>> from sympy import sqrt, Mul
>>> x, y = symbols('x y')
Two radicals are automatically joined through Mul:
>>> a=sqrt(x*sqrt(y))
>>> a*a**3 == a**4
True
But if an integer power of that radical has been
autoexpanded then Mul does not join the resulting factors:
>>> a**4 # auto expands to a Mul, no longer a Pow
x**2*y
>>> _*a # so Mul doesn't combine them
x**2*y*sqrt(x*sqrt(y))
>>> powsimp(_) # but powsimp will
(x*sqrt(y))**(5/2)
>>> powsimp(x*y*a) # but won't when doing so would violate assumptions
x*y*sqrt(x*sqrt(y))
"""
from sympy.matrices.expressions.matexpr import MatrixSymbol
def recurse(arg, **kwargs):
_deep = kwargs.get('deep', deep)
_combine = kwargs.get('combine', combine)
_force = kwargs.get('force', force)
_measure = kwargs.get('measure', measure)
return powsimp(arg, _deep, _combine, _force, _measure)
expr = sympify(expr)
if (not isinstance(expr, Basic) or isinstance(expr, MatrixSymbol) or (
expr.is_Atom or expr in (exp_polar(0), exp_polar(1)))):
return expr
if deep or expr.is_Add or expr.is_Mul and _y not in expr.args:
expr = expr.func(*[recurse(w) for w in expr.args])
if expr.is_Pow:
return recurse(expr*_y, deep=False)/_y
if not expr.is_Mul:
return expr
# handle the Mul
if combine in ('exp', 'all'):
# Collect base/exp data, while maintaining order in the
# non-commutative parts of the product
c_powers = defaultdict(list)
nc_part = []
newexpr = []
coeff = S.One
for term in expr.args:
if term.is_Rational:
coeff *= term
continue
if term.is_Pow:
term = _denest_pow(term)
if term.is_commutative:
b, e = term.as_base_exp()
if deep:
b, e = [recurse(i) for i in [b, e]]
if b.is_Pow or b.func is exp:
# don't let something like sqrt(x**a) split into x**a, 1/2
# or else it will be joined as x**(a/2) later
b, e = b**e, S.One
c_powers[b].append(e)
else:
# This is the logic that combines exponents for equal,
# but non-commutative bases: A**x*A**y == A**(x+y).
if nc_part:
b1, e1 = nc_part[-1].as_base_exp()
b2, e2 = term.as_base_exp()
if (b1 == b2 and
e1.is_commutative and e2.is_commutative):
nc_part[-1] = Pow(b1, Add(e1, e2))
continue
nc_part.append(term)
# add up exponents of common bases
for b, e in ordered(iter(c_powers.items())):
# allow 2**x/4 -> 2**(x - 2); don't do this when b and e are
# Numbers since autoevaluation will undo it, e.g.
# 2**(1/3)/4 -> 2**(1/3 - 2) -> 2**(1/3)/4
if (b and b.is_Number and not all(ei.is_Number for ei in e) and \
coeff is not S.One and
b not in (S.One, S.NegativeOne)):
m = multiplicity(abs(b), abs(coeff))
if m:
e.append(m)
coeff /= b**m
c_powers[b] = Add(*e)
if coeff is not S.One:
if coeff in c_powers:
c_powers[coeff] += S.One
else:
c_powers[coeff] = S.One
# convert to plain dictionary
c_powers = dict(c_powers)
# check for base and inverted base pairs
be = list(c_powers.items())
skip = set() # skip if we already saw them
for b, e in be:
if b in skip:
continue
bpos = b.is_positive or b.is_polar
if bpos:
binv = 1/b
if b != binv and binv in c_powers:
if b.as_numer_denom()[0] is S.One:
c_powers.pop(b)
c_powers[binv] -= e
else:
skip.add(binv)
e = c_powers.pop(binv)
c_powers[b] -= e
# check for base and negated base pairs
be = list(c_powers.items())
_n = S.NegativeOne
for i, (b, e) in enumerate(be):
if ((-b).is_Symbol or b.is_Add) and -b in c_powers:
if (b.is_positive in (0, 1) or e.is_integer):
c_powers[-b] += c_powers.pop(b)
if _n in c_powers:
c_powers[_n] += e
else:
c_powers[_n] = e
# filter c_powers and convert to a list
c_powers = [(b, e) for b, e in c_powers.items() if e]
# ==============================================================
# check for Mul bases of Rational powers that can be combined with
# separated bases, e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) ->
# (x*sqrt(x*y))**(3/2)
# ---------------- helper functions
def ratq(x):
'''Return Rational part of x's exponent as it appears in the bkey.
'''
return bkey(x)[0][1]
def bkey(b, e=None):
'''Return (b**s, c.q), c.p where e -> c*s. If e is not given then
it will be taken by using as_base_exp() on the input b.
e.g.
x**(3/2) -> (x, 2), 3
x**y -> (x**y, 1), 1
x**(2*y/3) -> (x**y, 3), 2
exp(x/2) -> (exp(x), 2), 1
'''
if e is not None: # coming from c_powers or from below
if e.is_Integer:
return (b, S.One), e
elif e.is_Rational:
return (b, Integer(e.q)), Integer(e.p)
else:
c, m = e.as_coeff_Mul(rational=True)
if c is not S.One:
return (b**m, Integer(c.q)), Integer(c.p)
else:
return (b**e, S.One), S.One
else:
return bkey(*b.as_base_exp())
def update(b):
'''Decide what to do with base, b. If its exponent is now an
integer multiple of the Rational denominator, then remove it
and put the factors of its base in the common_b dictionary or
update the existing bases if necessary. If it has been zeroed
out, simply remove the base.
'''
newe, r = divmod(common_b[b], b[1])
if not r:
common_b.pop(b)
if newe:
for m in Mul.make_args(b[0]**newe):
b, e = bkey(m)
if b not in common_b:
common_b[b] = 0
common_b[b] += e
if b[1] != 1:
bases.append(b)
# ---------------- end of helper functions
# assemble a dictionary of the factors having a Rational power
common_b = {}
done = []
bases = []
for b, e in c_powers:
b, e = bkey(b, e)
if b in common_b.keys():
common_b[b] = common_b[b] + e
else:
common_b[b] = e
if b[1] != 1 and b[0].is_Mul:
bases.append(b)
c_powers = [(b, e) for b, e in common_b.items() if e]
bases.sort(key=default_sort_key) # this makes tie-breaking canonical
bases.sort(key=measure, reverse=True) # handle longest first
for base in bases:
if base not in common_b: # it may have been removed already
continue
b, exponent = base
last = False # True when no factor of base is a radical
qlcm = 1 # the lcm of the radical denominators
while True:
bstart = b
qstart = qlcm
bb = [] # list of factors
ee = [] # (factor's exponent and its current value in common_b)
for bi in Mul.make_args(b):
bib, bie = bkey(bi)
if bib not in common_b or common_b[bib] < bie:
ee = bb = [] # failed
break
ee.append([bie, common_b[bib]])
bb.append(bib)
if ee:
# find the number of extractions possible
# e.g. [(1, 2), (2, 2)] -> min(2/1, 2/2) -> 1
min1 = ee[0][1]/ee[0][0]
for i in range(len(ee)):
rat = ee[i][1]/ee[i][0]
if rat < 1:
break
min1 = min(min1, rat)
else:
# update base factor counts
# e.g. if ee = [(2, 5), (3, 6)] then min1 = 2
# and the new base counts will be 5-2*2 and 6-2*3
for i in range(len(bb)):
common_b[bb[i]] -= min1*ee[i][0]
update(bb[i])
# update the count of the base
# e.g. x**2*y*sqrt(x*sqrt(y)) the count of x*sqrt(y)
# will increase by 4 to give bkey (x*sqrt(y), 2, 5)
common_b[base] += min1*qstart*exponent
if (last # no more radicals in base
or len(common_b) == 1 # nothing left to join with
or all(k[1] == 1 for k in common_b) # no rad's in common_b
):
break
# see what we can exponentiate base by to remove any radicals
# so we know what to search for
# e.g. if base were x**(1/2)*y**(1/3) then we should
# exponentiate by 6 and look for powers of x and y in the ratio
# of 2 to 3
qlcm = lcm([ratq(bi) for bi in Mul.make_args(bstart)])
if qlcm == 1:
break # we are done
b = bstart**qlcm
qlcm *= qstart
if all(ratq(bi) == 1 for bi in Mul.make_args(b)):
last = True # we are going to be done after this next pass
# this base no longer can find anything to join with and
# since it was longer than any other we are done with it
b, q = base
done.append((b, common_b.pop(base)*Rational(1, q)))
# update c_powers and get ready to continue with powsimp
c_powers = done
# there may be terms still in common_b that were bases that were
# identified as needing processing, so remove those, too
for (b, q), e in common_b.items():
if (b.is_Pow or b.func is exp) and \
q is not S.One and not b.exp.is_Rational:
b, be = b.as_base_exp()
b = b**(be/q)
else:
b = root(b, q)
c_powers.append((b, e))
check = len(c_powers)
c_powers = dict(c_powers)
assert len(c_powers) == check # there should have been no duplicates
# ==============================================================
# rebuild the expression
newexpr = expr.func(*(newexpr + [Pow(b, e) for b, e in c_powers.items()]))
if combine == 'exp':
return expr.func(newexpr, expr.func(*nc_part))
else:
return recurse(expr.func(*nc_part), combine='base') * \
recurse(newexpr, combine='base')
elif combine == 'base':
# Build c_powers and nc_part. These must both be lists not
# dicts because exp's are not combined.
c_powers = []
nc_part = []
for term in expr.args:
if term.is_commutative:
c_powers.append(list(term.as_base_exp()))
else:
# This is the logic that combines bases that are
# different and non-commutative, but with equal and
# commutative exponents: A**x*B**x == (A*B)**x.
if nc_part:
b1, e1 = nc_part[-1].as_base_exp()
b2, e2 = term.as_base_exp()
if (e1 == e2 and e2.is_commutative):
nc_part[-1] = Pow(b1*b2, e1)
continue
nc_part.append(term)
# Pull out numerical coefficients from exponent if assumptions allow
# e.g., 2**(2*x) => 4**x
for i in range(len(c_powers)):
b, e = c_powers[i]
if not (all(x.is_nonnegative for x in b.as_numer_denom()) or e.is_integer or force or b.is_polar):
continue
exp_c, exp_t = e.as_coeff_Mul(rational=True)
if exp_c is not S.One and exp_t is not S.One:
c_powers[i] = [Pow(b, exp_c), exp_t]
# Combine bases whenever they have the same exponent and
# assumptions allow
# first gather the potential bases under the common exponent
c_exp = defaultdict(list)
for b, e in c_powers:
if deep:
e = recurse(e)
c_exp[e].append(b)
del c_powers
# Merge back in the results of the above to form a new product
c_powers = defaultdict(list)
for e in c_exp:
bases = c_exp[e]
# calculate the new base for e
if len(bases) == 1:
new_base = bases[0]
elif e.is_integer or force:
new_base = expr.func(*bases)
else:
# see which ones can be joined
unk = []
nonneg = []
neg = []
for bi in bases:
if bi.is_negative:
neg.append(bi)
elif bi.is_nonnegative:
nonneg.append(bi)
elif bi.is_polar:
nonneg.append(
bi) # polar can be treated like non-negative
else:
unk.append(bi)
if len(unk) == 1 and not neg or len(neg) == 1 and not unk:
# a single neg or a single unk can join the rest
nonneg.extend(unk + neg)
unk = neg = []
elif neg:
# their negative signs cancel in groups of 2*q if we know
# that e = p/q else we have to treat them as unknown
israt = False
if e.is_Rational:
israt = True
else:
p, d = e.as_numer_denom()
if p.is_integer and d.is_integer:
israt = True
if israt:
neg = [-w for w in neg]
unk.extend([S.NegativeOne]*len(neg))
else:
unk.extend(neg)
neg = []
del israt
# these shouldn't be joined
for b in unk:
c_powers[b].append(e)
# here is a new joined base
new_base = expr.func(*(nonneg + neg))
# if there are positive parts they will just get separated
# again unless some change is made
def _terms(e):
# return the number of terms of this expression
# when multiplied out -- assuming no joining of terms
if e.is_Add:
return sum([_terms(ai) for ai in e.args])
if e.is_Mul:
return prod([_terms(mi) for mi in e.args])
return 1
xnew_base = expand_mul(new_base, deep=False)
if len(Add.make_args(xnew_base)) < _terms(new_base):
new_base = factor_terms(xnew_base)
c_powers[new_base].append(e)
# break out the powers from c_powers now
c_part = [Pow(b, ei) for b, e in c_powers.items() for ei in e]
# we're done
return expr.func(*(c_part + nc_part))
else:
raise ValueError("combine must be one of ('all', 'exp', 'base').")
def powdenest(eq, force=False, polar=False):
r"""
Collect exponents on powers as assumptions allow.
Given ``(bb**be)**e``, this can be simplified as follows:
* if ``bb`` is positive, or
* ``e`` is an integer, or
* ``|be| < 1`` then this simplifies to ``bb**(be*e)``
Given a product of powers raised to a power, ``(bb1**be1 *
bb2**be2...)**e``, simplification can be done as follows:
- if e is positive, the gcd of all bei can be joined with e;
- all non-negative bb can be separated from those that are negative
and their gcd can be joined with e; autosimplification already
handles this separation.
- integer factors from powers that have integers in the denominator
of the exponent can be removed from any term and the gcd of such
integers can be joined with e
Setting ``force`` to True will make symbols that are not explicitly
negative behave as though they are positive, resulting in more
denesting.
Setting ``polar`` to True will do simplifications on the Riemann surface of
the logarithm, also resulting in more denestings.
When there are sums of logs in exp() then a product of powers may be
obtained, e.g. ``exp(3*(log(a) + 2*log(b)))`` -> ``a**3*b**6``.
Examples
========
>>> from sympy.abc import a, b, x, y, z
>>> from sympy import Symbol, exp, log, sqrt, symbols, powdenest
>>> powdenest((x**(2*a/3))**(3*x))
(x**(2*a/3))**(3*x)
>>> powdenest(exp(3*x*log(2)))
2**(3*x)
Assumptions may prevent expansion:
>>> powdenest(sqrt(x**2))
sqrt(x**2)
>>> p = symbols('p', positive=True)
>>> powdenest(sqrt(p**2))
p
No other expansion is done.
>>> i, j = symbols('i,j', integer=True)
>>> powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j
x**(x*(i + j))
But exp() will be denested by moving all non-log terms outside of
the function; this may result in the collapsing of the exp to a power
with a different base:
>>> powdenest(exp(3*y*log(x)))
x**(3*y)
>>> powdenest(exp(y*(log(a) + log(b))))
(a*b)**y
>>> powdenest(exp(3*(log(a) + log(b))))
a**3*b**3
If assumptions allow, symbols can also be moved to the outermost exponent:
>>> i = Symbol('i', integer=True)
>>> powdenest(((x**(2*i))**(3*y))**x)
((x**(2*i))**(3*y))**x
>>> powdenest(((x**(2*i))**(3*y))**x, force=True)
x**(6*i*x*y)
>>> powdenest(((x**(2*a/3))**(3*y/i))**x)
((x**(2*a/3))**(3*y/i))**x
>>> powdenest((x**(2*i)*y**(4*i))**z, force=True)
(x*y**2)**(2*i*z)
>>> n = Symbol('n', negative=True)
>>> powdenest((x**i)**y, force=True)
x**(i*y)
>>> powdenest((n**i)**x, force=True)
(n**i)**x
"""
from sympy.simplify.simplify import posify
if force:
eq, rep = posify(eq)
return powdenest(eq, force=False).xreplace(rep)
if polar:
eq, rep = polarify(eq)
return unpolarify(powdenest(unpolarify(eq, exponents_only=True)), rep)
new = powsimp(sympify(eq))
return new.xreplace(Transform(
_denest_pow, filter=lambda m: m.is_Pow or m.func is exp))
_y = Dummy('y')
def _denest_pow(eq):
"""
Denest powers.
This is a helper function for powdenest that performs the actual
transformation.
"""
from sympy.simplify.simplify import logcombine
b, e = eq.as_base_exp()
if b.is_Pow or b.func is exp and e != 1:
new = b._eval_power(e)
if new is not None:
eq = new
b, e = new.as_base_exp()
# denest exp with log terms in exponent
if b is S.Exp1 and e.is_Mul:
logs = []
other = []
for ei in e.args:
if any(ai.func is log for ai in Add.make_args(ei)):
logs.append(ei)
else:
other.append(ei)
logs = logcombine(Mul(*logs))
return Pow(exp(logs), Mul(*other))
_, be = b.as_base_exp()
if be is S.One and not (b.is_Mul or
b.is_Rational and b.q != 1 or
b.is_positive):
return eq
# denest eq which is either pos**e or Pow**e or Mul**e or
# Mul(b1**e1, b2**e2)
# handle polar numbers specially
polars, nonpolars = [], []
for bb in Mul.make_args(b):
if bb.is_polar:
polars.append(bb.as_base_exp())
else:
nonpolars.append(bb)
if len(polars) == 1 and not polars[0][0].is_Mul:
return Pow(polars[0][0], polars[0][1]*e)*powdenest(Mul(*nonpolars)**e)
elif polars:
return Mul(*[powdenest(bb**(ee*e)) for (bb, ee) in polars]) \
*powdenest(Mul(*nonpolars)**e)
if b.is_Integer:
# use log to see if there is a power here
logb = expand_log(log(b))
if logb.is_Mul:
c, logb = logb.args
e *= c
base = logb.args[0]
return Pow(base, e)
# if b is not a Mul or any factor is an atom then there is nothing to do
if not b.is_Mul or any(s.is_Atom for s in Mul.make_args(b)):
return eq
# let log handle the case of the base of the argument being a Mul, e.g.
# sqrt(x**(2*i)*y**(6*i)) -> x**i*y**(3*i) if x and y are positive; we
# will take the log, expand it, and then factor out the common powers that
# now appear as coefficient. We do this manually since terms_gcd pulls out
# fractions, terms_gcd(x+x*y/2) -> x*(y + 2)/2 and we don't want the 1/2;
# gcd won't pull out numerators from a fraction: gcd(3*x, 9*x/2) -> x but
# we want 3*x. Neither work with noncommutatives.
def nc_gcd(aa, bb):
a, b = [i.as_coeff_Mul() for i in [aa, bb]]
c = gcd(a[0], b[0]).as_numer_denom()[0]
g = Mul(*(a[1].args_cnc(cset=True)[0] & b[1].args_cnc(cset=True)[0]))
return _keep_coeff(c, g)
glogb = expand_log(log(b))
if glogb.is_Add:
args = glogb.args
g = reduce(nc_gcd, args)
if g != 1:
cg, rg = g.as_coeff_Mul()
glogb = _keep_coeff(cg, rg*Add(*[a/g for a in args]))
# now put the log back together again
if glogb.func is log or not glogb.is_Mul:
if glogb.args[0].is_Pow or glogb.args[0].func is exp:
glogb = _denest_pow(glogb.args[0])
if (abs(glogb.exp) < 1) == True:
return Pow(glogb.base, glogb.exp*e)
return eq
# the log(b) was a Mul so join any adds with logcombine
add = []
other = []
for a in glogb.args:
if a.is_Add:
add.append(a)
else:
other.append(a)
return Pow(exp(logcombine(Mul(*add))), e*Mul(*other))
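# Illustrative demo (not part of the original module): exercise the public
# entry points above on examples taken from their docstrings.
if __name__ == '__main__':
    from sympy.abc import a, b, x, y, z
    print(powsimp(x**y*x**z*y**z))              # -> x**(y + z)*y**z
    print(powdenest(exp(3*y*log(x))))           # -> x**(3*y)
    print(powdenest(exp(y*(log(a) + log(b)))))  # -> (a*b)**y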
|
|
#!/usr/bin/env python3
import os
from subprocess import (
check_output,
CalledProcessError,
STDOUT
)
import random
import shutil
from shutil import copyfile
from crontab import CronTab
from charmhelpers.core import unitdata
from charmhelpers.core.host import (
lsb_release,
service_running,
service_start,
service_stop
)
from charmhelpers.core.hookenv import (
log,
config,
open_port,
status_set,
charm_dir
)
from charms.reactive import (
when,
when_any,
when_all,
when_not,
set_state,
remove_state
)
from charms import layer
from charms import apt
@when_not('apt.installed.letsencrypt')
def check_version_and_install():
series = lsb_release()['DISTRIB_RELEASE']
if not series >= '16.04':
log('letsencrypt not supported on series %s (< 16.04)' % (series))
status_set('blocked', "Unsupported series (< 16.04 Xenial)")
return
else:
apt.queue_install(['letsencrypt'])
apt.install_queued()
# open ports during installation to prevent a scenario where
# we need to wait for the update-status hook to request
# certificates because Juju hasn't opened the ports yet and
# no other hook is queued to run.
open_port(80)
open_port(443)
@when('config.changed.fqdn')
def config_changed():
configs = config()
if configs.changed('fqdn') and configs.previous('fqdn') \
or configs.get('fqdn'):
remove_state('lets-encrypt.registered')
@when('apt.installed.letsencrypt')
@when_any(
'lets-encrypt.certificate-requested',
'config.set.fqdn',
)
@when_not('lets-encrypt.registered')
@when_not('lets-encrypt.disable')
def register_server():
configs = config()
# Get all certificate requests
requests = unitdata.kv().get('certificate.requests', [])
if not requests and not configs.get('fqdn'):
return
if configs.get('fqdn'):
requests.append({'fqdn': [configs.get('fqdn')],
'contact-email': configs.get('contact-email', '')})
# If the ports haven't been opened in a previous hook, they won't be open,
# so opened_ports won't return them.
ports = opened_ports()
if not ('80/tcp' in ports or '443/tcp' in ports):
status_set(
'waiting',
'Waiting for ports to open (will happen in next hook)')
return
if create_certificates(requests):
unconfigure_periodic_renew()
configure_periodic_renew()
create_dhparam()
set_state('lets-encrypt.registered')
@when_all(
'apt.installed.letsencrypt',
'lets-encrypt.registered',
# This state is set twice each day by crontab. This
# handler will be run in the next update-status hook.
'lets-encrypt.renew.requested',
)
@when_not(
'lets-encrypt.disable',
'lets-encrypt.renew.disable',
)
def renew_cert():
remove_state('lets-encrypt.renew.requested')
# We don't want to stop the webserver if no renew is needed.
if no_renew_needed():
return
print("Renewing certificate...")
configs = config()
fqdn = configs.get('fqdn')
needs_start = stop_running_web_service()
open_port(80)
open_port(443)
try:
output = check_output(
['letsencrypt', 'renew', '--agree-tos'],
universal_newlines=True,
stderr=STDOUT)
print(output) # So output shows up in logs
status_set('active', 'registered %s' % (fqdn))
set_state('lets-encrypt.renewed')
except CalledProcessError as err:
status_set(
'blocked',
'letsencrypt renewal failed: \n{}'.format(err.output))
print(err.output) # So output shows up in logs
finally:
if needs_start:
start_web_service()
def no_renew_needed():
# If renew is needed, the following call might fail because the needed
# ports are in use. We catch this because we only need to know if a
# renew was attempted, not if it succeeded.
try:
output = check_output(
['letsencrypt', 'renew', '--agree-tos'], universal_newlines=True)
except CalledProcessError as error:
output = error.output
return "No renewals were attempted." in output
def stop_running_web_service():
service_name = layer.options('lets-encrypt').get('service-name')
if service_name and service_running(service_name):
log('stopping running service: %s' % (service_name))
service_stop(service_name)
return True
def start_web_service():
service_name = layer.options('lets-encrypt').get('service-name')
if service_name:
log('starting service: %s' % (service_name))
service_start(service_name)
def configure_periodic_renew():
command = (
'export CHARM_DIR="{}"; '
'{} '
'set_state lets-encrypt.renew.requested '
''.format(
os.environ['CHARM_DIR'],
shutil.which("charms.reactive")))
cron = CronTab(user='root')
jobRenew = cron.new(
command=command,
comment="Renew Let's Encrypt [managed by Juju]")
# Twice a day, random minute per certbot instructions
# https://certbot.eff.org/all-instructions/
jobRenew.setall('{} 6,18 * * *'.format(random.randint(1, 59)))
jobRenew.enable()
cron.write()
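# For illustration only (hypothetical charm path and minute): the job written
# above renders to a crontab entry roughly like
#   37 6,18 * * * export CHARM_DIR="/var/lib/juju/agents/unit-foo-0/charm"; \
#       /usr/bin/charms.reactive set_state lets-encrypt.renew.requested
# tagged with the comment "Renew Let's Encrypt [managed by Juju]".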
def unconfigure_periodic_renew():
cron = CronTab(user='root')
jobs = cron.find_comment(comment="Renew Let's Encrypt [managed by Juju]")
for job in jobs:
cron.remove(job)
cron.write()
def create_dhparam():
copyfile(
'{}/files/dhparam.pem'.format(charm_dir()),
'/etc/letsencrypt/dhparam.pem')
def opened_ports():
output = check_output(['opened-ports'], universal_newlines=True)
return output.split()
def create_certificates(requests):
for cert_request in requests:
# Check if there are no conflicts
# If a fqdn is already present, do not create a new one
fqdnpaths = []
for fqdn in cert_request['fqdn']:
fqdnpaths.append('/etc/letsencrypt/live/' + fqdn)
if any([os.path.isdir(f) for f in fqdnpaths]):
continue # Cert already exists
needs_start = stop_running_web_service()
mail_args = []
if cert_request['contact-email']:
mail_args.append('--email')
mail_args.append(cert_request['contact-email'])
else:
mail_args.append('--register-unsafely-without-email')
try:
# Agreement already captured by terms, see metadata
le_cmd = ['letsencrypt', 'certonly', '--standalone', '--agree-tos',
'--non-interactive']
for fqdn in cert_request['fqdn']:
le_cmd.extend(['-d', fqdn])
le_cmd.extend(mail_args)
output = check_output(
le_cmd,
universal_newlines=True,
stderr=STDOUT)
print(output) # So output shows up in logs
status_set('active', 'registered %s' % (fqdn))
except CalledProcessError as err:
status_set(
'blocked',
'letsencrypt registration failed: \n{}'.format(err.output))
print(err.output) # So output shows up in logs
return False
finally:
if needs_start:
start_web_service()
return True
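# For illustration only (hypothetical domain and email): for a request of
# {'fqdn': ['www.example.com'], 'contact-email': 'admin@example.com'} the
# command assembled above is
#   letsencrypt certonly --standalone --agree-tos --non-interactive \
#       -d www.example.com --email admin@example.com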
|
|
from enum import Enum, auto
from typing import Any, Dict, List, Optional, Tuple
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
class ErrorCode(Enum):
BAD_REQUEST = auto() # Generic name, from the name of HTTP 400.
REQUEST_VARIABLE_MISSING = auto()
REQUEST_VARIABLE_INVALID = auto()
INVALID_JSON = auto()
BAD_IMAGE = auto()
REALM_UPLOAD_QUOTA = auto()
BAD_NARROW = auto()
CANNOT_DEACTIVATE_LAST_USER = auto()
MISSING_HTTP_EVENT_HEADER = auto()
STREAM_DOES_NOT_EXIST = auto()
UNAUTHORIZED_PRINCIPAL = auto()
UNSUPPORTED_WEBHOOK_EVENT_TYPE = auto()
ANOMALOUS_WEBHOOK_PAYLOAD = auto()
BAD_EVENT_QUEUE_ID = auto()
CSRF_FAILED = auto()
INVITATION_FAILED = auto()
INVALID_ZULIP_SERVER = auto()
INVALID_MARKDOWN_INCLUDE_STATEMENT = auto()
REQUEST_CONFUSING_VAR = auto()
INVALID_API_KEY = auto()
INVALID_ZOOM_TOKEN = auto()
UNAUTHENTICATED_USER = auto()
NONEXISTENT_SUBDOMAIN = auto()
RATE_LIMIT_HIT = auto()
USER_DEACTIVATED = auto()
REALM_DEACTIVATED = auto()
PASSWORD_AUTH_DISABLED = auto()
PASSWORD_RESET_REQUIRED = auto()
AUTHENTICATION_FAILED = auto()
class JsonableError(Exception):
"""A standardized error format we can turn into a nice JSON HTTP response.
This class can be invoked in a couple ways.
* Easiest, but completely machine-unreadable:
raise JsonableError(_("No such widget: {}").format(widget_name))
The message may be passed through to clients and shown to a user,
so translation is required. Because the text will vary depending
on the user's language, it's not possible for code to distinguish
this error from others in a non-buggy way.
* Fully machine-readable, with an error code and structured data:
class NoSuchWidgetError(JsonableError):
code = ErrorCode.NO_SUCH_WIDGET
data_fields = ['widget_name']
def __init__(self, widget_name: str) -> None:
self.widget_name: str = widget_name
@staticmethod
def msg_format() -> str:
return _("No such widget: {widget_name}")
raise NoSuchWidgetError(widget_name)
Now both server and client code see a `widget_name` attribute
and an error code.
Subclasses may also override `http_status_code`.
"""
# Override this in subclasses, as needed.
code: ErrorCode = ErrorCode.BAD_REQUEST
# Override this in subclasses if providing structured data.
data_fields: List[str] = []
# Optionally override this in subclasses to return a different HTTP status,
# like 403 or 404.
http_status_code: int = 400
def __init__(self, msg: str) -> None:
# `_msg` is an implementation detail of `JsonableError` itself.
self._msg: str = msg
@staticmethod
def msg_format() -> str:
"""Override in subclasses. Gets the items in `data_fields` as format args.
This should return (a translation of) a string literal.
The reason it's not simply a class attribute is to allow
translation to work.
"""
# Secretly this gets one more format arg not in `data_fields`: `_msg`.
# That's for the sake of the `JsonableError` base logic itself, for
# the simplest form of use where we just get a plain message string
# at construction time.
return "{_msg}"
@property
def extra_headers(self) -> Dict[str, Any]:
return {}
#
# Infrastructure -- not intended to be overridden in subclasses.
#
@property
def msg(self) -> str:
format_data = dict(
((f, getattr(self, f)) for f in self.data_fields), _msg=getattr(self, "_msg", None)
)
return self.msg_format().format(**format_data)
@property
def data(self) -> Dict[str, Any]:
return dict(((f, getattr(self, f)) for f in self.data_fields), code=self.code.name)
def __str__(self) -> str:
return self.msg
class StreamDoesNotExistError(JsonableError):
code = ErrorCode.STREAM_DOES_NOT_EXIST
data_fields = ["stream"]
def __init__(self, stream: str) -> None:
self.stream = stream
@staticmethod
def msg_format() -> str:
return _("Stream '{stream}' does not exist")
class StreamWithIDDoesNotExistError(JsonableError):
code = ErrorCode.STREAM_DOES_NOT_EXIST
data_fields = ["stream_id"]
def __init__(self, stream_id: int) -> None:
self.stream_id = stream_id
@staticmethod
def msg_format() -> str:
return _("Stream with ID '{stream_id}' does not exist")
class CannotDeactivateLastUserError(JsonableError):
code = ErrorCode.CANNOT_DEACTIVATE_LAST_USER
data_fields = ["is_last_owner", "entity"]
def __init__(self, is_last_owner: bool) -> None:
self.is_last_owner = is_last_owner
self.entity = _("organization owner") if is_last_owner else _("user")
@staticmethod
def msg_format() -> str:
return _("Cannot deactivate the only {entity}.")
class InvalidMarkdownIncludeStatement(JsonableError):
code = ErrorCode.INVALID_MARKDOWN_INCLUDE_STATEMENT
data_fields = ["include_statement"]
def __init__(self, include_statement: str) -> None:
self.include_statement = include_statement
@staticmethod
def msg_format() -> str:
return _("Invalid Markdown include statement: {include_statement}")
class RateLimited(JsonableError):
code = ErrorCode.RATE_LIMIT_HIT
http_status_code = 429
def __init__(self, secs_to_freedom: Optional[float] = None) -> None:
self.secs_to_freedom = secs_to_freedom
@staticmethod
def msg_format() -> str:
return _("API usage exceeded rate limit")
@property
def extra_headers(self) -> Dict[str, Any]:
extra_headers_dict = super().extra_headers
if self.secs_to_freedom is not None:
extra_headers_dict["Retry-After"] = self.secs_to_freedom
return extra_headers_dict
@property
def data(self) -> Dict[str, Any]:
data_dict = super().data
data_dict["retry-after"] = self.secs_to_freedom
return data_dict
class InvalidJSONError(JsonableError):
code = ErrorCode.INVALID_JSON
@staticmethod
def msg_format() -> str:
return _("Malformed JSON")
class OrganizationMemberRequired(JsonableError):
code: ErrorCode = ErrorCode.UNAUTHORIZED_PRINCIPAL
def __init__(self) -> None:
pass
@staticmethod
def msg_format() -> str:
return _("Must be an organization member")
class OrganizationAdministratorRequired(JsonableError):
code: ErrorCode = ErrorCode.UNAUTHORIZED_PRINCIPAL
def __init__(self) -> None:
pass
@staticmethod
def msg_format() -> str:
return _("Must be an organization administrator")
class OrganizationOwnerRequired(JsonableError):
code: ErrorCode = ErrorCode.UNAUTHORIZED_PRINCIPAL
def __init__(self) -> None:
pass
@staticmethod
def msg_format() -> str:
return _("Must be an organization owner")
class StreamAdministratorRequired(JsonableError):
code: ErrorCode = ErrorCode.UNAUTHORIZED_PRINCIPAL
def __init__(self) -> None:
pass
@staticmethod
def msg_format() -> str:
return _("Must be an organization or stream administrator")
class AuthenticationFailedError(JsonableError):
# Generic class for authentication failures
code: ErrorCode = ErrorCode.AUTHENTICATION_FAILED
http_status_code = 401
def __init__(self) -> None:
pass
@staticmethod
def msg_format() -> str:
return _("Your username or password is incorrect")
class UserDeactivatedError(AuthenticationFailedError):
code: ErrorCode = ErrorCode.USER_DEACTIVATED
@staticmethod
def msg_format() -> str:
return _("Account is deactivated")
class RealmDeactivatedError(AuthenticationFailedError):
code: ErrorCode = ErrorCode.REALM_DEACTIVATED
@staticmethod
def msg_format() -> str:
return _("This organization has been deactivated")
class PasswordAuthDisabledError(AuthenticationFailedError):
code: ErrorCode = ErrorCode.PASSWORD_AUTH_DISABLED
@staticmethod
def msg_format() -> str:
return _("Password authentication is disabled in this organization")
class PasswordResetRequiredError(AuthenticationFailedError):
code: ErrorCode = ErrorCode.PASSWORD_RESET_REQUIRED
@staticmethod
def msg_format() -> str:
return _("Your password has been disabled and needs to be reset")
class MarkdownRenderingException(Exception):
pass
class InvalidAPIKeyError(JsonableError):
code = ErrorCode.INVALID_API_KEY
http_status_code = 401
def __init__(self) -> None:
pass
@staticmethod
def msg_format() -> str:
return _("Invalid API key")
class InvalidAPIKeyFormatError(InvalidAPIKeyError):
@staticmethod
def msg_format() -> str:
return _("Malformed API key")
class WebhookError(JsonableError):
"""
Intended as a generic exception raised by specific webhook
integrations. This class is subclassed by more specific exceptions
such as UnsupportedWebhookEventType and AnomalousWebhookPayload.
"""
data_fields = ["webhook_name"]
def __init__(self) -> None:
# webhook_name is often set by decorators such as webhook_view
# in zerver/decorator.py
self.webhook_name = "(unknown)"
class UnsupportedWebhookEventType(WebhookError):
"""Intended as an exception for event formats that we know the
third-party service generates but which Zulip doesn't support /
generate a message for.
Exceptions where we cannot parse the event type, possibly because
the event isn't actually from the service in question, should
raise AnomalousWebhookPayload.
"""
code = ErrorCode.UNSUPPORTED_WEBHOOK_EVENT_TYPE
data_fields = ["webhook_name", "event_type"]
def __init__(self, event_type: Optional[str]) -> None:
super().__init__()
self.event_type = event_type
@staticmethod
def msg_format() -> str:
return _("The '{event_type}' event isn't currently supported by the {webhook_name} webhook")
class AnomalousWebhookPayload(WebhookError):
"""Intended as an exception for incoming webhook requests that we
cannot recognize as having been generated by the service in
question. (E.g. because someone pointed a Jira server at the
GitHub integration URL).
If we can parse the event but don't support it, use
UnsupportedWebhookEventType.
"""
code = ErrorCode.ANOMALOUS_WEBHOOK_PAYLOAD
@staticmethod
def msg_format() -> str:
return _("Unable to parse request: Did {webhook_name} generate this event?")
class MissingAuthenticationError(JsonableError):
code = ErrorCode.UNAUTHENTICATED_USER
http_status_code = 401
def __init__(self) -> None:
pass
# No msg_format is defined since this exception is caught and
# converted into json_unauthorized in Zulip's middleware.
class InvalidSubdomainError(JsonableError):
code = ErrorCode.NONEXISTENT_SUBDOMAIN
http_status_code = 404
def __init__(self) -> None:
pass
@staticmethod
def msg_format() -> str:
return _("Invalid subdomain")
class ZephyrMessageAlreadySentException(Exception):
def __init__(self, message_id: int) -> None:
self.message_id = message_id
class InvitationError(JsonableError):
code = ErrorCode.INVITATION_FAILED
data_fields = [
"errors",
"sent_invitations",
"license_limit_reached",
"daily_limit_reached",
]
def __init__(
self,
msg: str,
errors: List[Tuple[str, str, bool]],
sent_invitations: bool,
license_limit_reached: bool = False,
daily_limit_reached: bool = False,
) -> None:
self._msg: str = msg
self.errors: List[Tuple[str, str, bool]] = errors
self.sent_invitations: bool = sent_invitations
self.license_limit_reached: bool = license_limit_reached
self.daily_limit_reached: bool = daily_limit_reached
class AccessDeniedError(JsonableError):
http_status_code = 403
def __init__(self) -> None:
pass
@staticmethod
def msg_format() -> str:
return _("Access denied")
class ResourceNotFoundError(JsonableError):
http_status_code = 404
class ValidationFailureError(JsonableError):
# This class translates a Django ValidationError into a
# Zulip-style JsonableError, sending back just the first error for
# consistency of API.
data_fields = ["errors"]
def __init__(self, error: ValidationError) -> None:
super().__init__(error.messages[0])
self.errors = dict(error)
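# Illustrative sketch (not part of the Zulip module): how a JsonableError
# subclass surfaces its status code, message, and structured data. The
# settings.configure() call is only so gettext works outside a real project.
if __name__ == "__main__":
    from django.conf import settings
    if not settings.configured:
        settings.configure(USE_I18N=False)
    err = StreamDoesNotExistError("general")
    print(err.http_status_code)  # 400 (the JsonableError default)
    print(err.msg)               # Stream 'general' does not exist
    print(err.data)              # {'stream': 'general', 'code': 'STREAM_DOES_NOT_EXIST'}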
|
|
#!/usr/bin/env python
# Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages and runs tests from the current working directory.
This will traverse the current working directory and look for python files that
contain subclasses of GlslCTest.
If a class has an @inside_glslc_testsuite decorator, an instance of that
class will be created and serve as a test case in that testsuite. The test
case is then run by the following steps:
1. A temporary directory will be created.
2. The glslc_args member variable will be inspected and all placeholders in it
will be expanded by calling instantiate_for_glslc_args() on placeholders.
The transformed list elements are then supplied as glslc arguments.
3. If the environment member variable exists, its write() method will be
invoked.
4. All expected_* member variables will be inspected and all placeholders in
them will be expanded by calling instantiate_for_expectation() on those
placeholders. After placeholder expansion, if the expected_* variable is
a list, its elements will be joined together with '' to form a single
string. These expected_* variables are to be used by the check_*() methods.
5. glslc will be run with the arguments supplied in glslc_args.
6. All check_*() member methods will be called by supplying a TestStatus as
argument. Each check_*() method is expected to return a (Success, Message)
pair where Success is a boolean indicating success and Message is an error
message.
7. If any check_*() method fails, the error message is outputted and the
current test case fails.
If --leave-output was not specified, all temporary files and directories will
be deleted.
"""
from __future__ import print_function
import argparse
import fnmatch
import inspect
import os
import shutil
import subprocess
import sys
import tempfile
from collections import defaultdict
from placeholder import PlaceHolder
EXPECTED_BEHAVIOR_PREFIX = 'expected_'
VALIDATE_METHOD_PREFIX = 'check_'
def get_all_variables(instance):
"""Returns the names of all the variables in instance."""
return [v for v in dir(instance) if not callable(getattr(instance, v))]
def get_all_methods(instance):
"""Returns the names of all methods in instance."""
return [m for m in dir(instance) if callable(getattr(instance, m))]
def get_all_superclasses(cls):
"""Returns all superclasses of a given class.
Returns:
A list of superclasses of the given class. The order guarantees that
* A base class precedes its derived classes, e.g., for "class B(A)", it
will be [..., A, B, ...].
* When there are multiple base classes, base classes declared first
precede those declared later, e.g., for "class C(A, B)", it will be
[..., A, B, C, ...].
"""
classes = []
for superclass in cls.__bases__:
for c in get_all_superclasses(superclass):
if c not in classes:
classes.append(c)
for superclass in cls.__bases__:
if superclass not in classes:
classes.append(superclass)
return classes
def get_all_test_methods(test_class):
"""Gets all validation methods.
Returns:
A list of validation methods. The order guarantees that
* A method defined in superclass precedes one defined in subclass,
e.g., for "class A(B)", methods defined in B precedes those defined
in A.
* If a subclass has more than one superclass, e.g., "class C(A, B)",
then methods defined in A precedes those defined in B.
"""
classes = get_all_superclasses(test_class)
classes.append(test_class)
all_tests = [m for c in classes
for m in get_all_methods(c)
if m.startswith(VALIDATE_METHOD_PREFIX)]
unique_tests = []
for t in all_tests:
if t not in unique_tests:
unique_tests.append(t)
return unique_tests
class GlslCTest:
"""Base class for glslc test cases.
Subclasses define test cases' facts (shader source code, glslc command,
result validation), which will be used by the TestCase class for running
tests. Subclasses should define glslc_args (specifying glslc command
arguments), and at least one check_*() method (for result validation) for
a full-fledged test case. All check_*() methods should take a TestStatus
parameter and return a (Success, Message) pair, in which Success is a
boolean indicating success and Message is an error message. The test passes
iff all check_*() methods return true.
Often, a test case class will delegate the check_* behaviors by inheriting
from other classes.
"""
def name(self):
return self.__class__.__name__
class TestStatus:
"""A struct for holding run status of a test case."""
def __init__(self, test_manager, returncode, stdout, stderr, directory, inputs, input_filenames):
self.test_manager = test_manager
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
# temporary directory where the test runs
self.directory = directory
# List of inputs, as PlaceHolder objects.
self.inputs = inputs
# the names of input shader files (potentially including paths)
self.input_filenames = input_filenames
class GlslCTestException(Exception):
"""GlslCTest exception class."""
pass
def inside_glslc_testsuite(testsuite_name):
"""Decorator for subclasses of GlslCTest.
This decorator checks that a class meets the requirements (see below)
for a test case class, and then puts the class in a certain testsuite.
* The class needs to be a subclass of GlslCTest.
* The class needs to have glslc_args defined as a list.
* The class needs to define at least one check_*() methods.
* All expected_* variables required by check_*() methods can only be
of bool, str, or list type.
* Python runtime will throw an exception if the expected_* member
attributes required by check_*() methods are missing.
"""
def actual_decorator(cls):
if not inspect.isclass(cls):
raise GlslCTestException('Test case should be a class')
if not issubclass(cls, GlslCTest):
raise GlslCTestException(
'All test cases should be subclasses of GlslCTest')
if 'glslc_args' not in get_all_variables(cls):
raise GlslCTestException('No glslc_args found in the test case')
if not isinstance(cls.glslc_args, list):
raise GlslCTestException('glslc_args needs to be a list')
if not any([
m.startswith(VALIDATE_METHOD_PREFIX)
for m in get_all_methods(cls)]):
raise GlslCTestException(
'No check_*() methods found in the test case')
if not all([
isinstance(v, (bool, str, list))
for v in get_all_variables(cls)]):
raise GlslCTestException(
'expected_* variables are only allowed to be bool, str, or '
'list type.')
cls.parent_testsuite = testsuite_name
return cls
return actual_decorator
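# A minimal sketch of how a test case is typically declared with this
# decorator. The suite name, the shader source, and the FileShader placeholder
# below are illustrative assumptions; only GlslCTest, glslc_args, and the
# check_*() convention come from this framework:
#
#   @inside_glslc_testsuite('ExampleCompile')
#   class CompileEmptyVertexShader(GlslCTest):
#       shader = FileShader('#version 310 es\nvoid main() {}', '.vert')
#       glslc_args = ['-c', shader]
#
#       def check_returncode_is_zero(self, status):
#           return (status.returncode == 0,
#                   'glslc failed: ' + str(status.stderr))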
class TestManager:
"""Manages and runs a set of tests."""
def __init__(self, executable_path, disassembler_path):
self.executable_path = executable_path
self.disassembler_path = disassembler_path
self.num_successes = 0
self.num_failures = 0
self.num_tests = 0
self.leave_output = False
self.tests = defaultdict(list)
def notify_result(self, test_case, success, message):
"""Call this to notify the manager of the results of a test run."""
self.num_successes += 1 if success else 0
self.num_failures += 0 if success else 1
counter_string = str(
self.num_successes + self.num_failures) + '/' + str(self.num_tests)
print('%-10s %-40s ' % (counter_string, test_case.test.name()) +
('Passed' if success else '-Failed-'))
if not success:
print(' '.join(test_case.command))
print(message)
def add_test(self, testsuite, test):
"""Add this to the current list of test cases."""
self.tests[testsuite].append(TestCase(test, self))
self.num_tests += 1
def run_tests(self):
for suite in self.tests:
print('Glslc test suite: "{suite}"'.format(suite=suite))
for x in self.tests[suite]:
x.runTest()
class TestCase:
"""A single test case that runs in its own directory."""
def __init__(self, test, test_manager):
self.test = test
self.test_manager = test_manager
self.inputs = [] # inputs, as PlaceHolder objects.
self.file_shaders = [] # filenames of shader files.
self.stdin_shader = None # text to be passed to glslc as stdin
def setUp(self):
"""Creates environment and instantiates placeholders for the test case."""
self.directory = tempfile.mkdtemp(dir=os.getcwd())
glslc_args = self.test.glslc_args
# Instantiate placeholders in glslc_args
self.test.glslc_args = [
arg.instantiate_for_glslc_args(self)
if isinstance(arg, PlaceHolder) else arg
for arg in self.test.glslc_args]
# Get all shader files' names
self.inputs = [arg for arg in glslc_args if isinstance(arg, PlaceHolder)]
self.file_shaders = [arg.filename for arg in self.inputs]
if 'environment' in get_all_variables(self.test):
self.test.environment.write(self.directory)
expectations = [v for v in get_all_variables(self.test)
if v.startswith(EXPECTED_BEHAVIOR_PREFIX)]
# Instantiate placeholders in expectations
for expectation_name in expectations:
expectation = getattr(self.test, expectation_name)
if isinstance(expectation, list):
                expanded_expectations = [
                    element.instantiate_for_expectation(self)
                    if isinstance(element, PlaceHolder) else element
                    for element in expectation]
                setattr(
                    self.test, expectation_name,
                    ''.join(expanded_expectations))
elif isinstance(expectation, PlaceHolder):
setattr(self.test, expectation_name,
expectation.instantiate_for_expectation(self))
def tearDown(self):
"""Removes the directory if we were not instructed to do otherwise."""
if not self.test_manager.leave_output:
shutil.rmtree(self.directory)
def runTest(self):
"""Sets up and runs a test, reports any failures and then cleans up."""
self.setUp()
success = False
message = ''
try:
self.command = [self.test_manager.executable_path]
self.command.extend(self.test.glslc_args)
process = subprocess.Popen(
args=self.command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=self.directory)
output = process.communicate(self.stdin_shader)
test_status = TestStatus(
self.test_manager,
process.returncode, output[0], output[1],
self.directory, self.inputs, self.file_shaders)
run_results = [getattr(self.test, test_method)(test_status)
for test_method in get_all_test_methods(
self.test.__class__)]
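            # Each check_*() returns a (success, message) pair; zip(*run_results)
            # transposes the list into a tuple of successes and a tuple of messages.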
success, message = zip(*run_results)
success = all(success)
message = '\n'.join(message)
except Exception as e:
success = False
message = str(e)
self.test_manager.notify_result(self, success, message)
self.tearDown()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('glslc', metavar='path/to/glslc', type=str, nargs=1,
help='Path to glslc')
    parser.add_argument('spirvdis', metavar='path/to/spirv-dis', type=str, nargs=1,
help='Path to spirv-dis')
parser.add_argument('--leave-output', action='store_const', const=1,
help='Do not clean up temporary directories')
parser.add_argument('--test-dir', nargs=1,
help='Directory to gather the tests from')
args = parser.parse_args()
default_path = sys.path
root_dir = os.getcwd()
if args.test_dir:
root_dir = args.test_dir[0]
manager = TestManager(args.glslc[0], args.spirvdis[0])
if args.leave_output:
manager.leave_output = True
for root, _, filenames in os.walk(root_dir):
for filename in fnmatch.filter(filenames, '*.py'):
if filename.endswith('nosetest.py'):
# Skip nose tests, which are for testing functions of
# the test framework.
continue
sys.path = default_path
sys.path.append(root)
mod = __import__(os.path.splitext(filename)[0])
            for _, obj in inspect.getmembers(mod):
if inspect.isclass(obj) and hasattr(obj, 'parent_testsuite'):
manager.add_test(obj.parent_testsuite, obj())
manager.run_tests()
if manager.num_failures > 0:
sys.exit(-1)
if __name__ == '__main__':
main()
|
|
from __future__ import absolute_import
from django import forms
from django.db.models import BLANK_CHOICE_DASH
from django.template.defaultfilters import slugify
from starslib import base
from . import models
from six.moves import range
from six.moves import zip
class CreateGameForm(forms.ModelForm):
class Meta:
model = models.Game
fields = ('name', 'slug', 'description', 'published')
class AiPlayersWidget(forms.MultiWidget):
def __init__(self, attrs=None):
_widgets = [
forms.Select(attrs=attrs,
choices=BLANK_CHOICE_DASH + list(w))
for x in range(16)
for w in (models.GameOptions.AI_RACES,
models.GameOptions.AI_SKILL_LEVELS)
]
        super(AiPlayersWidget, self).__init__(_widgets, attrs)
def decompress(self, value):
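        # The stored value is a comma-separated flat list of up to 16
        # (race, skill) pairs; expand it into the 32 sub-widget slots,
        # leaving unused slots as None.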
new_value = [None for x in range(32)]
if value:
values = [int(x.strip()) for x in value.split(',')]
L = len(values) // 2 * 2
new_value[:L] = values[:L]
return new_value
class AiPlayers(forms.MultiValueField):
widget = AiPlayersWidget
def __init__(self, *args, **kwargs):
fields = [
forms.ChoiceField(choices=BLANK_CHOICE_DASH + list(w),
required=False)
for x in range(16)
for w in (models.GameOptions.AI_RACES,
models.GameOptions.AI_SKILL_LEVELS)
]
super(AiPlayers, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
values = []
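        # zip(*[iter(data_list)] * 2) walks the flat widget data two items at
        # a time, yielding consecutive (race, skill) pairs.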
for race, skill in zip(*[iter(data_list)]*2):
if race and skill:
values.extend([race, skill])
return ','.join(values)
class GameOptionsForm(forms.ModelForm):
ai_players = AiPlayers(required=False)
class Meta:
model = models.GameOptions
fields = ('universe_size', 'universe_density', 'starting_distance',
'maximum_minerals', 'slow_tech', 'accelerated_bbs',
'random_events', 'computer_alliances', 'public_scores',
'galaxy_clumping', 'ai_players', 'percent_planets',
'tech_level', 'tech_fields', 'score', 'exceeds_nearest_score',
'production', 'capital_ships', 'highest_score_after_years',
'num_criteria', 'min_turns_to_win')
class RaceForm(forms.ModelForm):
class Meta:
model = models.Race
fields = ('name', 'plural_name')
def clean_name(self):
name = self.cleaned_data.get('name', '')
try:
name.encode('cp1252')
except UnicodeEncodeError:
raise forms.ValidationError(
"Race name is restricted to the cp1252/latin1 character set.")
return name
def clean_plural_name(self):
plural_name = self.cleaned_data.get('plural_name', '')
try:
plural_name.encode('cp1252')
except UnicodeEncodeError:
raise forms.ValidationError(
"Race plural name is restricted to the"
" cp1252/latin1 character set.")
return plural_name
def clean(self):
cleaned_data = super(RaceForm, self).clean()
game = self.instance.game
r_id = self.instance.id
name = cleaned_data.get('name', '')
existing_race = models.Race.objects.filter(game=game, name=name)
if existing_race and r_id != existing_race.get().id:
raise forms.ValidationError(
"The race name '{0}' is already being used for"
" this game.".format(name))
plural_name = cleaned_data.get('plural_name', '')
existing_race = models.Race.objects.filter(game=game,
plural_name=plural_name)
if existing_race and r_id != existing_race.get().id:
raise forms.ValidationError(
"The race plural_name '{0}' is already being used for"
" this game.".format(plural_name))
max_length = self.instance._meta.get_field('slug').max_length
slug, num, end = slugify(plural_name), 1, ''
if len(slug) > max_length:
slug = slug[:max_length]
while game.races.exclude(pk=r_id
).filter(slug=slug+end).exists():
num += 1
end = str(num)
if len(slug) + len(end) > max_length:
slug = slug[:max_length - len(end)]
self.instance.slug = slug + end
return cleaned_data
class ChooseUserRaceForm(forms.ModelForm):
class Meta:
model = models.Race
fields = ('racefile',)
def __init__(self, *args, **kwargs):
user = kwargs.pop('user')
super(ChooseUserRaceForm, self).__init__(*args, **kwargs)
self.fields['racefile'].queryset = models.StarsFile.objects.filter(
userrace__user=user
)
self.fields['racefile'].choices = [
(u'', self.fields['racefile'].empty_label)
] + [
(u.racefile.id, u.identifier)
for u in models.UserRace.objects.filter(user=user,
racefile__isnull=False)
]
class AmbassadorForm(forms.ModelForm):
class Meta:
model = models.Ambassador
fields = ('name',)
class RaceFileForm(forms.ModelForm):
class Meta:
model = models.StarsFile
fields = ('file',)
def clean_file(self):
f = self.cleaned_data.get('file')
valid = True
try:
self.stars_file = base.StarsFile()
self.stars_file.bytes = f.read()
if self.stars_file.type != 'r':
valid = False
elif self.stars_file.counts != {8: 1, 6: 1, 0: 1}:
valid = False
except (base.StarsError, Exception):
valid = False
if valid:
self.instance.type = 'r'
else:
raise forms.ValidationError("Not a valid Stars race file.")
return f
class UserRaceForm(forms.ModelForm):
class Meta:
model = models.UserRace
fields = ('identifier',)
def _get_validation_exclusions(self):
exclude = super(UserRaceForm, self)._get_validation_exclusions()
exclude.remove('user')
return exclude
class OrderFileForm(forms.ModelForm):
class Meta:
model = models.StarsFile
fields = ('file',)
def clean_file(self):
f = self.cleaned_data.get('file')
try:
self._sfile = models.StarsFile.parse(f.read(), type='x')
except (base.StarsError, Exception):
raise forms.ValidationError("Not a valid Stars order file.")
self.instance.type = 'x'
return f
class HistoryFileForm(forms.ModelForm):
class Meta:
model = models.StarsFile
fields = ('file',)
def clean_file(self):
f = self.cleaned_data.get('file')
try:
self._sfile = models.StarsFile.parse(f.read(), type='h')
except (base.StarsError, Exception):
raise forms.ValidationError("Not a valid Stars history file.")
self.instance.type = 'h'
return f
class RacePageForm(forms.ModelForm):
set_as_homepage = forms.BooleanField(required=False)
class Meta:
model = models.RacePage
fields = ('title', 'body')
|
|
# Copyright (c) 2015 Jonathan M. Lange <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from eliot import add_destination
from pyrsistent import PClass, field
from twisted.plugin import IPlugin
from twisted.trial.itrial import IReporter
from zope.interface import implementer
from ._types import (
ERROR,
FAILURE,
SKIP,
TEST,
UNEXPECTED_SUCCESS,
make_error_message,
make_expected_failure_message,
)
class InvalidStateError(Exception):
"""
Raised when someone attempts to put an EliotReporter into an invalid state.
"""
# TODO: The Trial base reporter does some sort of warning capturing. It would
# be good to do something similar here so that *everything* that the test
# emits is captured in a single, coherent Eliot log.
# TODO: Ideally we'd also capture stdout & stderr and encode those as Eliot
# messages. Probably can't be done at the reporter level, but we can provide
# functions for tests to be able to use that.
# TODO: Should setUp, tearDown, the test itself and cleanup also be Eliot
# actions? If so, that's a thing for the base test case rather than the
# reporter.
# TODO: Currently Eliot has support for capturing the eliot logs and dumping
# them to the Twisted log, which Trial stores as _trial_temp/test.log. If
# we're using something like this, then we actually want all of those log
# messages to be included as part of the test action, included in the same
# log.
# TODO: "The value is in the output". No one is going to care about this
# unless there's something that consumes the output and displays the results
# as something that matters to humans.
# TODO: Currently the action of a test "succeeds" whether or not the test
# passes. It's unclear whether this is the right behaviour. Factors:
#
# - when reading eliot output, it makes it harder to see whether a test
# passed or failed.
# - tests can have multiple errors, if we made the action fail on test failure,
# then we'd have to aggregate these errors somehow.
# - aggregating the errors would mean that we either would not see them at all
# until the test completes, or that we would log duplicate actions
@implementer(IReporter)
class EliotReporter(object):
def __init__(self, stream, tbformat='default', realtime=False,
publisher=None, logger=None):
# TODO: Trial has a pretty confusing set of expectations for
# reporters. In particular, it's not clear what it needs to construct
# a reporter. It's also not clear what it expects as public
# properties. The IReporter interface and the tests for the reporter
# interface cover somewhat different things.
self._stream = stream
self.tbformat = tbformat
self.shouldStop = False
self.testsRun = 0
add_destination(self._write_message)
self._current_test = None
self._successful = True
self._logger = logger
def _write_message(self, message):
self._stream.write(json.dumps(message) + "\n")
def _ensure_test_running(self, expected_test):
current = self._current_test
if current and current.id() != expected_test.id():
raise InvalidStateError(
'Expected {} to be running, was {} instead'.format(
expected_test, self._current_test))
def startTest(self, method):
"""
Report the beginning of a run of a single test method.
@param method: an object that is adaptable to ITestMethod
"""
if self._current_test:
raise InvalidStateError(
'Trying to start {}, but {} already started'.format(
method, self._current_test))
self._current_test = method
self._action = TEST(test=method, logger=self._logger)
# TODO: This isn't using Eliot the way it was intended. Probably a
# better way is to have a test case (or a testtools-style TestCase
# runner!) that does all of this.
self._action.__enter__()
def stopTest(self, method):
"""
Report the status of a single test method
@param method: an object that is adaptable to ITestMethod
"""
if not self._current_test:
raise InvalidStateError(
'Trying to stop {} without starting it first'.format(method))
self._ensure_test_running(method)
self._current_test = None
self._action.__exit__(None, None, None)
def addSuccess(self, test):
"""
Record that test passed.
"""
self._ensure_test_running(test)
def addError(self, test, error):
"""
Record that a test has raised an unexpected exception.
"""
self._ensure_test_running(test)
make_error_message(ERROR, error).write(self._logger)
self._successful = False
def addFailure(self, test, failure):
"""
Record that a test has failed with the given failure.
"""
self._ensure_test_running(test)
make_error_message(FAILURE, failure).write(self._logger)
self._successful = False
def addExpectedFailure(self, test, failure, todo):
"""
Record that the given test failed, and was expected to do so.
"""
self._ensure_test_running(test)
make_expected_failure_message(todo, failure).write(self._logger)
def addUnexpectedSuccess(self, test, todo):
"""
        Record that the given test succeeded unexpectedly (it was expected to
        fail but passed).
"""
self._ensure_test_running(test)
UNEXPECTED_SUCCESS(todo=todo).write(self._logger)
def addSkip(self, test, reason):
"""
Record that a test has been skipped for the given reason.
"""
self._ensure_test_running(test)
SKIP(reason=reason).write(self._logger)
def wasSuccessful(self):
return self._successful
def stop(self):
self.shouldStop = True
def done(self):
"""
Called when the test run is complete.
"""
@implementer(IReporter, IPlugin)
class TrialReporter(PClass):
name = field()
module = field()
description = field()
longOpt = field()
shortOpt = field()
klass = field()
eliot_plugin = TrialReporter(
name="Eliot reporter",
description="Output all test results as eliot logs",
longOpt="eliot",
shortOpt=None,
module="eliotreporter",
klass="EliotReporter",
)
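# Usage sketch (a deployment assumption, not something this module enforces):
# if this file is installed as a Twisted plugin (e.g. under a twisted/plugins/
# directory), trial can select the reporter with
#
#   trial --reporter=eliot <package.tests>
#
# and every test event is then emitted on the given stream as one
# JSON-encoded Eliot message per line (see EliotReporter._write_message).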
|
|
import cmd
import sys
import textwrap
import loadJSON
DESC = 'desc'
NORTH = 'north'
SOUTH = 'south'
EAST = 'east'
WEST = 'west'
UP = 'up'
DOWN = 'down'
GROUND = 'ground'
SHOP = 'shop'
GROUNDDESC = 'grounddesc'
SHORTDESC = 'shortdesc'
LONGDESC = 'longdesc'
TAKEABLE = 'takeable'
EDIBLE = 'edible'
DESCWORDS = 'descwords'
SHOWFULLEXITS = "showFullExits"
SCREEN_WIDTH = 80
# Initialize some empty dictionaries
rooms = {}
items = {}
gameData = {}
def moveDirection(direction):
global rooms, items, gameData
if direction in rooms[gameData["location"]]:
print('You move', direction, '\n')
gameData["location"] = rooms[gameData["location"]][direction]
displayLocation(gameData["location"])
else:
print('You cannot move in that direction.')
def getAllDescWords(itemList):
itemList = list(set(itemList))
descWords = []
for item in itemList:
descWords.extend(items[item][DESCWORDS])
return list(set(descWords))
def getAllFirstDescWords(itemList):
itemList = list(set(itemList))
descWords = []
for item in itemList:
descWords.append(items[item][DESCWORDS][0])
return list(set(descWords))
def getFirstItemMatchingDesc(desc, itemList):
itemList = list(set(itemList))
for item in itemList:
if desc in items[item][DESCWORDS]:
return item
return None
def getAllItemsMatchingDesc(desc, itemList):
itemList = list(set(itemList))
matchingItems = []
for item in itemList:
if desc in items[item][DESCWORDS]:
matchingItems.append(item)
return matchingItems
class TRPGCmd(cmd.Cmd):
prompt = '\n> '
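    # cmd.Cmd dispatches input to do_<word> methods (e.g. "north" -> do_north),
    # uses their docstrings for the built-in "help" command, and calls
    # complete_<word> methods for tab completion of arguments.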
def default(self, arg):
print("I do not understand that command. Type \"help\" for a list of commands.")
def do_quit(self, arg):
'''Quits the program.'''
return True # Tells the command listener to terminate the loop
def do_north(self, arg):
'''Go to the area to the north, if possible'''
moveDirection(NORTH)
def do_south(self, arg):
'''Go to the area to the south, if possible'''
moveDirection(SOUTH)
def do_east(self, arg):
'''Go to the area to the east, if possible'''
moveDirection(EAST)
def do_west(self, arg):
'''Go to the area to the west, if possible'''
moveDirection(WEST)
def do_up(self, arg):
'''Go to the area above, if possible'''
moveDirection(UP)
def do_down(self, arg):
'''Go to the area below, if possible'''
moveDirection(DOWN)
def do_inventory(self, arg):
        '''Display a list of the items in your possession.'''
if len(gameData['inventory']) == 0:
print('Inventory:\n (nothing)')
return
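        # Tally duplicates so stacked items print once as "name (count)".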
itemCount = {}
for item in gameData['inventory']:
            if item in itemCount:
                itemCount[item] += 1
            else:
                itemCount[item] = 1
        print('Inventory:')
        for item in set(gameData['inventory']):
            if itemCount[item] > 1:
                print(' %s (%s)' % (item, itemCount[item]))
else:
print(' ' + item)
# provide some command aliases for quicker typing
do_n = do_north
do_s = do_south
do_e = do_east
do_w = do_west
do_u = do_up
do_d = do_down
do_inv = do_inventory
def do_take(self, arg):
        '''take <item> - Take an item on the ground, and place it in your inventory.'''
global rooms, gameData
itemToTake = arg.lower()
if itemToTake == '':
print('Take what? Type "look" to see the items on the ground here.')
return
cantTake = False
for item in getAllItemsMatchingDesc(itemToTake, rooms[gameData['location']][GROUND]):
            if not items[item].get(TAKEABLE, True):
cantTake = True
continue
print('You take %s.' % (items[item][SHORTDESC]))
rooms[gameData['location']][GROUND].remove(item)
gameData['inventory'].append(item)
return
if cantTake:
print('You cannot take "%s"' % (itemToTake))
else:
print('That is not on the ground.')
def complete_take(self, text, line, begidx, endidx):
possibleItems = []
text = text.lower()
if not text:
return getAllFirstDescWords(rooms[gameData['location']][GROUND])
for item in list(set(rooms[gameData['location']][GROUND])):
for descWord in items[item][DESCWORDS]:
if descWord.startswith(text) and items[item].get(TAKEABLE, True):
possibleItems.append(descWord)
return list(set(possibleItems))
def do_drop(self, arg):
'''drop <item> - Drop an item in your inventory onto the ground.'''
global rooms, gameData
itemToDrop = arg.lower()
invDescWords = getAllDescWords(gameData['inventory'])
if itemToDrop not in invDescWords:
print('You do not have "%s" in your inventory.' % (itemToDrop))
return
item = getFirstItemMatchingDesc(itemToDrop, gameData['inventory'])
if item is not None:
print('You drop %s.' % (items[item][SHORTDESC]))
gameData['inventory'].remove(item)
rooms[gameData['location']][GROUND].append(item)
def do_exits(self, arg):
'''Toggle showing of full exit data.'''
global gameData
gameData[SHOWFULLEXITS] = not gameData[SHOWFULLEXITS]
if gameData[SHOWFULLEXITS]:
print("Showing full exit descriptions.")
else:
print("Showing brief exit descriptions.")
def help_combat(self):
print("Combat isn't currently available in this version.")
def initializeGameData():
global rooms, items, gameData
data = loadJSON.loadAll(
{
"rooms": "../sample_game/json/rooms.json",
"items": "../sample_game/json/items.json",
"game": "../sample_game/json/game.json"})
rooms = data["rooms"]
items = data["items"]
gameData = data["game"]
def main():
initializeGameData()
print('Text RPG!')
print('=========')
print()
print('(Type "help" for a list of commands.)')
print()
displayLocation(gameData["location"])
TRPGCmd().cmdloop()
print('Thanks for playing!')
if __name__ == "__main__": main()
|
|
import configparser
import unittest
from pathlib import Path
from TM1py import Element
from TM1py.Exceptions import TM1pyException
from TM1py.Objects import Dimension, Hierarchy, Subset
from TM1py.Services import TM1Service
class TestHierarchyService(unittest.TestCase):
tm1: TM1Service
prefix = 'TM1py_Tests_Hierarchy_'
dimension_name = prefix + "Some_Name"
subset_name = prefix + "Some_Subset"
@classmethod
def setUpClass(cls):
"""
        Establishes a connection to TM1 and creates TM1 objects to use across all tests
"""
# Connection to TM1
cls.config = configparser.ConfigParser()
cls.config.read(Path(__file__).parent.joinpath('config.ini'))
cls.tm1 = TM1Service(**cls.config['tm1srv01'])
@classmethod
def teardown_class(cls):
cls.tm1.logout()
@classmethod
def setUp(cls):
cls.create_dimension()
cls.create_subset()
@classmethod
def tearDown(cls):
cls.delete_dimension()
@classmethod
def create_dimension(cls):
dimension = Dimension(cls.dimension_name)
hierarchy = Hierarchy(name=cls.dimension_name, dimension_name=cls.dimension_name)
hierarchy.add_element('Total Years', 'Consolidated')
hierarchy.add_element('No Year', 'Numeric')
hierarchy.add_element('1989', 'Numeric')
hierarchy.add_element("My Element", "Numeric")
hierarchy.add_element_attribute('Previous Year', 'String')
hierarchy.add_element_attribute('Next Year', 'String')
hierarchy.add_edge('Total Years', '1989', 2)
dimension.add_hierarchy(hierarchy)
cls.tm1.dimensions.create(dimension)
@classmethod
def delete_dimension(cls):
cls.tm1.dimensions.delete(cls.dimension_name)
@classmethod
def create_subset(cls):
s = Subset(cls.subset_name, cls.dimension_name, cls.dimension_name,
expression="{{[{}].Members}}".format(cls.dimension_name))
cls.tm1.dimensions.subsets.create(s, False)
def add_other_hierarchy(self):
dimension = self.tm1.dimensions.get(self.dimension_name)
# other hierarchy
hierarchy = Hierarchy(name="Other Hierarchy", dimension_name=self.dimension_name)
hierarchy.add_element('Other Total Years', 'Consolidated')
hierarchy.add_element('No Year', 'Numeric')
hierarchy.add_element('1989', 'Numeric')
hierarchy.add_element("Element With ' in the name", "Numeric")
hierarchy.add_element_attribute('Previous Year', 'String')
hierarchy.add_element_attribute('Next Year', 'String')
hierarchy.add_edge('Other Total Years', '1989', 2)
dimension.add_hierarchy(hierarchy)
self.tm1.dimensions.update(dimension)
def add_balanced_hierarchy(self, hierarchy_name):
dimension = self.tm1.dimensions.get(self.dimension_name)
# other hierarchy
hierarchy = Hierarchy(name=hierarchy_name, dimension_name=self.dimension_name)
hierarchy.add_element("Total Years Balanced", "Consolidated")
hierarchy.add_element('1989', 'Numeric')
hierarchy.add_element('1990', 'Numeric')
hierarchy.add_element('1991', 'Numeric')
hierarchy.add_edge("Total Years Balanced", "1989", 1)
hierarchy.add_edge("Total Years Balanced", "1990", 1)
hierarchy.add_edge("Total Years Balanced", "1991", 1)
dimension.add_hierarchy(hierarchy)
self.tm1.dimensions.update(dimension)
def update_hierarchy(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
h = d.default_hierarchy
# Edit Elements and Edges
for year in range(2010, 2021, 1):
parent = str(year)
h.add_element(parent, 'Consolidated')
h.add_edge('Total Years', parent, 1)
for month in ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'):
component = '{}-{}'.format(year, month)
h.add_element(component, 'Numeric')
h.add_edge(parent, component, 1)
# Edit Element Attributes
h.add_element_attribute('Name Long', 'Alias')
h.add_element_attribute('Name Short', 'Alias')
h.add_element_attribute('Days', 'Numeric')
# Remove attribute
h.remove_element_attribute('Next Year')
# Remove Edge
h.remove_edge('Total Years', '1989')
# Update Edge
h.update_edge('Total Years', '2011', 2)
# Update_element
h.update_element('No Year', 'String')
self.tm1.dimensions.update(d)
def test_get_hierarchy(self):
h = self.tm1.dimensions.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertIn('Total Years', h.elements.keys())
self.assertIn('No Year', h.elements.keys())
self.assertIn('1989', h.elements.keys())
self.assertIn('Next Year', [ea.name for ea in h.element_attributes])
self.assertIn('Previous Year', [ea.name for ea in h.element_attributes])
self.assertIn(self.subset_name, h.subsets)
def test_hierarchy___get__(self):
h = self.tm1.dimensions.hierarchies.get(self.dimension_name, self.dimension_name)
element = h["Total Years"]
self.assertIsInstance(element, Element)
self.assertEqual(element.name, "Total Years")
self.assertEqual(element.element_type, Element.Types.CONSOLIDATED)
element = h["Total Years".replace(" ", "").lower()]
self.assertIsInstance(element, Element)
self.assertEqual(element.name, "Total Years")
self.assertEqual(element.element_type, Element.Types.CONSOLIDATED)
element = h["1989"]
self.assertIsInstance(element, Element)
self.assertEqual(element.name, "1989")
self.assertEqual(element.element_type, Element.Types.NUMERIC)
self.assertNotEqual(element.element_type, Element.Types.STRING)
def test_hierarchy___get__exception(self):
h = self.tm1.dimensions.hierarchies.get(self.dimension_name, self.dimension_name)
try:
_ = h["Im not a valid year"]
raise Exception("did not throw Exception when expected to do so")
except ValueError:
pass
def test_hierarchy___contains__(self):
h = self.tm1.dimensions.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertIn("1989", h)
self.assertIn("Total Years", h)
self.assertIn("Total Years".replace(" ", "").lower(), h)
self.assertIn("1 9 8 9 ", h)
self.assertNotIn("3001", h)
def test_hierarchy___iter__(self):
h = self.tm1.dimensions.hierarchies.get(self.dimension_name, self.dimension_name)
elements_cloned_through_iter = [element for element in h]
self.assertEqual(len(h._elements), len(elements_cloned_through_iter))
for element in elements_cloned_through_iter:
self.assertIn(element.name, h.elements)
def test_hierarchy___len__(self):
h = self.tm1.dimensions.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertGreater(len(h), 0)
self.assertEqual(len(h), len(h._elements))
def test_update_hierarchy(self):
self.update_hierarchy()
# Check if update works
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertIn('2010-Jan', h.elements.keys())
self.assertIn('2020-Dec', h.elements.keys())
self.assertNotIn('Next Year', [ea.name for ea in h.element_attributes])
self.assertIn('Previous Year', [ea.name for ea in h.element_attributes])
self.assertIn('Days', [ea.name for ea in h.element_attributes])
self.assertIn('Name Long', [ea.name for ea in h.element_attributes])
self.assertEqual(h.edges[('Total Years', '2011')], 2)
self.assertEqual(h.elements['No Year'].element_type, Element.Types.STRING)
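        # update_hierarchy() leaves the 4 original elements plus 11 year
        # consolidations (2010-2020) and 132 month leaves: 147 elements and
        # 143 edges in total.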
summary = self.tm1.dimensions.hierarchies.get_hierarchy_summary(self.dimension_name, self.dimension_name)
self.assertEqual(summary["Elements"], 147)
self.assertEqual(summary["Edges"], 143)
self.assertEqual(summary["Members"], 147)
self.assertEqual(summary["ElementAttributes"], 4)
self.assertEqual(summary["Levels"], 3)
def test_update_hierarchy_remove_c_element(self):
self.update_hierarchy()
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertIn('2011', h.elements)
self.assertIn(('2011', '2011-Jan'), h.edges)
h.remove_element('2011')
self.tm1.dimensions.hierarchies.update(h)
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertNotIn('2011', h.elements)
self.assertNotIn(('2011', '2011-Jan'), h.edges)
def test_update_hierarchy_remove_n_element(self):
self.update_hierarchy()
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertIn('2011-Jan', h.elements)
self.assertIn(('2011', '2011-Jan'), h.edges)
h.remove_element('2011-Jan')
self.tm1.dimensions.hierarchies.update(h)
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertNotIn('2011-Jan', h.elements)
self.assertNotIn(('2011', '2011-Jan'), h.edges)
def test_update_hierarchy_remove_s_element(self):
self.update_hierarchy()
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertIn('No Year', h.elements)
h.remove_element('No Year')
self.tm1.dimensions.hierarchies.update(h)
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertNotIn('No Year', h.elements)
def test_update_hierarchy_remove_edges_related_to_element(self):
self.update_hierarchy()
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertIn('2012', h.elements)
h.remove_edges_related_to_element(element_name='2012 ')
self.tm1.dimensions.hierarchies.update(h)
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertIn('2012', h.elements)
self.assertNotIn(('2012', '2012- Jan'), h.edges)
self.assertNotIn(('2012', '2012-DEC'), h.edges)
self.assertNotIn(('TotalYears', '2012'), h.edges)
self.assertIn(('Total YEARS', '2011'), h.edges)
self.assertIn(('Total Years', '2013'), h.edges)
def test_update_hierarchy_remove_edges(self):
self.update_hierarchy()
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertIn('2012', h.elements)
self.assertIn(('2012', '2012-Jan'), h.edges)
self.assertIn(('2012', '2012-Feb'), h.edges)
self.assertIn(('2012', '2012-Mar'), h.edges)
self.assertIn(('2012', '2012-Apr'), h.edges)
edges = [('2012', '2012- Jan'), ('2012', '2012-Feb'), ('2012', '2012-MAR'), ('2012', '2012-Apr')]
h.remove_edges(edges=edges)
self.tm1.dimensions.hierarchies.update(h)
d = self.tm1.dimensions.get(self.dimension_name)
h = d.default_hierarchy
self.assertNotIn(('2012', '2012-Jan'), h.edges)
self.assertNotIn(('2012', '2012-Feb'), h.edges)
self.assertNotIn(('2012', '2012-Mar'), h.edges)
self.assertNotIn(('2012', '2012-Apr'), h.edges)
self.assertNotIn(('2012', '2012 - JAN'), h.edges)
self.assertIn(('2012', '2012-May'), h.edges)
self.assertIn('2012', h.elements)
self.assertIn('2012-Feb', h.elements)
def test_hierarchy_summary(self):
summary = self.tm1.dimensions.hierarchies.get_hierarchy_summary(self.dimension_name, self.dimension_name)
self.assertEqual(summary["Elements"], 4)
self.assertEqual(summary["Edges"], 1)
self.assertEqual(summary["Members"], 4)
self.assertEqual(summary["ElementAttributes"], 2)
self.assertEqual(summary["Levels"], 2)
def test_get_default_member(self):
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name, self.dimension_name)
self.assertEqual(default_member, "Total Years")
def test_get_default_member_for_leaves_hierarchy(self):
self.add_other_hierarchy()
default_member = self.tm1.dimensions.hierarchies.get_default_member(
dimension_name=self.dimension_name,
hierarchy_name="Leaves")
self.assertEqual(default_member, "No Year")
def test_update_default_member(self):
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name, self.dimension_name)
self.assertEqual(default_member, "Total Years")
self.tm1.dimensions.hierarchies.update_default_member(self.dimension_name, self.dimension_name, "1989")
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name, self.dimension_name)
self.assertEqual(default_member, "1989")
def test_update_default_member_skip_hierarchy_name_argument(self):
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name)
self.assertEqual(default_member, "Total Years")
self.tm1.dimensions.hierarchies.update_default_member(dimension_name=self.dimension_name, member_name="1989")
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name)
self.assertEqual(default_member, "1989")
def test_update_default_member_for_alternate_hierarchy(self):
self.add_other_hierarchy()
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name, "Other Hierarchy")
self.assertEqual(default_member, "Other Total Years")
self.tm1.dimensions.hierarchies.update_default_member(self.dimension_name, self.dimension_name, "1989")
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name, self.dimension_name)
self.assertEqual(default_member, "1989")
def test_update_default_member_for_leaves_hierarchy(self):
self.add_other_hierarchy()
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name, "Leaves")
self.assertEqual(default_member, "No Year")
self.tm1.dimensions.hierarchies.update_default_member(self.dimension_name, self.dimension_name, "1989")
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name, self.dimension_name)
self.assertEqual(default_member, "1989")
def test_update_default_member_with_invalid_value(self):
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name, self.dimension_name)
self.assertEqual(default_member, "Total Years")
self.tm1.dimensions.hierarchies.update_default_member(
self.dimension_name,
self.dimension_name,
member_name="I am not a valid Member")
default_member = self.tm1.dimensions.hierarchies.get_default_member(self.dimension_name, self.dimension_name)
self.assertEqual(default_member, "Total Years")
def test_remove_all_edges(self):
hierarchy = self.tm1.dimensions.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertGreater(len(hierarchy.edges), 0)
self.tm1.dimensions.hierarchies.remove_all_edges(self.dimension_name, self.dimension_name)
hierarchy = self.tm1.dimensions.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertEqual(len(hierarchy.edges), 0)
def test_remove_edges_under_consolidation(self):
members = self.tm1.dimensions.hierarchies.elements.get_members_under_consolidation(
self.dimension_name,
self.dimension_name,
'Total Years')
self.assertGreater(len(members), 0)
self.tm1.dimensions.hierarchies.remove_edges_under_consolidation(
self.dimension_name,
self.dimension_name,
'Total Years')
members = self.tm1.dimensions.hierarchies.elements.get_members_under_consolidation(
self.dimension_name,
self.dimension_name,
'Total Years')
self.assertEqual(len(members), 0)
def test_add_edges(self):
edges = {("Total Years", "My Element"): 1, ("Total Years", "No Year"): 1}
self.tm1.dimensions.hierarchies.add_edges(self.dimension_name, self.dimension_name, edges)
hierarchy = self.tm1.dimensions.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertEqual(hierarchy.edges[("Total Years", "My Element")], 1)
self.assertEqual(hierarchy.edges[("Total Years", "No Year")], 1)
def test_add_edges_fail_existing(self):
with self.assertRaises(TM1pyException) as _:
edges = {("Total Years", "1989"): 1}
self.tm1.dimensions.hierarchies.add_edges(self.dimension_name, self.dimension_name, edges)
def test_is_balanced_false(self):
is_balanced = self.tm1.dimensions.hierarchies.is_balanced(self.dimension_name, self.dimension_name)
self.assertFalse(is_balanced)
def test_is_balanced_true(self):
balanced_hierarchy_name = "Balanced Hierarchy"
self.add_balanced_hierarchy(balanced_hierarchy_name)
is_balanced = self.tm1.dimensions.hierarchies.is_balanced(self.dimension_name, balanced_hierarchy_name)
self.assertTrue(is_balanced)
def test_hierarchy_remove_all_elements(self):
hierarchy = self.tm1.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertGreater(len(hierarchy.elements), 0)
self.assertGreater(len(hierarchy.edges), 0)
hierarchy.remove_all_elements()
self.tm1.hierarchies.update(hierarchy)
hierarchy = self.tm1.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertEqual(len(hierarchy.elements), 0)
self.assertEqual(len(hierarchy.edges), 0)
def test_hierarchy_remove_all_edges(self):
hierarchy = self.tm1.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertGreater(len(hierarchy.elements), 0)
self.assertGreater(len(hierarchy.edges), 0)
hierarchy.remove_all_edges()
self.tm1.hierarchies.update(hierarchy)
hierarchy = self.tm1.hierarchies.get(self.dimension_name, self.dimension_name)
self.assertGreater(len(hierarchy.elements), 0)
self.assertEqual(len(hierarchy.edges), 0)
if __name__ == '__main__':
unittest.main()
|
|
from django.urls import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from lfs.core.fields.thumbs import ImageWithThumbsField
class Manufacturer(models.Model):
"""The manufacturer is the unique creator of a product.
"""
name = models.CharField(_(u"Name"), max_length=50)
slug = models.SlugField(_(u"Slug"), unique=True)
short_description = models.TextField(_(u"Short description"), blank=True)
description = models.TextField(_(u"Description"), blank=True)
image = ImageWithThumbsField(_(u"Image"), upload_to="images", blank=True, null=True, sizes=((60, 60), (100, 100), (200, 200), (400, 400)))
position = models.IntegerField(_(u"Position"), default=1000)
active_formats = models.BooleanField(_(u"Active formats"), default=False)
product_rows = models.IntegerField(_(u"Product rows"), default=3)
product_cols = models.IntegerField(_(u"Product cols"), default=3)
meta_title = models.CharField(_(u"Meta title"), max_length=100, default="<name>")
meta_keywords = models.TextField(_(u"Meta keywords"), blank=True)
meta_description = models.TextField(_(u"Meta description"), blank=True)
class Meta:
ordering = ("name", )
app_label = 'manufacturer'
def __str__(self):
return self.name
def get_absolute_url(self):
"""Returns the absolute url of the manufacturer
"""
return reverse("lfs_manufacturer", kwargs={"slug": self.slug})
def get_format_info(self):
"""Returns format information.
"""
if self.active_formats is True:
return {
"product_cols": self.product_cols,
"product_rows": self.product_rows
}
else:
try:
                # TODO: Use cache here. Maybe we need a lfs_get_object,
                # which raises an ObjectDoesNotExist if the object does not
                # exist.
from lfs.core.models import Shop
shop = Shop.objects.get(pk=1)
except ObjectDoesNotExist:
return {
"product_cols": 3,
"product_rows": 3
}
else:
return {
"product_cols": shop.product_cols,
"product_rows": shop.product_rows
}
def get_meta_title(self):
"""Returns the meta keywords of the catgory.
"""
mt = self.meta_title.replace("<name>", self.name)
return mt
def get_meta_keywords(self):
"""Returns the meta keywords of the catgory.
"""
mk = self.meta_keywords.replace("<name>", self.name)
mk = mk.replace("<short-description>", self.short_description)
return mk
def get_meta_description(self):
"""Returns the meta description of the product.
"""
md = self.meta_description.replace("<name>", self.name)
md = md.replace("<short-description>", self.short_description)
return md
def get_image(self):
"""Returns the image of the category if it has none it inherits that
from the parent category.
"""
if self.image:
return self.image
return None
def get_all_products(self):
"""Returns all products for manufacturer
"""
from lfs.catalog.settings import VARIANT
cache_key = "%s-manufacturer-all-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, self.id)
products = cache.get(cache_key)
if products is not None:
return products
products = self.products.filter(active=True).exclude(sub_type=VARIANT).distinct()
cache.set(cache_key, products)
return products
def get_filtered_products(self, filters, price_filter, sorting):
"""Returns products for given categories and current filters sorted by
current sorting.
"""
from lfs.catalog.models import Product
from lfs.catalog.settings import PROPERTY_VALUE_TYPE_FILTER
products = self.get_all_products()
if filters:
# Generate ids for collected products
            product_ids = products.values_list('pk', flat=True)
            product_ids = ", ".join(str(pk) for pk in product_ids)
# Generate filter
temp = []
for f in filters:
if not isinstance(f[1], (list, tuple)):
temp.append("property_id='%s' AND value='%s'" % (f[0], f[1]))
else:
temp.append("property_id='%s' AND value_as_float BETWEEN '%s' AND '%s'" % (f[0], f[1][0], f[1][1]))
fstr = " OR ".join(temp)
# TODO: Will this work with every DB?
            # Get all product ids with matching filters. The idea behind this SQL
            # query is: a product matches if, for every filter (property=value),
            # a corresponding "product property value" row exists for it.
cursor = connection.cursor()
cursor.execute("""
SELECT product_id, count(*)
FROM catalog_productpropertyvalue
WHERE product_id IN (%s) and (%s) and type=%s
GROUP BY product_id
HAVING count(*)=%s""" % (product_ids, fstr, PROPERTY_VALUE_TYPE_FILTER, len(filters)))
matched_product_ids = [row[0] for row in cursor.fetchall()]
# All variants of category products
all_variants = Product.objects.filter(parent__in=products)
if all_variants:
all_variant_ids = [str(p.id) for p in all_variants]
all_variant_ids = ", ".join(all_variant_ids)
# Variants with matching filters
cursor.execute("""
SELECT product_id, count(*)
FROM catalog_productpropertyvalue
WHERE product_id IN (%s) and %s and type=%s
GROUP BY product_id
HAVING count(*)=%s""" % (all_variant_ids, fstr, PROPERTY_VALUE_TYPE_FILTER, len(filters)))
# Get the parent ids of the variants as the "product with variants"
# should be displayed and not the variants.
variant_ids = [str(row[0]) for row in cursor.fetchall()]
if variant_ids:
variant_ids = ", ".join(variant_ids)
cursor.execute("""
SELECT parent_id
FROM catalog_product
WHERE id IN (%s)""" % variant_ids)
parent_ids = [str(row[0]) for row in cursor.fetchall()]
matched_product_ids.extend(parent_ids)
            # Now that the ids of all matching products are collected, fetch the
            # corresponding product instances.
products = Product.objects.filter(pk__in=matched_product_ids).distinct()
if price_filter:
matched_product_ids = []
# Get all variants of the products
variants = Product.objects.filter(parent__in=products)
# Filter the variants by price
variants = variants.filter(effective_price__range=[price_filter["min"], price_filter["max"]])
# Get the parent ids of the variants as the "product with variants"
# should be displayed and not the variants.
if variants:
variant_ids = [str(r.id) for r in variants]
variant_ids = ", ".join(variant_ids)
cursor = connection.cursor()
cursor.execute("""
SELECT parent_id
FROM catalog_product
WHERE id IN (%s)""" % variant_ids)
parent_ids = [str(row[0]) for row in cursor.fetchall()]
matched_product_ids.extend(parent_ids)
# Filter the products
products = products.filter(effective_price__range=[price_filter["min"], price_filter["max"]])
# Merge the results
matched_product_ids.extend(products.values_list('pk', flat=True))
# And get a new query set of all products
products = Product.objects.filter(pk__in=matched_product_ids)
if sorting:
products = products.order_by(sorting)
return products
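# Minimal usage sketch for get_filtered_products; the slug, property ids, and
# value ranges below are made-up illustrations, not fixtures of this project:
#
#   manufacturer = Manufacturer.objects.get(slug="acme")
#   products = manufacturer.get_filtered_products(
#       filters=[(1, "red"), (2, (10, 20))],   # property 1 == "red", property 2 in [10, 20]
#       price_filter={"min": 5, "max": 50},
#       sorting="-effective_price")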
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Character.characterImage'
db.add_column('main_character', 'characterImage',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.ImageFile'], related_name='characterImages', null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Character.characterImage'
db.delete_column('main_character', 'characterImage_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Permission']", 'symmetrical': 'False'})
},
'auth.permission': {
'Meta': {'object_name': 'Permission', 'unique_together': "(('content_type', 'codename'),)", 'ordering': "('content_type__app_label', 'content_type__model', 'codename')"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'blank': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Group']", 'symmetrical': 'False', 'related_name': "'user_set'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Permission']", 'symmetrical': 'False', 'related_name': "'user_set'"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'object_name': 'ContentType', 'unique_together': "(('app_label', 'model'),)", 'ordering': "('name',)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.aptitude': {
'Meta': {'object_name': 'Aptitude'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '20', 'null': 'True'})
},
'main.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'armour': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'availability': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'cost': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'damageAttribute': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'damageDieCount': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '10', 'null': 'True'}),
'damageDieSize': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '10', 'null': 'True'}),
'damageScale': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'null': 'True'}),
'evasion': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'itemSlot': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '2', 'null': 'True'}),
'itemType': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.ItemCategory']"}),
'magicalArmour': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'magicalEvasion': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'null': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'null': 'True'}),
'tier': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'})
},
'main.baseitemability': {
'Meta': {'object_name': 'BaseItemAbility'},
'baseItem': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.BaseItem']", 'related_name': "'abilities'", 'symmetrical': 'False'}),
'craftPoints': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'equippableTo': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tier': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
'usedInCrafting': ('django.db.models.fields.BooleanField', [], {})
},
'main.baseskill': {
'Meta': {'object_name': 'BaseSkill'},
'aptitude': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Aptitude']"}),
'attribute': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '20', 'null': 'True'}),
'halfRate': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '20', 'null': 'True'}),
'skillType': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '20', 'null': 'True'}),
'specialized': ('django.db.models.fields.BooleanField', [], {})
},
'main.character': {
'Meta': {'object_name': 'Character'},
'accessorySlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'related_name': "'equippedAccessories'", 'null': 'True'}),
'accessorySlot2': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'related_name': "'equippedAccessories2'", 'null': 'True'}),
'age': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '4', 'null': 'True'}),
'agility': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'baseHP': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'baseMP': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'blurb': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '1000', 'null': 'True'}),
'bodySlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'related_name': "'equippedBodies'", 'null': 'True'}),
'bonusAptitudes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Aptitude']", 'symmetrical': 'False'}),
'characterImage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.ImageFile']", 'related_name': "'characterImages'", 'null': 'True'}),
'gil': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '10', 'null': 'True'}),
'handSlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'related_name': "'equippedHands'", 'null': 'True'}),
'headSlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'related_name': "'equippedHeads'", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Job']", 'related_name': "'characters'"}),
'level': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'magic': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'null': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Race']", 'related_name': "'characters'"}),
'secondWeaponSlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'related_name': "'equippedSecondaryWeapons'", 'null': 'True'}),
'speed': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'spirit': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'strength': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'traits': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Trait']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'related_name': "'characters'", 'null': 'True'}),
'vitality': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'weaponSlot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Item']", 'related_name': "'equippedWeapons'", 'null': 'True'}),
'xp': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'max_length': '10', 'null': 'True'})
},
'main.imagefile': {
'Meta': {'object_name': 'ImageFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '254'})
},
'main.item': {
'Meta': {'object_name': 'Item'},
'baseItem': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseItem']"}),
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'related_name': "'items'"}),
'damageAttribute': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '3', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'})
},
'main.itemcategory': {
'Meta': {'object_name': 'ItemCategory'},
'baseSkill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseSkill']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'subCategory': ('django.db.models.fields.IntegerField', [], {'max_length': '2'})
},
'main.job': {
'Meta': {'object_name': 'Job'},
'accuracyBonus': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'aptitude': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'to': "orm['main.Aptitude']", 'null': 'True'}),
'expertiseAttribute': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '20', 'null': 'True'}),
'expertiseSkill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'to': "orm['main.BaseSkill']", 'null': 'True'}),
'hasMP': ('django.db.models.fields.BooleanField', [], {}),
'hpDie': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.ItemCategory']", 'symmetrical': 'False'}),
'maxAgility': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxMagic': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpeed': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxStrength': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxVitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'mpDie': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '20', 'null': 'True'}),
'skillPoints': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
},
'main.overviewbox': {
'Meta': {'object_name': 'OverviewBox'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'viewName': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.overviewboxsetting': {
'Meta': {'object_name': 'OverviewBoxSetting'},
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'related_name': "'overviewBoxSettings'"}),
'enabled': ('django.db.models.fields.BooleanField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'overviewBox': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.OverviewBox']"}),
'sortOrder': ('django.db.models.fields.IntegerField', [], {}),
'spanFull': ('django.db.models.fields.BooleanField', [], {})
},
'main.race': {
'Meta': {'object_name': 'Race'},
'dayVision': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'hearing': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lifeSense': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'magicSense': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxAgility': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxMagic': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpeed': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxSpirit': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxStrength': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'maxVitality': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '20', 'null': 'True'}),
'nightVision': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'smell': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
},
'main.skill': {
'Meta': {'object_name': 'Skill'},
'baseSkill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.BaseSkill']"}),
'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'related_name': "'skills'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'specialization': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '20', 'null': 'True'})
},
'main.trait': {
'Meta': {'object_name': 'Trait'},
'cost': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'traitType': ('django.db.models.fields.IntegerField', [], {})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'currentCharacter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Character']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['main']
|
|
import numpy as np
from neupy import layers
from neupy.utils import asfloat
from neupy.exceptions import LayerConnectionError
from base import BaseTestCase
class OldInlineDefinitionsTestCase(BaseTestCase):
def test_inline_network_python_compatibility(self):
network = layers.Input(1) > layers.Relu(2)
self.assertTrue(network.__bool__())
self.assertTrue(network.__nonzero__())
def test_inline_network_order(self):
input_layer_1 = layers.Input(1)
relu_2 = layers.Relu(2)
relu_3 = layers.Relu(3)
network_1 = input_layer_1 > relu_2 > relu_3
self.assertEqual(list(network_1), [input_layer_1, relu_2, relu_3])
input_layer_4 = layers.Input(4)
relu_5 = layers.Relu(5)
relu_6 = layers.Relu(6)
network_2 = input_layer_4 > relu_5
self.assertEqual(list(network_2), [input_layer_4, relu_5])
network_3 = relu_5 > relu_6
self.assertEqual(list(network_3), [relu_5, relu_6])
    def test_inline_network_with_different_pointers(self):
relu_2 = layers.Relu(2)
network_1 = layers.Input(1) > relu_2 > layers.Relu(3)
network_2 = relu_2 > layers.Relu(4)
network_3 = layers.Input(1) > relu_2
self.assertShapesEqual(network_1.input_shape, (None, 1))
self.assertShapesEqual(network_1.output_shape, (None, 3))
self.assertShapesEqual(network_2.input_shape, None)
self.assertShapesEqual(network_2.output_shape, (None, 4))
self.assertShapesEqual(network_3.input_shape, (None, 1))
self.assertShapesEqual(network_3.output_shape, (None, 2))
self.assertIn(relu_2, network_2.input_layers)
self.assertIn(relu_2, network_3.output_layers)
def test_inline_network_with_parallel_network(self):
left_branch = layers.join(
layers.Convolution((3, 3, 32)),
layers.Relu(),
layers.MaxPooling((2, 2)),
)
right_branch = layers.join(
layers.Convolution((7, 7, 16)),
layers.Relu(),
)
input_layer = layers.Input((10, 10, 3))
concat = layers.Concatenate()
network_concat = input_layer > (left_branch | right_branch) > concat
network = network_concat > layers.Reshape() > layers.Softmax()
self.assertShapesEqual(network_concat.input_shape, (None, 10, 10, 3))
self.assertShapesEqual(network_concat.output_shape, (None, 4, 4, 48))
self.assertShapesEqual(network.input_shape, (None, 10, 10, 3))
self.assertShapesEqual(network.output_shape, (None, 768))
class InlineDefinitionsTestCase(BaseTestCase):
def test_inline_definition(self):
network = layers.Input(2) >> layers.Relu(10) >> layers.Tanh(1)
self.assertShapesEqual(network.input_shape, (None, 2))
self.assertShapesEqual(network.output_shape, (None, 1))
input_value = asfloat(np.random.random((10, 2)))
output_value = self.eval(network.output(input_value))
self.assertEqual(output_value.shape, (10, 1))
def test_network_shape_multiple_inputs(self):
in1 = layers.Input(10)
in2 = layers.Input(20)
conn = (in1 | in2) >> layers.Concatenate()
self.assertShapesEqual(conn.input_shape, [(None, 10), (None, 20)])
self.assertShapesEqual(conn.output_shape, (None, 30))
def test_network_shape_multiple_outputs(self):
conn = layers.Input(10) >> (layers.Sigmoid(1) | layers.Sigmoid(2))
self.assertShapesEqual(conn.input_shape, (None, 10))
self.assertShapesEqual(conn.output_shape, [(None, 1), (None, 2)])
def test_inline_operation_order(self):
in1 = layers.Input(10)
out1 = layers.Sigmoid(1)
out2 = layers.Sigmoid(2)
conn = in1 >> out1 | out2
self.assertShapesEqual(conn.input_shape, [(None, 10), None])
self.assertShapesEqual(conn.output_shape, [(None, 1), (None, 2)])
def test_sequential_partial_definitions(self):
        # Tree structure:
        #
        #                Sigmoid(10)
        #               /
        # Input(10) - Sigmoid(5)
        #               \
        #                Softmax(20)
        #
input_layer = layers.Input(10)
minimized = input_layer >> layers.Sigmoid(5)
reconstructed = minimized >> layers.Sigmoid(10)
classifier = minimized >> layers.Softmax(20)
x_matrix = asfloat(np.random.random((3, 10)))
minimized_output = self.eval(minimized.output(x_matrix))
self.assertEqual((3, 5), minimized_output.shape)
reconstructed_output = self.eval(reconstructed.output(x_matrix))
self.assertEqual((3, 10), reconstructed_output.shape)
classifier_output = self.eval(classifier.output(x_matrix))
self.assertEqual((3, 20), classifier_output.shape)
def test_inplace_seq_operator(self):
network = layers.Input(1)
network >>= layers.Relu(2)
network >>= layers.Relu(3)
self.assertEqual(len(network), 3)
self.assertShapesEqual(network.input_shape, (None, 1))
self.assertShapesEqual(network.output_shape, (None, 3))
def test_inplace_parallel(self):
network = layers.Input(10)
network |= layers.Input(10)
network >>= layers.Concatenate()
self.assertEqual(len(network), 3)
self.assertShapesEqual(network.input_shape, [(None, 10), (None, 10)])
self.assertShapesEqual(network.output_shape, (None, 20))
class DefinitionsTestCase(BaseTestCase):
def test_one_to_many_parallel_network_output(self):
one_to_many = layers.join(
layers.Input(4),
layers.parallel(
layers.Linear(11),
layers.Linear(12),
layers.Linear(13),
),
)
input_value = asfloat(np.random.random((10, 4)))
actual_output = self.eval(one_to_many.output(input_value))
self.assertEqual(actual_output[0].shape, (10, 11))
self.assertEqual(actual_output[1].shape, (10, 12))
self.assertEqual(actual_output[2].shape, (10, 13))
def test_networks_with_complex_parallel_relations(self):
input_layer = layers.Input((5, 5, 3))
network = layers.join(
layers.parallel([
layers.Convolution((1, 1, 8)),
], [
layers.Convolution((1, 1, 4)),
layers.parallel(
layers.Convolution((1, 3, 2), padding='same'),
layers.Convolution((3, 1, 2), padding='same'),
),
], [
layers.Convolution((1, 1, 8)),
layers.Convolution((3, 3, 4), padding='same'),
layers.parallel(
layers.Convolution((1, 3, 2), padding='same'),
layers.Convolution((3, 1, 2), padding='same'),
)
], [
layers.MaxPooling((3, 3), padding='same', stride=(1, 1)),
layers.Convolution((1, 1, 8)),
]),
layers.Concatenate(),
)
self.assertShapesEqual(network.input_shape, [None, None, None, None])
self.assertShapesEqual(network.output_shape, (None, None, None, 24))
        # Connect the input at the end, because we need to make
        # sure that parallel networks can be defined without input shapes
network = layers.join(input_layer, network)
self.assertShapesEqual(network.output_shape, (None, 5, 5, 24))
def test_residual_networks(self):
network = layers.join(
layers.Input((5, 5, 3)),
layers.parallel(
layers.Identity(),
layers.join(
layers.Convolution((3, 3, 8), padding='same'),
layers.Relu(),
),
),
layers.Concatenate(),
)
self.assertShapesEqual((None, 5, 5, 3), network.input_shape)
self.assertShapesEqual((None, 5, 5, 11), network.output_shape)
def test_fail_many_to_many_connection(self):
network_a = layers.join(
layers.Input(10),
layers.parallel(
layers.Relu(5),
layers.Relu(4),
),
)
network_b = layers.join(
layers.parallel(
layers.Relu(5),
layers.Relu(4),
),
layers.Concatenate(),
)
error_message = "Cannot make many to many connection between graphs"
with self.assertRaisesRegexp(LayerConnectionError, error_message):
layers.join(network_a, network_b)
def test_fail_when_cycle_created(self):
network = layers.join(
layers.Input(10),
layers.Relu(10),
)
error_message = (
"Cannot define connection between layers, "
"because it creates cycle in the graph"
)
with self.assertRaisesRegexp(LayerConnectionError, error_message):
layers.join(network, network)
extra_relu = layers.Relu(5)
network = layers.join(network, extra_relu)
with self.assertRaisesRegexp(LayerConnectionError, error_message):
layers.join(network, extra_relu)
class RepeatArchitectureTestCase(BaseTestCase):
def test_repeat_layer(self):
network = layers.repeat(layers.Relu(10), n=5)
self.assertEqual(len(network), 5)
self.assertShapesEqual(network.output_shape, (None, 10))
def test_repeat_network(self):
block = layers.join(
layers.Convolution((3, 3, 32)),
layers.Relu(),
layers.BatchNorm(),
)
network = layers.repeat(block, n=5)
self.assertEqual(len(network), 15)
self.assertShapesEqual(network.output_shape, (None, None, None, 32))
def test_failed_to_repeat_network(self):
network = layers.join(
layers.Input(10),
layers.Relu(5),
)
network.create_variables()
relu = network.layers[1]
error_message = "input shape is incompatible with the output shape"
with self.assertRaisesRegexp(LayerConnectionError, error_message):
layers.repeat(relu, n=4)
def test_wrong_number_of_repeats(self):
error_message = "parameter should be a positive integer"
for wrong_value in (0, 1.5, 9. / 3.):
with self.assertRaisesRegexp(ValueError, error_message):
layers.repeat(layers.Relu(10), n=wrong_value)
def test_repeat_once(self):
input_layer = layers.Relu(10)
output_layer = layers.repeat(input_layer, n=1)
self.assertIs(output_layer, input_layer)
def test_repeat_with_name_patterns(self):
network = layers.repeat(layers.Relu(10, name='rl{}'), n=4)
layer_names = [layer.name for layer in network.layers]
self.assertSequenceEqual(layer_names, ['rl1', 'rl2', 'rl3', 'rl4'])
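# A minimal standalone sketch (not part of the original tests) of the inline
# operators exercised above: `>>` joins layers sequentially and `|` combines
# them in parallel; the expected shapes follow from the tests in this module.
if __name__ == '__main__':
    demo = layers.Input(4) >> (layers.Relu(8) | layers.Relu(8)) >> layers.Concatenate()
    print(demo.input_shape)   # expected: (None, 4)
    print(demo.output_shape)  # expected: (None, 16)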
|
|
# -*- coding: utf-8 -*-
class ScratchCommands:
def __init__(self):
self.catNames = [
"operators",
"sensing",
"data",
"looks",
"motion",
"events",
"control",
"pen",
"sound",
"MoreBlocks"]
self.catCommands = {}
self.catCommands["operators"] = ["-",
"*",
"/",
"&",
"%",
"+",
"<",
"=",
">",
"|",
"abs",
"computeFunction:of:",
"concatenate:with:",
"letter:of:",
"not",
"randomFrom:to:",
"rounded",
"sqrt",
"stringLength:"]
self.catCommands["sensing"] = ["answer",
"color:sees:",
"distanceTo:",
"doAsk",
"getAttribute:of:",
"isLoud",
"keyPressed:",
"mousePressed",
"mouseX",
"mouseY",
"senseVideoMotion",
"sensor:",
"sensorPressed:",
"setVideoState",
"setVideoTransparency",
"soundLevel",
"timeAndDate",
"timer",
"timerReset",
"timestamp",
"touching:",
"touchingColor:",
"yScroll",
"xScroll"]
self.catCommands["data"] = ["append:toList:",
"changeVar:by:",
"CLR_COUNT",
"contentsOfList:",
"COUNT",
"deleteLine:ofList:",
"getLine:ofList:",
"getUserId",
"getUserName",
"hideList:",
"hideVariable:",
"INCR_COUNT",
"insert:at:ofList:",
"lineCountOfList:",
"list:contains:",
"readVariable",
"setLine:ofList:to:",
"setVar:to:",
"showList:",
"showVariable:"]
self.catCommands["looks"] = ["backgroundIndex",
"changeGraphicEffect:by:",
"changeSizeBy:",
"comeToFront",
"costumeIndex",
"filterReset",
"goBackByLayers:",
"hide",
"hideAll",
"lookLike:",
"nextBackground",
"nextCostume",
"nextScene",
"say:",
"say:duration:elapsed:from:",
"scale",
"sceneName",
"setGraphicEffect:to:",
"setSizeTo:",
"show",
"showBackground:",
"startScene",
"startSceneAndWait",
"think:",
"think:duration:elapsed:from:"]
        self.catCommands["motion"] = ["bounceOffEdge",
"changeXposBy:",
"changeYposBy:",
"glideSecs:toX:y:elapsed:from:",
"forward:",
"gotoSpriteOrMouse:",
"gotoX:y:",
"heading",
"heading:",
"pointTowards:",
"scrollAlign",
"scrollRight",
"scrollUp",
"setRotationStyle",
"turnLeft:",
"turnRight:",
"xpos",
"xpos:",
"ypos",
"ypos:"]
        self.catCommands["events"] = ["broadcast:",
"doBroadcastAndWait",
"whenClicked",
"whenGreenFlag",
"whenIReceive",
"whenKeyPressed",
"whenSceneStarts",
"whenSensorGreaterThan"]
        self.catCommands["control"] = ["createCloneOf",
"deleteClone",
"doForever",
"doForeverIf",
"doForLoop",
"doIf",
"doIfElse",
"doRepeat",
"doReturn",
"doUntilRepeat",
"doWaitUntil",
"doWhile",
"stopAll",
"stopScripts",
"wait:elapsed:from:",
"warpSpeed",
"whenCloned"]
        self.catCommands["pen"] = ["changePenHueBy:",
"changePenSizeBy:",
"clearPenTrails",
"penColor:",
"penSize:",
"putPenDown",
"putPenUp",
"setPenHueTo:",
"setPenShadeTo:",
"stampCostume"]
        self.catCommands["sound"] = ["changeTempoBy:",
                                     "changeVolumeBy:",
                                     "doPlaySoundAndWait",
                                     "drum:duration:elapsed:from:",
"instrument:",
"midiInstrument:",
"noteOn:duration:elapsed:from:",
"playDrum",
"playSound:",
"rest:elapsed:from:",
"setTempoTo:",
"setVolumeTo:",
"stopAllSounds",
"tempo",
"volume"]
self.catCommands["MoreBlocks"] = ["procDef"]
        self.allCommands = [
"procDef",
"-",
"*",
"/",
"&",
"%",
"+",
"<",
"=",
">",
"|",
"abs",
"answer",
"append:toList:",
"backgroundIndex",
"bounceOffEdge",
"broadcast:",
"changeGraphicEffect:by:",
"changePenHueBy:",
"changePenShadeBy:",
"changePenSizeBy:",
"changeSizeBy:",
"changeTempoBy:",
"changeVar:by:",
"changeVolumeBy:",
"changeXposBy:",
"changeYposBy:",
"clearPenTrails",
"CLR_COUNT",
"color:sees:",
"comeToFront",
"computeFunction:of:",
"concatenate:with:",
"contentsOfList:",
"costumeIndex",
"COUNT",
"createCloneOf",
"deleteClone",
"deleteLine:ofList:",
"distanceTo:",
"doAsk",
"doBroadcastAndWait",
"doForever",
"doForeverIf",
"doForLoop",
"doIf",
"doIfElse",
"doPlaySoundAndWait",
"doRepeat",
"doReturn",
"doUntilRepeat",
"doWaitUntil",
"doWhile",
"drum:duration:elapsed:from:",
"filterReset",
"forward:",
"getAttribute:of:",
"getLine:ofList:",
"getUserId",
"getUserName",
"glideSecs:toX:y:elapsed:from:",
"goBackByLayers:",
"gotoSpriteOrMouse:",
"gotoX:y:",
"headingDirection",
"heading:",
"hide",
"hideAll",
"hideList:",
"hideVariable:",
"INCR_COUNT",
"insert:at:ofList:",
"instrument:",
"isLoudLoud?",
"keyPressed:",
"letter:of:",
"lineCountOfList:",
"list:contains:",
"lookLike:",
"midiInstrument:",
"mousePressed",
"mouseX",
"mouseY",
"nextBackground",
"nextCostume",
"nextScene",
"not",
"noteOn:duration:elapsed:from:",
"penColor:",
"penSize:",
"playDrum",
"playSound:",
"pointTowards:",
"putPenDown",
"putPenUp",
"randomFrom:to:",
"readVariable",
"rest:elapsed:from:",
"rounded",
"say:",
"say:duration:elapsed:from:",
"scale",
"sceneName",
"scrollAlign",
"scrollRight",
"scrollUp",
"senseVideoMotion",
"sensor:",
"sensorPressed:",
"setGraphicEffect:to:",
"setLine:ofList:to:",
"setPenHueTo:",
"setPenShadeTo:",
"setRotationStyle",
"setSizeTo:",
"setTempoTo:",
"setVar:to:",
"setVideoState",
"setVideoTransparency",
"setVolumeTo:",
"show",
"showBackground:",
"showList:",
"showVariable:",
"soundLevel",
"sqrt",
"stampCostume",
"startScene",
"startSceneAndWait",
"stopAll",
"stopAllSounds",
"stopScripts",
"stringLength:",
"tempo",
"think:",
"think:duration:elapsed:from:",
"timeAndDate",
"timer",
"timerReset",
"timestamp",
"touching:",
"touchingColor:",
"turnLeft:",
"turnRight:",
"volume",
"wait:elapsed:from:",
"warpSpeed",
"whenClicked",
"whenCloned",
"whenGreenFlag",
"whenIReceive",
"whenKeyPressed",
"whenSceneStarts",
"whenSensorGreaterThan",
"xpos",
"xpos:",
"xScroll",
"ypos",
"ypos:",
"yScroll"]
self.hats = [
"whenClicked",
"whenGreenFlag",
"whenIReceive",
"whenKeyPressed",
"whenSceneStarts",
"whenSensorGreaterThan"]
def getCatNames(self):
return self.catNames
def getAllCommands(self):
return self.allCommands
def getCatCommands(self,category):
return self.catCommands[category]
def getHatCommands(self):
return self.hats
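# A minimal usage sketch (not part of the original module): instantiate the
# command table and look up the block selectors for one category.
if __name__ == "__main__":
    commands = ScratchCommands()
    print(commands.getCatNames())
    print(commands.getCatCommands("pen"))
    print(commands.getHatCommands())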
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from sqlalchemy.orm import exc
from neutron.common import constants as q_const
from neutron.common import ipv6_utils as ipv6
from neutron.common import utils
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
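# Maps a rule's direction to the key under which its expanded remote IP prefix
# is stored on the rule dict sent to agents.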
DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix',
'egress': 'dest_ip_prefix'}
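# Per IP version: (server port, client port, ethertype) used when building the
# provider DHCP ingress rule (DHCPv4: 67 -> 68, DHCPv6: 547 -> 546).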
DHCP_RULE_PORT = {4: (67, 68, q_const.IPv4), 6: (547, 546, q_const.IPv6)}
class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin):
"""Mixin class to add agent-based security group implementation."""
def get_port_from_device(self, device):
"""Get port dict from device name on an agent.
Subclass must provide this method.
:param device: device name which identifies a port on the agent side.
What is specified in "device" depends on a plugin agent implementation.
For example, it is a port ID in OVS agent and netdev name in Linux
Bridge agent.
:return: port dict returned by DB plugin get_port(). In addition,
it must contain the following fields in the port dict returned.
- device
- security_groups
- security_group_rules,
- security_group_source_groups
- fixed_ips
"""
raise NotImplementedError(_("%s must implement get_port_from_device.")
% self.__class__.__name__)
def create_security_group_rule(self, context, security_group_rule):
bulk_rule = {'security_group_rules': [security_group_rule]}
rule = self.create_security_group_rule_bulk_native(context,
bulk_rule)[0]
sgids = [rule['security_group_id']]
self.notifier.security_groups_rule_updated(context, sgids)
return rule
def create_security_group_rule_bulk(self, context,
security_group_rule):
rules = super(SecurityGroupServerRpcMixin,
self).create_security_group_rule_bulk_native(
context, security_group_rule)
sgids = set([r['security_group_id'] for r in rules])
self.notifier.security_groups_rule_updated(context, list(sgids))
return rules
def delete_security_group_rule(self, context, sgrid):
rule = self.get_security_group_rule(context, sgrid)
super(SecurityGroupServerRpcMixin,
self).delete_security_group_rule(context, sgrid)
self.notifier.security_groups_rule_updated(context,
[rule['security_group_id']])
def update_security_group_on_port(self, context, id, port,
original_port, updated_port):
"""Update security groups on port.
        This method returns a flag which indicates whether a notification
        is required, and does not perform the notification itself.
        This is because other changes to the port may also require notification.
"""
need_notify = False
port_updates = port['port']
if (ext_sg.SECURITYGROUPS in port_updates and
not utils.compare_elements(
original_port.get(ext_sg.SECURITYGROUPS),
port_updates[ext_sg.SECURITYGROUPS])):
# delete the port binding and read it with the new rules
port_updates[ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
self._delete_port_security_group_bindings(context, id)
self._process_port_create_security_group(
context,
updated_port,
port_updates[ext_sg.SECURITYGROUPS])
need_notify = True
else:
updated_port[ext_sg.SECURITYGROUPS] = (
original_port[ext_sg.SECURITYGROUPS])
return need_notify
def is_security_group_member_updated(self, context,
original_port, updated_port):
"""Check security group member updated or not.
        This method returns a flag which indicates whether a notification
        is required, and does not perform the notification itself.
        This is because other changes to the port may also require notification.
"""
need_notify = False
if (original_port['fixed_ips'] != updated_port['fixed_ips'] or
not utils.compare_elements(
original_port.get(ext_sg.SECURITYGROUPS),
updated_port.get(ext_sg.SECURITYGROUPS))):
need_notify = True
return need_notify
def notify_security_groups_member_updated(self, context, port):
"""Notify update event of security group members.
        The agent sets up iptables rules to allow ingress packets from the
        dhcp server (as a part of the provider rules), so we need to notify
        the plugin agent about an update of the dhcp server ip address.
        security_groups_provider_updated() just notifies that an event
        occurred; the plugin agent then fetches the updated provider
        rules in a separate RPC call (security_group_rules_for_devices).
"""
if port['device_owner'] == q_const.DEVICE_OWNER_DHCP:
self.notifier.security_groups_provider_updated(context)
        # For IPv6, provider rules need to be updated in case a router
        # interface is created or updated after the VM port is created.
elif port['device_owner'] == q_const.DEVICE_OWNER_ROUTER_INTF:
if any(netaddr.IPAddress(fixed_ip['ip_address']).version == 6
for fixed_ip in port['fixed_ips']):
self.notifier.security_groups_provider_updated(context)
else:
self.notifier.security_groups_member_updated(
context, port.get(ext_sg.SECURITYGROUPS))
def security_group_info_for_ports(self, context, ports):
sg_info = {'devices': ports,
'security_groups': {},
'sg_member_ips': {}}
rules_in_db = self._select_rules_for_ports(context, ports)
remote_security_group_info = {}
for (binding, rule_in_db) in rules_in_db:
port_id = binding['port_id']
remote_gid = rule_in_db.get('remote_group_id')
security_group_id = rule_in_db.get('security_group_id')
ethertype = rule_in_db['ethertype']
if ('security_group_source_groups'
not in sg_info['devices'][port_id]):
sg_info['devices'][port_id][
'security_group_source_groups'] = []
if remote_gid:
if (remote_gid
not in sg_info['devices'][port_id][
'security_group_source_groups']):
sg_info['devices'][port_id][
'security_group_source_groups'].append(remote_gid)
if remote_gid not in remote_security_group_info:
remote_security_group_info[remote_gid] = {}
if ethertype not in remote_security_group_info[remote_gid]:
remote_security_group_info[remote_gid][ethertype] = []
direction = rule_in_db['direction']
rule_dict = {
'direction': direction,
'ethertype': ethertype}
for key in ('protocol', 'port_range_min', 'port_range_max',
'remote_ip_prefix', 'remote_group_id'):
if rule_in_db.get(key):
if key == 'remote_ip_prefix':
direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
rule_dict[direction_ip_prefix] = rule_in_db[key]
continue
rule_dict[key] = rule_in_db[key]
if security_group_id not in sg_info['security_groups']:
sg_info['security_groups'][security_group_id] = []
if rule_dict not in sg_info['security_groups'][security_group_id]:
sg_info['security_groups'][security_group_id].append(
rule_dict)
sg_info['sg_member_ips'] = remote_security_group_info
        # the provider rules do not belong to any security group, so these
        # rules still reside in sg_info['devices'][port_id]
self._apply_provider_rule(context, sg_info['devices'])
return self._get_security_group_member_ips(context, sg_info)
def _get_security_group_member_ips(self, context, sg_info):
ips = self._select_ips_for_remote_group(
context, sg_info['sg_member_ips'].keys())
for sg_id, member_ips in ips.items():
for ip in member_ips:
ethertype = 'IPv%d' % netaddr.IPAddress(ip).version
if (ethertype in sg_info['sg_member_ips'][sg_id]
and ip not in sg_info['sg_member_ips'][sg_id][ethertype]):
sg_info['sg_member_ips'][sg_id][ethertype].append(ip)
return sg_info
def _select_rules_for_ports(self, context, ports):
if not ports:
return []
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
sgr_sgid = sg_db.SecurityGroupRule.security_group_id
query = context.session.query(sg_db.SecurityGroupPortBinding,
sg_db.SecurityGroupRule)
query = query.join(sg_db.SecurityGroupRule,
sgr_sgid == sg_binding_sgid)
query = query.filter(sg_binding_port.in_(ports.keys()))
return query.all()
def _select_ips_for_remote_group(self, context, remote_group_ids):
ips_by_group = {}
if not remote_group_ids:
return ips_by_group
for remote_group_id in remote_group_ids:
ips_by_group[remote_group_id] = []
ip_port = models_v2.IPAllocation.port_id
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
query = context.session.query(sg_binding_sgid,
models_v2.Port,
models_v2.IPAllocation.ip_address)
query = query.join(models_v2.IPAllocation,
ip_port == sg_binding_port)
query = query.join(models_v2.Port,
ip_port == models_v2.Port.id)
query = query.filter(sg_binding_sgid.in_(remote_group_ids))
for security_group_id, port, ip_address in query:
ips_by_group[security_group_id].append(ip_address)
# if there are allowed_address_pairs add them
if getattr(port, 'allowed_address_pairs', None):
for address_pair in port.allowed_address_pairs:
ips_by_group[security_group_id].append(
address_pair['ip_address'])
return ips_by_group
def _select_remote_group_ids(self, ports):
remote_group_ids = []
for port in ports.values():
for rule in port.get('security_group_rules'):
remote_group_id = rule.get('remote_group_id')
if remote_group_id:
remote_group_ids.append(remote_group_id)
return remote_group_ids
def _select_network_ids(self, ports):
return set((port['network_id'] for port in ports.values()))
def _select_dhcp_ips_for_network_ids(self, context, network_ids):
if not network_ids:
return {}
query = context.session.query(models_v2.Port,
models_v2.IPAllocation.ip_address)
query = query.join(models_v2.IPAllocation)
query = query.filter(models_v2.Port.network_id.in_(network_ids))
owner = q_const.DEVICE_OWNER_DHCP
query = query.filter(models_v2.Port.device_owner == owner)
ips = {}
for network_id in network_ids:
ips[network_id] = []
for port, ip in query:
if (netaddr.IPAddress(ip).version == 6
and not netaddr.IPAddress(ip).is_link_local()):
mac_address = port['mac_address']
ip = str(ipv6.get_ipv6_addr_by_EUI64(q_const.IPV6_LLA_PREFIX,
mac_address))
if ip not in ips[port['network_id']]:
ips[port['network_id']].append(ip)
return ips
def _select_ra_ips_for_network_ids(self, context, network_ids):
"""Select IP addresses to allow sending router advertisement from.
        If OpenStack dnsmasq sends RAs, get the link local address of the
        gateway and allow RAs from this link local address.
        The gateway port's link local address will only be obtained
        when the router is created before the VM instance is booted and
        the subnet is attached to the router.
        If OpenStack doesn't send RAs, allow RAs from the gateway IP.
        Currently, the gateway IP needs to be link local to be able
        to send RAs to the VM.
"""
if not network_ids:
return {}
ips = {}
for network_id in network_ids:
ips[network_id] = set([])
query = context.session.query(models_v2.Subnet)
subnets = query.filter(models_v2.Subnet.network_id.in_(network_ids))
for subnet in subnets:
gateway_ip = subnet['gateway_ip']
if subnet['ip_version'] != 6 or not gateway_ip:
continue
if not netaddr.IPAddress(gateway_ip).is_link_local():
if subnet['ipv6_ra_mode']:
gateway_ip = self._get_lla_gateway_ip_for_subnet(context,
subnet)
else:
# TODO(xuhanp):Figure out how to allow gateway IP from
# existing device to be global address and figure out the
# link local address by other method.
continue
if gateway_ip:
ips[subnet['network_id']].add(gateway_ip)
return ips
def _get_lla_gateway_ip_for_subnet(self, context, subnet):
query = context.session.query(models_v2.Port)
query = query.join(models_v2.IPAllocation)
query = query.filter(
models_v2.IPAllocation.subnet_id == subnet['id'])
query = query.filter(
models_v2.IPAllocation.ip_address == subnet['gateway_ip'])
query = query.filter(models_v2.Port.device_owner ==
q_const.DEVICE_OWNER_ROUTER_INTF)
try:
gateway_port = query.one()
except (exc.NoResultFound, exc.MultipleResultsFound):
LOG.warn(_('No valid gateway port on subnet %s is '
'found for IPv6 RA'), subnet['id'])
return
mac_address = gateway_port['mac_address']
lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
q_const.IPV6_LLA_PREFIX,
mac_address))
return lla_ip
def _convert_remote_group_id_to_ip_prefix(self, context, ports):
remote_group_ids = self._select_remote_group_ids(ports)
ips = self._select_ips_for_remote_group(context, remote_group_ids)
for port in ports.values():
updated_rule = []
for rule in port.get('security_group_rules'):
remote_group_id = rule.get('remote_group_id')
direction = rule.get('direction')
direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
if not remote_group_id:
updated_rule.append(rule)
continue
port['security_group_source_groups'].append(remote_group_id)
base_rule = rule
for ip in ips[remote_group_id]:
if ip in port.get('fixed_ips', []):
continue
ip_rule = base_rule.copy()
version = netaddr.IPNetwork(ip).version
ethertype = 'IPv%s' % version
if base_rule['ethertype'] != ethertype:
continue
ip_rule[direction_ip_prefix] = str(
netaddr.IPNetwork(ip).cidr)
updated_rule.append(ip_rule)
port['security_group_rules'] = updated_rule
return ports
def _add_ingress_dhcp_rule(self, port, ips):
dhcp_ips = ips.get(port['network_id'])
for dhcp_ip in dhcp_ips:
source_port, dest_port, ethertype = DHCP_RULE_PORT[
netaddr.IPAddress(dhcp_ip).version]
dhcp_rule = {'direction': 'ingress',
'ethertype': ethertype,
'protocol': 'udp',
'port_range_min': dest_port,
'port_range_max': dest_port,
'source_port_range_min': source_port,
'source_port_range_max': source_port,
'source_ip_prefix': dhcp_ip}
port['security_group_rules'].append(dhcp_rule)
def _add_ingress_ra_rule(self, port, ips):
ra_ips = ips.get(port['network_id'])
for ra_ip in ra_ips:
if not netaddr.IPAddress(ra_ip).version == 6:
return
ra_rule = {'direction': 'ingress',
'ethertype': q_const.IPv6,
'protocol': q_const.PROTO_NAME_ICMP_V6,
'source_ip_prefix': ra_ip,
'source_port_range_min': q_const.ICMPV6_TYPE_RA}
port['security_group_rules'].append(ra_rule)
def _apply_provider_rule(self, context, ports):
network_ids = self._select_network_ids(ports)
ips_dhcp = self._select_dhcp_ips_for_network_ids(context, network_ids)
ips_ra = self._select_ra_ips_for_network_ids(context, network_ids)
for port in ports.values():
self._add_ingress_ra_rule(port, ips_ra)
self._add_ingress_dhcp_rule(port, ips_dhcp)
def security_group_rules_for_ports(self, context, ports):
rules_in_db = self._select_rules_for_ports(context, ports)
for (binding, rule_in_db) in rules_in_db:
port_id = binding['port_id']
port = ports[port_id]
direction = rule_in_db['direction']
rule_dict = {
'security_group_id': rule_in_db['security_group_id'],
'direction': direction,
'ethertype': rule_in_db['ethertype'],
}
for key in ('protocol', 'port_range_min', 'port_range_max',
'remote_ip_prefix', 'remote_group_id'):
if rule_in_db.get(key):
if key == 'remote_ip_prefix':
direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
rule_dict[direction_ip_prefix] = rule_in_db[key]
continue
rule_dict[key] = rule_in_db[key]
port['security_group_rules'].append(rule_dict)
self._apply_provider_rule(context, ports)
return self._convert_remote_group_id_to_ip_prefix(context, ports)
|
|
# Prototyping of libgeos_c functions, now using a function written by
# `tartley`: http://trac.gispython.org/lab/ticket/189
import ctypes
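# Distinct c_char_p subclass used as the restype for strings allocated by
# GEOS: keeping a separate type stops ctypes from converting the result
# straight to a Python string, so the underlying memory can still be freed
# (e.g. with GEOSFree where available).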
class allocated_c_char_p(ctypes.c_char_p):
pass
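# Callback signature for the notice/error handlers passed to initGEOS below:
# each handler receives a message format string plus one string argument and
# returns nothing.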
EXCEPTION_HANDLER_FUNCTYPE = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_char_p)
def prototype(lgeos, geosVersion):
lgeos.initGEOS.argtypes = [EXCEPTION_HANDLER_FUNCTYPE, EXCEPTION_HANDLER_FUNCTYPE]
lgeos.initGEOS.restype = None
lgeos.finishGEOS.argtypes = []
lgeos.finishGEOS.restype = None
lgeos.GEOSversion.argtypes = []
lgeos.GEOSversion.restype = ctypes.c_char_p
lgeos.GEOSGeomFromWKT.restype = ctypes.c_void_p
lgeos.GEOSGeomFromWKT.argtypes = [ctypes.c_char_p]
lgeos.GEOSGeomToWKT.restype = allocated_c_char_p
lgeos.GEOSGeomToWKT.argtypes = [ctypes.c_void_p]
lgeos.GEOS_setWKBOutputDims.restype = ctypes.c_int
lgeos.GEOS_setWKBOutputDims.argtypes = [ctypes.c_int]
lgeos.GEOSGeomFromWKB_buf.restype = ctypes.c_void_p
lgeos.GEOSGeomFromWKB_buf.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
lgeos.GEOSGeomToWKB_buf.restype = allocated_c_char_p
lgeos.GEOSGeomToWKB_buf.argtypes = [ctypes.c_void_p , ctypes.POINTER(ctypes.c_size_t)]
lgeos.GEOSCoordSeq_create.restype = ctypes.c_void_p
lgeos.GEOSCoordSeq_create.argtypes = [ctypes.c_uint, ctypes.c_uint]
lgeos.GEOSCoordSeq_clone.restype = ctypes.c_void_p
lgeos.GEOSCoordSeq_clone.argtypes = [ctypes.c_void_p]
lgeos.GEOSCoordSeq_destroy.restype = None
lgeos.GEOSCoordSeq_destroy.argtypes = [ctypes.c_void_p]
lgeos.GEOSCoordSeq_setX.restype = ctypes.c_int
lgeos.GEOSCoordSeq_setX.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_double]
lgeos.GEOSCoordSeq_setY.restype = ctypes.c_int
lgeos.GEOSCoordSeq_setY.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_double]
lgeos.GEOSCoordSeq_setZ.restype = ctypes.c_int
lgeos.GEOSCoordSeq_setZ.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_double]
lgeos.GEOSCoordSeq_setOrdinate.restype = ctypes.c_int
lgeos.GEOSCoordSeq_setOrdinate.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_double]
lgeos.GEOSCoordSeq_getX.restype = ctypes.c_int
lgeos.GEOSCoordSeq_getX.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_void_p]
lgeos.GEOSCoordSeq_getY.restype = ctypes.c_int
lgeos.GEOSCoordSeq_getY.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_void_p]
lgeos.GEOSCoordSeq_getZ.restype = ctypes.c_int
lgeos.GEOSCoordSeq_getZ.argtypes = [ctypes.c_void_p, ctypes.c_uint, ctypes.c_void_p]
lgeos.GEOSCoordSeq_getSize.restype = ctypes.c_int
lgeos.GEOSCoordSeq_getSize.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSCoordSeq_getDimensions.restype = ctypes.c_int
lgeos.GEOSCoordSeq_getDimensions.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSGeom_createPoint.restype = ctypes.c_void_p
lgeos.GEOSGeom_createPoint.argtypes = [ctypes.c_void_p]
lgeos.GEOSGeom_createLinearRing.restype = ctypes.c_void_p
lgeos.GEOSGeom_createLinearRing.argtypes = [ctypes.c_void_p]
lgeos.GEOSGeom_createLineString.restype = ctypes.c_void_p
lgeos.GEOSGeom_createLineString.argtypes = [ctypes.c_void_p]
lgeos.GEOSGeom_createPolygon.restype = ctypes.c_void_p
lgeos.GEOSGeom_createPolygon.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint]
lgeos.GEOSGeom_createCollection.restype = ctypes.c_void_p
lgeos.GEOSGeom_createCollection.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_uint]
lgeos.GEOSGeom_clone.restype = ctypes.c_void_p
lgeos.GEOSGeom_clone.argtypes = [ctypes.c_void_p]
lgeos.GEOSGeom_destroy.restype = None
lgeos.GEOSGeom_destroy.argtypes = [ctypes.c_void_p]
lgeos.GEOSEnvelope.restype = ctypes.c_void_p
lgeos.GEOSEnvelope.argtypes = [ctypes.c_void_p]
lgeos.GEOSIntersection.restype = ctypes.c_void_p
lgeos.GEOSIntersection.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSBuffer.restype = ctypes.c_void_p
lgeos.GEOSBuffer.argtypes = [ctypes.c_void_p, ctypes.c_double, ctypes.c_int]
lgeos.GEOSSimplify.restype = ctypes.c_void_p
lgeos.GEOSSimplify.argtypes = [ctypes.c_void_p, ctypes.c_double]
lgeos.GEOSTopologyPreserveSimplify.restype = ctypes.c_void_p
lgeos.GEOSTopologyPreserveSimplify.argtypes = [ctypes.c_void_p, ctypes.c_double]
lgeos.GEOSConvexHull.restype = ctypes.c_void_p
lgeos.GEOSConvexHull.argtypes = [ctypes.c_void_p]
lgeos.GEOSDifference.restype = ctypes.c_void_p
lgeos.GEOSDifference.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSSymDifference.restype = ctypes.c_void_p
lgeos.GEOSSymDifference.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSBoundary.restype = ctypes.c_void_p
lgeos.GEOSBoundary.argtypes = [ctypes.c_void_p]
lgeos.GEOSUnion.restype = ctypes.c_void_p
lgeos.GEOSUnion.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSPointOnSurface.restype = ctypes.c_void_p
lgeos.GEOSPointOnSurface.argtypes = [ctypes.c_void_p]
lgeos.GEOSGetCentroid.restype = ctypes.c_void_p
lgeos.GEOSGetCentroid.argtypes = [ctypes.c_void_p]
lgeos.GEOSRelate.restype = allocated_c_char_p
lgeos.GEOSRelate.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSPolygonize.restype = ctypes.c_void_p
lgeos.GEOSPolygonize.argtypes = [ctypes.c_void_p, ctypes.c_uint]
lgeos.GEOSLineMerge.restype = ctypes.c_void_p
lgeos.GEOSLineMerge.argtypes = [ctypes.c_void_p]
lgeos.GEOSRelatePattern.restype = ctypes.c_char
lgeos.GEOSRelatePattern.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p]
lgeos.GEOSDisjoint.restype = ctypes.c_byte
lgeos.GEOSDisjoint.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSTouches.restype = ctypes.c_byte
lgeos.GEOSTouches.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSIntersects.restype = ctypes.c_byte
lgeos.GEOSIntersects.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSCrosses.restype = ctypes.c_byte
lgeos.GEOSCrosses.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSWithin.restype = ctypes.c_byte
lgeos.GEOSWithin.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSContains.restype = ctypes.c_byte
lgeos.GEOSContains.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSOverlaps.restype = ctypes.c_byte
lgeos.GEOSOverlaps.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSEquals.restype = ctypes.c_byte
lgeos.GEOSEquals.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSEqualsExact.restype = ctypes.c_byte
lgeos.GEOSEqualsExact.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_double]
lgeos.GEOSisEmpty.restype = ctypes.c_byte
lgeos.GEOSisEmpty.argtypes = [ctypes.c_void_p]
lgeos.GEOSisValid.restype = ctypes.c_byte
lgeos.GEOSisValid.argtypes = [ctypes.c_void_p]
lgeos.GEOSisSimple.restype = ctypes.c_byte
lgeos.GEOSisSimple.argtypes = [ctypes.c_void_p]
lgeos.GEOSisRing.restype = ctypes.c_byte
lgeos.GEOSisRing.argtypes = [ctypes.c_void_p]
lgeos.GEOSHasZ.restype = ctypes.c_byte
lgeos.GEOSHasZ.argtypes = [ctypes.c_void_p]
lgeos.GEOSGeomType.restype = ctypes.c_char_p
lgeos.GEOSGeomType.argtypes = [ctypes.c_void_p]
lgeos.GEOSGeomTypeId.restype = ctypes.c_int
lgeos.GEOSGeomTypeId.argtypes = [ctypes.c_void_p]
lgeos.GEOSGetSRID.restype = ctypes.c_int
lgeos.GEOSGetSRID.argtypes = [ctypes.c_void_p]
lgeos.GEOSSetSRID.restype = None
lgeos.GEOSSetSRID.argtypes = [ctypes.c_void_p, ctypes.c_int]
lgeos.GEOSGetNumGeometries.restype = ctypes.c_int
lgeos.GEOSGetNumGeometries.argtypes = [ctypes.c_void_p]
lgeos.GEOSGetGeometryN.restype = ctypes.c_void_p
lgeos.GEOSGetGeometryN.argtypes = [ctypes.c_void_p, ctypes.c_int]
lgeos.GEOSGetNumInteriorRings.restype = ctypes.c_int
lgeos.GEOSGetNumInteriorRings.argtypes = [ctypes.c_void_p]
lgeos.GEOSGetInteriorRingN.restype = ctypes.c_void_p
lgeos.GEOSGetInteriorRingN.argtypes = [ctypes.c_void_p, ctypes.c_int]
lgeos.GEOSGetExteriorRing.restype = ctypes.c_void_p
lgeos.GEOSGetExteriorRing.argtypes = [ctypes.c_void_p]
lgeos.GEOSGetNumCoordinates.restype = ctypes.c_int
lgeos.GEOSGetNumCoordinates.argtypes = [ctypes.c_void_p]
lgeos.GEOSGeom_getCoordSeq.restype = ctypes.c_void_p
lgeos.GEOSGeom_getCoordSeq.argtypes = [ctypes.c_void_p]
lgeos.GEOSGeom_getDimensions.restype = ctypes.c_int
lgeos.GEOSGeom_getDimensions.argtypes = [ctypes.c_void_p]
lgeos.GEOSArea.restype = ctypes.c_double
lgeos.GEOSArea.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSLength.restype = ctypes.c_int
lgeos.GEOSLength.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSDistance.restype = ctypes.c_int
lgeos.GEOSDistance.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
if geosVersion >= (1, 5, 0):
if hasattr(lgeos, 'GEOSFree'):
lgeos.GEOSFree.restype = None
lgeos.GEOSFree.argtypes = [ctypes.c_void_p]
# Prepared geometry, GEOS C API 1.5.0+
lgeos.GEOSPrepare.restype = ctypes.c_void_p
lgeos.GEOSPrepare.argtypes = [ctypes.c_void_p]
lgeos.GEOSPreparedGeom_destroy.restype = None
lgeos.GEOSPreparedGeom_destroy.argtypes = [ctypes.c_void_p]
lgeos.GEOSPreparedIntersects.restype = ctypes.c_int
lgeos.GEOSPreparedIntersects.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSPreparedContains.restype = ctypes.c_int
lgeos.GEOSPreparedContains.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSPreparedContainsProperly.restype = ctypes.c_int
lgeos.GEOSPreparedContainsProperly.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSPreparedCovers.restype = ctypes.c_int
lgeos.GEOSPreparedCovers.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSisValidReason.restype = allocated_c_char_p
lgeos.GEOSisValidReason.argtypes = [ctypes.c_void_p]
# Other, GEOS C API 1.5.0+
if geosVersion >= (1, 5, 0):
lgeos.GEOSUnionCascaded.restype = ctypes.c_void_p
lgeos.GEOSUnionCascaded.argtypes = [ctypes.c_void_p]
# 1.6.0
if geosVersion >= (1, 6, 0):
        # Linear referencing features aren't found in 1.5 versions, and
        # not in all libs versioned 1.6.0 either!
if hasattr(lgeos, 'GEOSProject'):
lgeos.GEOSSingleSidedBuffer.restype = ctypes.c_void_p
lgeos.GEOSSingleSidedBuffer.argtypes = [ctypes.c_void_p, ctypes.c_double, ctypes.c_int, ctypes.c_int, ctypes.c_double, ctypes.c_int]
lgeos.GEOSProject.restype = ctypes.c_double
lgeos.GEOSProject.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
lgeos.GEOSProjectNormalized.restype = ctypes.c_double
lgeos.GEOSProjectNormalized.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
lgeos.GEOSInterpolate.restype = ctypes.c_void_p
lgeos.GEOSInterpolate.argtypes = [ctypes.c_void_p,
ctypes.c_double]
lgeos.GEOSInterpolateNormalized.restype = ctypes.c_void_p
lgeos.GEOSInterpolateNormalized.argtypes = [ctypes.c_void_p,
ctypes.c_double]
# TODO: Find out what version of geos_c came with geos 3.3.0
if geosVersion >= (1, 6, 3):
lgeos.GEOSUnaryUnion.restype = ctypes.c_void_p
lgeos.GEOSUnaryUnion.argtypes = [ctypes.c_void_p]
lgeos.GEOSPolygonize_full.restype = ctypes.c_void_p
lgeos.GEOSPolygonize_full.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
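# A minimal usage sketch (not part of the original module), assuming libgeos_c
# is installed and discoverable via ctypes.util: load the library, parse the
# C API version out of GEOSversion(), and apply the prototypes above.
if __name__ == '__main__':
    import ctypes.util
    geos_path = ctypes.util.find_library('geos_c')  # platform dependent
    if geos_path:
        lgeos = ctypes.CDLL(geos_path)
        lgeos.GEOSversion.restype = ctypes.c_char_p
        lgeos.GEOSversion.argtypes = []
        # GEOSversion() returns e.g. "3.8.1-CAPI-1.13.3"; prototype() compares
        # against the C API component after "-CAPI-".
        capi = lgeos.GEOSversion().split(b'-CAPI-')[1].split()[0]
        prototype(lgeos, tuple(int(part) for part in capi.split(b'.')))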
|
|
#!/usr/bin/env python
"""Unit tests for Credential Wallet class
NERC DataGrid Project
"""
__author__ = "P J Kershaw"
__date__ = "03/10/08"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
import os
from string import Template
from cStringIO import StringIO
import cPickle as pickle
from elementtree import ElementTree
from time import sleep
from datetime import datetime, timedelta
from ndg.saml.utils import SAMLDateTime
from ndg.saml.xml.etree import AssertionElementTree
from ndg.security.common.test.unit.base import BaseTestCase
from ndg.security.common.utils.etree import prettyPrint
from ndg.security.common.credentialwallet import SAMLAssertionWallet
class CredentialWalletBaseTestCase(BaseTestCase):
THIS_DIR = os.path.dirname(__file__)
CONFIG_FILENAME = 'test_samlcredentialwallet.cfg'
CONFIG_FILEPATH = os.path.join(THIS_DIR, CONFIG_FILENAME)
class SAMLAttributeWalletTestCase(CredentialWalletBaseTestCase):
PICKLE_FILENAME = 'SAMLAttributeWalletPickle.dat'
PICKLE_FILEPATH = os.path.join(CredentialWalletBaseTestCase.THIS_DIR,
PICKLE_FILENAME)
ASSERTION_STR = (
"""<saml:Assertion ID="192c67d9-f9cd-457a-9242-999e7b943166" IssueInstant="$timeNow" Version="2.0" xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion">
<saml:Issuer Format="urn:esg:issuer">$issuerName</saml:Issuer>
<saml:Subject>
<saml:NameID Format="urn:esg:openid">https://esg.prototype.ucar.edu/myopenid/testUser</saml:NameID>
</saml:Subject>
<saml:Conditions NotBefore="$timeNow" NotOnOrAfter="$timeExpires" />
<saml:AttributeStatement>
<saml:Attribute FriendlyName="FirstName" Name="urn:esg:first:name" NameFormat="http://www.w3.org/2001/XMLSchema#string">
<saml:AttributeValue xsi:type="xs:string" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">Test</saml:AttributeValue>
</saml:Attribute>
<saml:Attribute FriendlyName="LastName" Name="urn:esg:last:name" NameFormat="http://www.w3.org/2001/XMLSchema#string">
<saml:AttributeValue xsi:type="xs:string" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">User</saml:AttributeValue>
</saml:Attribute>
<saml:Attribute FriendlyName="EmailAddress" Name="urn:esg:first:email:address" NameFormat="http://www.w3.org/2001/XMLSchema#string">
<saml:AttributeValue xsi:type="xs:string" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">[email protected]</saml:AttributeValue>
</saml:Attribute>
</saml:AttributeStatement>
</saml:Assertion>
"""
)
def __init__(self, *arg, **kw):
super(SAMLAttributeWalletTestCase, self).__init__(*arg, **kw)
def setUp(self):
self.assertion = self._createAssertion()
def _createAssertion(self, timeNow=None, validityDuration=60*60*8,
issuerName=BaseTestCase.SITEA_SAML_ISSUER_NAME):
if timeNow is None:
timeNow = datetime.utcnow()
timeExpires = timeNow + timedelta(seconds=validityDuration)
assertionStr = Template(
self.__class__.ASSERTION_STR).substitute(
dict(
issuerName=issuerName,
timeNow=SAMLDateTime.toString(timeNow),
timeExpires=SAMLDateTime.toString(timeExpires)
)
)
assertionStream = StringIO()
assertionStream.write(assertionStr)
assertionStream.seek(0)
assertionElem = ElementTree.parse(assertionStream).getroot()
return AssertionElementTree.fromXML(assertionElem)
def _addCredentials(self):
wallet = SAMLAssertionWallet()
wallet.addCredentials(self.__class__.SITEA_ATTRIBUTEAUTHORITY_URI,
[self.assertion])
return wallet
def test01AddCredentials(self):
wallet = self._addCredentials()
k = self.__class__.SITEA_ATTRIBUTEAUTHORITY_URI
self.assert_(len(wallet.retrieveCredentials(k)) == 1)
assertions = wallet.retrieveCredentials(
self.__class__.SITEA_ATTRIBUTEAUTHORITY_URI)
self.assert_(assertions)
print("SAML Assertion:\n%s" %
prettyPrint(AssertionElementTree.toXML(assertions[0])))
def test02VerifyCredential(self):
wallet = SAMLAssertionWallet()
self.assert_(wallet.isValidCredential(self.assertion))
expiredAssertion = self._createAssertion(
timeNow=datetime.utcnow() - timedelta(hours=24))
self.assert_(not wallet.isValidCredential(expiredAssertion))
futureAssertion = self._createAssertion(
timeNow=datetime.utcnow() + timedelta(hours=24))
self.assert_(not wallet.isValidCredential(futureAssertion))
def test03AuditCredentials(self):
        # Add a short-lived credential and ensure it's removed when an audit
        # is carried out to prune expired credentials
shortExpiryAssertion = self._createAssertion(validityDuration=1)
wallet = SAMLAssertionWallet()
wallet.addCredentials('a', [shortExpiryAssertion])
self.assert_(wallet.retrieveCredentials('a'))
sleep(2)
wallet.audit()
self.assert_(wallet.retrieveCredentials('a') is None)
def test04ClockSkewTolerance(self):
        # Add a short-lived credential but with the wallet set to allow for
        # clock skew so that the credential is still accepted
shortExpiryAssertion = self._createAssertion(validityDuration=1)
wallet = SAMLAssertionWallet()
        # Set a tolerance of five hours so the short-lived credential survives
wallet.clockSkewTolerance = 5.*60*60
wallet.addCredentials('a', [shortExpiryAssertion])
self.assert_(wallet.retrieveCredentials('a'))
sleep(2)
wallet.audit()
self.assert_(wallet.retrieveCredentials('a'))
def test05ReplaceCredential(self):
# Replace an existing credential from a given institution with a more
# up to date one
k = self.__class__.SITEA_ATTRIBUTEAUTHORITY_URI
wallet = self._addCredentials()
self.assert_(len(wallet.retrieveCredentials(k)) == 1)
newAssertion = self._createAssertion()
wallet.addCredentials(k, [newAssertion])
self.assert_(len(wallet.retrieveCredentials(k)) == 1)
self.assert_(newAssertion.conditions.notOnOrAfter == \
wallet.retrieveCredentials(k)[0].conditions.notOnOrAfter)
def test06CredentialsFromSeparateKeys(self):
wallet = self._addCredentials()
wallet.addCredentials("MySite",
[self._createAssertion(issuerName="MySite"),
self._createAssertion()])
self.assert_(len(wallet.retrieveCredentials("MySite")) == 2)
k = self.__class__.SITEA_ATTRIBUTEAUTHORITY_URI
self.assert_(len(wallet.retrieveCredentials(k)) == 1)
def test07Pickle(self):
wallet = self._addCredentials()
outFile = open(self.__class__.PICKLE_FILEPATH, 'w')
pickle.dump(wallet, outFile)
outFile.close()
inFile = open(self.__class__.PICKLE_FILEPATH)
unpickledWallet = pickle.load(inFile)
assertions = unpickledWallet.retrieveCredentials(
self.__class__.SITEA_ATTRIBUTEAUTHORITY_URI)
self.assert_(assertions)
self.assert_(assertions[0].issuer.value == \
self.__class__.SITEA_SAML_ISSUER_NAME)
def test08CreateFromConfig(self):
wallet = SAMLAssertionWallet.fromConfig(
self.__class__.CONFIG_FILEPATH)
self.assert_(wallet.clockSkewTolerance == timedelta(seconds=0.01))
self.assert_(wallet.userId == 'https://openid.localhost/philip.kershaw')
class SAMLAuthzDecisionWalletTestCase(CredentialWalletBaseTestCase):
"""Test wallet for caching Authorisation Decision statements"""
PICKLE_FILENAME = 'SAMLAuthzDecisionWalletPickle.dat'
PICKLE_FILEPATH = os.path.join(CredentialWalletBaseTestCase.THIS_DIR,
PICKLE_FILENAME)
RESOURCE_ID = 'http://localhost/My%20Secured%20URI'
ASSERTION_STR = """
<saml:Assertion xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" Version="2.0" IssueInstant="$timeNow" ID="c32235a9-85df-4325-99a2-bad73668c01d">
<saml:Issuer Format="urn:oasis:names:tc:SAML:1.1:nameid-format:X509SubjectName">/O=NDG/OU=BADC/CN=attributeauthority.badc.rl.ac.uk</saml:Issuer>
<saml:Subject>
<saml:NameID Format="urn:esg:openid">https://openid.localhost/philip.kershaw</saml:NameID>
</saml:Subject>
<saml:Conditions NotOnOrAfter="$timeExpires" NotBefore="$timeNow"></saml:Conditions>
<saml:AuthzDecisionStatement Decision="Permit" Resource="$resourceId">
<saml:Action Namespace="urn:oasis:names:tc:SAML:1.0:action:ghpp">GET</saml:Action>
</saml:AuthzDecisionStatement>
</saml:Assertion>
"""
def setUp(self):
self.assertion = self._createAssertion()
def _createAssertion(self, timeNow=None, validityDuration=60*60*8,
issuerName=BaseTestCase.SITEA_SAML_ISSUER_NAME):
if timeNow is None:
timeNow = datetime.utcnow()
timeExpires = timeNow + timedelta(seconds=validityDuration)
assertionStr = Template(
self.__class__.ASSERTION_STR).substitute(
dict(
issuerName=issuerName,
timeNow=SAMLDateTime.toString(timeNow),
timeExpires=SAMLDateTime.toString(timeExpires),
resourceId=self.__class__.RESOURCE_ID,
)
)
assertionStream = StringIO()
assertionStream.write(assertionStr)
assertionStream.seek(0)
assertionElem = ElementTree.parse(assertionStream).getroot()
return AssertionElementTree.fromXML(assertionElem)
def _addCredentials(self):
wallet = SAMLAssertionWallet()
wallet.addCredentials(self.__class__.RESOURCE_ID, [self.assertion])
return wallet
def test01AddCredentials(self):
wallet = self._addCredentials()
self.assert_(
len(wallet.retrieveCredentials(self.__class__.RESOURCE_ID)) == 1)
assertion = wallet.retrieveCredentials(self.__class__.RESOURCE_ID)[-1]
print("SAML Assertion:\n%s" %
prettyPrint(AssertionElementTree.toXML(assertion)))
def test02VerifyCredential(self):
wallet = SAMLAssertionWallet()
self.assert_(wallet.isValidCredential(self.assertion))
expiredAssertion = self._createAssertion(
timeNow=datetime.utcnow() - timedelta(hours=24))
self.assert_(not wallet.isValidCredential(expiredAssertion))
futureAssertion = self._createAssertion(
timeNow=datetime.utcnow() + timedelta(hours=24))
self.assert_(not wallet.isValidCredential(futureAssertion))
def test06Pickle(self):
wallet = self._addCredentials()
outFile = open(self.__class__.PICKLE_FILEPATH, 'w')
pickle.dump(wallet, outFile)
outFile.close()
inFile = open(self.__class__.PICKLE_FILEPATH)
unpickledWallet = pickle.load(inFile)
self.assert_(unpickledWallet.retrieveCredentials(
self.__class__.RESOURCE_ID))
def test07CreateFromConfig(self):
wallet = SAMLAssertionWallet.fromConfig(
self.__class__.CONFIG_FILEPATH)
self.assert_(wallet.clockSkewTolerance == timedelta(seconds=0.01))
self.assert_(wallet.userId == 'https://openid.localhost/philip.kershaw')
if __name__ == "__main__":
unittest.main()
|
|
import unittest
import charitycheck
import re
import datetime
import anydbm as dbm
class TestIRSNonprofitDataContextManager(unittest.TestCase):
    # we break the principle of keeping tests
# independent below, sometimes with good reason,
# sometimes for convenience because this is a small
# module. These tests should not be run while the module
# is in use.
def test__download_irs_nonprofit_data(self):
# get fresh copy of irs and check for exceptions
# in writing permissions, internet connections,
# etc...
charitycheck.IRSNonprofitDataContextManager(
)._download_irs_nonprofit_data()
# signal that the test passed
self.assertTrue(True)
def test_context_manager_updates_data(self):
"""check that opening the context manager
updates the local irs pub78 data.
"""
with open(charitycheck._irs_data_path, 'a+') as irs_data:
irs_data.write("TESTSTRING_FOR_CHARITYCHECK")
found_test_phrase = False
irs_data.seek(-27, 2)
for line in irs_data:
if "TESTSTRING_FOR_CHARITYCHECK" in line:
found_test_phrase = True
self.assertTrue(found_test_phrase)
with charitycheck.IRSNonprofitDataContextManager() as new_irs_data:
# see if we've overwritten the old file
found_test_phrase = False
for line in new_irs_data:
if "TESTSTRING_FOR_CHARITYCHECK" in line:
found_test_phrase = True
# assert that the test phrase has been overwritten
self.assertFalse(found_test_phrase)
def test_file_format(self):
"""check that the file downloaded from the IRS
is in the format we expect.
"""
with charitycheck.IRSNonprofitDataContextManager() as irs_data:
in_expected_format = True
# check first two lines are \n characters
in_expected_format = (in_expected_format and
irs_data.readline() == '\n')
in_expected_format = (in_expected_format and
irs_data.readline() == '\n')
for i, line in enumerate(irs_data):
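                # Each non-blank line is expected to look like
                #   EIN|Name|City[|State]|Country|DeductibilityCode(s)
                # with a 9-digit EIN; blank lines are also allowed.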
m = re.match(
r'^(?:\d{9}\|.+\|.+(?:\|[A-Z]{2})?\|.+\|(?:[A-Z],?)+''\n|\n)$',
line)
in_expected_format = in_expected_format and bool(m)
self.assertTrue(in_expected_format)
class TestMakeDBM(unittest.TestCase):
def test_make_dbm(self):
charitycheck.make_dbm() # catch any possible errors/exceptions
self.assertTrue(True)
def test_log_updates(self):
# get last updated time
with open(charitycheck._update_log, 'r') as log:
log.seek(-26, 2)
date_string = log.read()
last_updated = datetime.datetime(
# convert/unpack date_string into
# the proper format.
*(map(int, [date_string[0:4],
date_string[5:7],
date_string[8:10],
date_string[11:13],
date_string[14:16],
date_string[17:19],
date_string[20:]])))
# run make dbm
charitycheck.make_dbm()
# get new updated time
with open(charitycheck._update_log, 'r') as log:
log.seek(-26, 2)
date_string = log.read()
new_last_updated = datetime.datetime(
# convert/unpack date_string into
# the proper format.
*(map(int, [date_string[0:4],
date_string[5:7],
date_string[8:10],
date_string[11:13],
date_string[14:16],
date_string[17:19],
date_string[20:]])))
self.assertNotEqual(last_updated, new_last_updated)
def test_dbm_has_all_charities(self):
with open(charitycheck._irs_data_path) as irs_data:
nonprofits_present = True
db = dbm.open(charitycheck._publication78_dbm, 'r')
for nonprofit in irs_data:
nonprofits_present = (
nonprofits_present and
db[nonprofit[0:9]] == nonprofit[10:-1])
db.close()
self.assertTrue(nonprofits_present)
class TestGetNonprofitData(unittest.TestCase):
def test_nonprofits_info_is_found(self):
self.assertEqual(
            # assume the Red Cross will be around for a while...
charitycheck.get_nonprofit_data('530196605'),
'American National Red Cross|Charlotte|NC|United States|PC')
class TestVerifyNonprofit(unittest.TestCase):
def test_verify_nonprofit_all_arguments_when_true(self):
self.assertTrue(charitycheck.verify_nonprofit(
ein='530196605', name='American National Red Cross',
city='Charlotte', state='NC', country='United States',
deductability_code='PC'))
def test_verify_nonprofit_some_arguments_when_true_1(self):
self.assertTrue(charitycheck.verify_nonprofit(
ein='530196605', name='American National Red Cross',
city='Charlotte', state='NC', country='United States',
deductability_code=None))
def test_verify_nonprofit_some_arguments_when_true_2(self):
self.assertTrue(charitycheck.verify_nonprofit(
ein='530196605', name='American National Red Cross',
city='Charlotte', state='NC', country=None,
deductability_code=None))
def test_verify_nonprofit_some_arguments_when_true_3(self):
self.assertTrue(charitycheck.verify_nonprofit(
ein='530196605', name='American National Red Cross',
city='Charlotte'))
def test_verify_nonprofit_some_arguments_when_true_4(self):
self.assertTrue(charitycheck.verify_nonprofit(
ein='530196605', name='American National Red Cross'))
def test_verify_nonprofit_some_arguments_when_true_5(self):
self.assertTrue(charitycheck.verify_nonprofit(
ein='530196605', state='NC', country='United States',
deductability_code='PC'))
def test_verify_nonprofit_some_arguments_when_true_6(self):
self.assertTrue(charitycheck.verify_nonprofit(
ein='530196605', name=None,
city='Charlotte', state='NC', country='United States',
deductability_code='PC'))
def test_verify_nonprofit_some_arguments_when_true_7(self):
self.assertTrue(charitycheck.verify_nonprofit(
ein='530196605', name='American National Red Cross',
city=None, state=None, country='United States',
deductability_code=None))
def test_verify_nonprofit_just_ein_when_true(self):
self.assertTrue(charitycheck.verify_nonprofit(
ein='530196605'))
def test_verify_nonprofit_some_arguments_when_false(self):
self.assertFalse(charitycheck.verify_nonprofit(
ein='530196605', name='American National Red Cross',
city='Boston')) # city is a false argument
def test_verify_nonprofit_bad_ein(self):
self.assertFalse(charitycheck.verify_nonprofit(ein='4'))
class TestGetDeductabilityCode(unittest.TestCase):
def test_get_deductability_code_all_arguments_true(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605', name='American National Red Cross',
city='Charlotte', state='NC', country='United States'),
'PC')
def test_get_deductability_code_some_arguments_when_true_1(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605', name='American National Red Cross',
city=None, state='NC', country='United States'),
'PC')
def test_get_deductability_code_some_arguments_when_true_2(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605', name='American National Red Cross',
city='Charlotte', state='NC', country=None),
'PC')
def test_get_deductability_code_some_arguments_when_true_3(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605', name='American National Red Cross',
city='Charlotte'),
'PC')
def test_get_deductability_code_some_arguments_when_true_4(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605', name='American National Red Cross'),
'PC')
def test_get_deductability_code_some_arguments_when_true_5(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605', state='NC', country='United States'),
'PC')
def test_get_deductability_code_some_arguments_when_true_6(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605', name=None,
city='Charlotte', state='NC', country='United States'),
'PC')
def test_get_deductability_code_some_arguments_when_true_7(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605', name='American National Red Cross',
city=None, state=None, country='United States'),
'PC')
def test_get_deductability_code_just_ein_when_true(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605'),
'PC')
def test_get_deductability_code_some_arguments_when_false(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='530196605', name='American National Red Cross',
city='Boston'), # city is a false argument
'')
def test_get_deductability_code_bad_ein(self):
self.assertEqual(
charitycheck.get_deductability_code(
ein='6'),
'')
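# Illustrative usage sketch (not from the original module): a compact view of the charitycheck API
# as exercised by the tests above. The calls and the Red Cross EIN come straight from those tests;
# wrapping them in a function keeps the example from running (and re-downloading IRS data) on import.
def _example_charitycheck_usage():
    charitycheck.make_dbm()  # rebuild the local pub78 DBM from the downloaded IRS data
    record = charitycheck.get_nonprofit_data('530196605')  # raw pub78 record for the Red Cross
    is_nonprofit = charitycheck.verify_nonprofit(ein='530196605')  # True/False
    code = charitycheck.get_deductability_code(ein='530196605')  # 'PC' on success, '' otherwise
    return record, is_nonprofit, code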
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Simple Mac OS X Application Bundler for Mumble
#
# Loosely based on the original bash version by Sebastian Schlingmann (based, again, on an OS X application bundler
# by Thomas Keller).
#
import sys, os, string, re, shutil, plistlib, tempfile, exceptions, datetime, tarfile
from subprocess import Popen, PIPE
from optparse import OptionParser
options = None
def gitrev():
'''Get git revision of the current Mumble build.'''
return os.popen('git describe').read()[:-1]
def certificate_subject_OU(identity):
'''Extract the subject OU from the certificate matching the identity parameter in the specified keychain.'''
findCert = Popen(('/usr/bin/security', 'find-certificate', '-c', identity, '-p', options.keychain), stdout=PIPE)
pem, _ = findCert.communicate()
openssl = Popen(('/usr/bin/openssl', 'x509', '-subject', '-noout'), stdout=PIPE, stdin=PIPE)
subject, _ = openssl.communicate(pem)
attr = '/OU='
begin = subject.index(attr) + len(attr)
tmp = subject[begin:]
end = tmp.index('/')
return tmp[:end]
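# Illustrative sketch (not part of the original script): the '/OU=' slicing above assumes openssl's
# one-line subject form, e.g. "subject= /C=US/O=Example/OU=ABCDE12345/CN=...". The helper below
# repeats that extraction on a plain string so it can be exercised without a keychain; the sample
# subject and OU value are made up.
def _subject_OU_from_string(subject):
    attr = '/OU='
    begin = subject.index(attr) + len(attr)
    tmp = subject[begin:]
    return tmp[:tmp.index('/')]
# e.g. _subject_OU_from_string('subject= /C=US/O=Example/OU=ABCDE12345/CN=Developer ID Application: Example')
# returns 'ABCDE12345'.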
def lookup_file_identifier(path):
try:
d = plistlib.readPlist(os.path.join(path, 'Contents', 'Info.plist'))
return d['CFBundleIdentifier']
except:
return os.path.basename(path)
def codesign(path):
'''Call the codesign executable on the signable object at path.'''
certname = 'Developer ID Application: %s' % options.developer_id
OU = certificate_subject_OU(certname)
if hasattr(path, 'isalpha'):
path = (path,)
for p in path:
identifier = lookup_file_identifier(p)
reqs = None
with open('macx/scripts/codesign-requirements.tmpl', 'r') as f:
tmplReqs = f.read()
reqs = string.Template(tmplReqs).substitute({
'identifier': identifier,
'subject_OU': OU,
})
p = Popen(('codesign', '--keychain', options.keychain, '-vvvv', '-i', identifier, '-r='+reqs, '-s', certname, p))
retval = p.wait()
if retval != 0:
return retval
return 0
def prodsign(inf, outf):
'''Call the prodsign executable.'''
certname = 'Developer ID Installer: %s' % options.developer_id
p = Popen(('productsign', '--keychain', options.keychain, '--sign', certname, inf, outf))
retval = p.wait()
if retval != 0:
return retval
return 0
def create_overlay_package():
print '* Creating overlay installer'
bundle = os.path.join('release', 'MumbleOverlay.osax')
overlaylib = os.path.join('release', 'libmumbleoverlay.dylib')
if options.developer_id:
codesign(bundle)
codesign(overlaylib)
os.system('./macx/scripts/build-overlay-installer')
if options.developer_id:
os.rename('release/MumbleOverlay.pkg', 'release/MumbleOverlayUnsigned.pkg')
prodsign('release/MumbleOverlayUnsigned.pkg', 'release/MumbleOverlay.pkg')
class AppBundle(object):
def copy_murmur(self):
'''
Copy the murmurd binary into our Mumble app bundle
'''
print ' * Copying murmurd binary'
src = os.path.join(self.bundle, '..', 'murmurd')
dst = os.path.join(self.bundle, 'Contents', 'MacOS', 'murmurd')
shutil.copy(src, dst)
print ' * Copying murmurd configuration'
dst = os.path.join(self.bundle, 'Contents', 'MacOS', 'murmur.ini')
shutil.copy('scripts/murmur.ini.osx', dst)
def copy_g15helper(self):
'''
Copy the Mumble G15 helper daemon into our Mumble app bundle.
'''
if os.path.exists(os.path.join(self.bundle, '..', 'mumble-g15-helper')):
print ' * Copying G15 helper'
src = os.path.join(self.bundle, '..', 'mumble-g15-helper')
dst = os.path.join(self.bundle, 'Contents', 'MacOS', 'mumble-g15-helper')
shutil.copy(src, dst)
else:
print ' * No G15 helper found, skipping...'
def copy_resources(self, rsrcs):
'''
Copy needed resources into our bundle.
'''
print ' * Copying needed resources'
rsrcpath = os.path.join(self.bundle, 'Contents', 'Resources')
if not os.path.exists(rsrcpath):
os.mkdir(rsrcpath)
# Copy resources already in the bundle
for rsrc in rsrcs:
b = os.path.basename(rsrc)
if os.path.isdir(rsrc):
shutil.copytree(rsrc, os.path.join(rsrcpath, b), symlinks=True)
elif os.path.isfile(rsrc):
shutil.copy(rsrc, os.path.join(rsrcpath, b))
# Extras
shutil.copy('release/MumbleOverlay.pkg', os.path.join(rsrcpath, 'MumbleOverlay.pkg'))
def copy_codecs(self):
'''
Copy over dynamic CELT libraries.
'''
print ' * Copying CELT libraries.'
dst = os.path.join(self.bundle, 'Contents', 'Codecs')
os.makedirs(dst)
shutil.copy('release/libcelt0.0.7.0.dylib', dst)
shutil.copy('release/libcelt0.0.11.0.dylib', dst)
def copy_plugins(self):
'''
Copy over any built Mumble plugins.
'''
print ' * Copying positional audio plugins'
dst = os.path.join(self.bundle, 'Contents', 'Plugins')
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree('release/plugins/', dst, symlinks=True)
def update_plist(self):
'''
Modify our bundle's Info.plist to make it ready for release.
'''
if self.version is not None:
print ' * Changing version in Info.plist'
p = self.infoplist
p['CFBundleVersion'] = self.version
plistlib.writePlist(p, self.infopath)
def add_compat_warning(self):
'''
Add compat binary for when our binary is run on i386 or ppc.
The compat binary displays a warning dialog telling the user that they need to download a universal version of Mumble
'''
print ' * Splicing Mumble.compat into main bundle executable'
os.system('lipo -create release/Mumble.compat -arch x86_64 %s -output %s' % (self.binary, self.binary))
def set_min_macosx_version(self, version):
'''
Set the minimum version of Mac OS X version that this App will run on.
'''
print ' * Setting minimum Mac OS X version to: %s' % (version)
self.infoplist['LSMinimumSystemVersion'] = version
def done(self):
plistlib.writePlist(self.infoplist, self.infopath)
print ' * Done!'
print ''
def __init__(self, bundle, version=None):
self.framework_path = ''
self.handled_libs = {}
self.bundle = bundle
self.version = version
self.infopath = os.path.join(os.path.abspath(bundle), 'Contents', 'Info.plist')
self.infoplist = plistlib.readPlist(self.infopath)
self.binary = os.path.join(os.path.abspath(bundle), 'Contents', 'MacOS', self.infoplist['CFBundleExecutable'])
print ' * Preparing AppBundle'
class FolderObject(object):
class Exception(exceptions.Exception):
pass
def __init__(self):
self.tmp = tempfile.mkdtemp()
def copy(self, src, dst='/'):
'''
Copy a file or directory into the folder.
'''
asrc = os.path.abspath(src)
if dst[0] != '/':
raise self.Exception
# Determine destination
if dst[-1] == '/':
adst = os.path.abspath(self.tmp + '/' + dst + os.path.basename(src))
else:
adst = os.path.abspath(self.tmp + '/' + dst)
if os.path.isdir(asrc):
print ' * Copying directory: %s' % os.path.basename(asrc)
shutil.copytree(asrc, adst, symlinks=True)
elif os.path.isfile(asrc):
print ' * Copying file: %s' % os.path.basename(asrc)
shutil.copy(asrc, adst)
def symlink(self, src, dst):
'''
Create a symlink inside the folder.
'''
asrc = os.path.abspath(src)
adst = self.tmp + '/' + dst
print ' * Creating symlink %s' % os.path.basename(asrc)
os.symlink(asrc, adst)
def mkdir(self, name):
'''
Create a directory inside the folder.
'''
print ' * Creating directory %s' % os.path.basename(name)
adst = self.tmp + '/' + name
os.makedirs(adst)
class DiskImage(FolderObject):
def __init__(self, filename, volname):
FolderObject.__init__(self)
print ' * Preparing to create diskimage'
self.filename = filename
self.volname = volname
def create(self):
'''
Create the disk image itself.
'''
print ' * Creating diskimage. Please wait...'
if os.path.exists(self.filename):
shutil.rmtree(self.filename)
p = Popen(['hdiutil', 'create',
'-srcfolder', self.tmp,
'-format', 'UDBZ',
'-volname', self.volname,
self.filename])
retval = p.wait()
print ' * Removing temporary directory.'
shutil.rmtree(self.tmp)
print ' * Done!'
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('', '--release', dest='release', help='Build a release. This determines the version number of the release.')
parser.add_option('', '--snapshot', dest='snapshot', help='Build a snapshot release. This determines the \'snapshot version\'.')
parser.add_option('', '--git', dest='git', help='Build a snapshot release. Use the git revision number as the \'snapshot version\'.', action='store_true', default=False)
	parser.add_option('', '--universal', dest='universal', help='Build a universal snapshot.', action='store_true', default=False)
parser.add_option('', '--only-appbundle', dest='only_appbundle', help='Only prepare the appbundle. Do not package.', action='store_true', default=False)
parser.add_option('', '--only-overlay', dest='only_overlay', help='Only create the overlay installer.', action='store_true', default=False)
parser.add_option('', '--developer-id', dest='developer_id', help='Identity (Developer ID) to use for code signing. (If not set, no code signing will occur)')
	parser.add_option('', '--keychain', dest='keychain', help='The keychain to use when invoking code signing utilities. (Defaults to login.keychain)', default='login.keychain')
parser.add_option('', '--no-server', dest='no_server', help='Exclude Murmur-related files from disk image.', action='store_true', default=False)
options, args = parser.parse_args()
# Release
if options.release:
ver = options.release
if options.universal:
fn = 'release/Mumble-Universal-%s.dmg' % ver
title = 'Mumble %s (Universal) ' %ver
else:
fn = 'release/Mumble-%s.dmg' % ver
title = 'Mumble %s ' % ver
# Snapshot
elif options.snapshot or options.git:
if not options.git:
ver = options.snapshot
else:
ver = gitrev()
if options.universal:
fn = 'release/Mumble-Universal-Snapshot-%s.dmg' % ver
title = 'Mumble Snapshot %s (Universal)' % ver
else:
fn = 'release/Mumble-Snapshot-%s.dmg' % ver
title = 'Mumble Snapshot %s' % ver
else:
		print 'Neither snapshot nor release selected. Bailing.'
sys.exit(1)
if not os.path.exists('release'):
print 'This script needs to be run from the root of the Mumble source tree.'
sys.exit(1)
# Fix overlay installer package
create_overlay_package()
if options.only_overlay:
sys.exit(0)
# Fix .ini files
os.system('cd scripts && sh mkini.sh')
# Do the finishing touches to our Application bundle before release
a = AppBundle('release/Mumble.app', ver)
if not options.no_server:
a.copy_murmur()
a.copy_g15helper()
a.copy_codecs()
a.copy_plugins()
a.copy_resources(['icons/mumble.icns'])
a.update_plist()
if not options.universal:
a.add_compat_warning()
a.set_min_macosx_version('10.6.0')
else:
a.set_min_macosx_version('10.4.8')
a.done()
# Sign our binaries, etc.
if options.developer_id:
print ' * Signing binaries with Developer ID `%s\'' % options.developer_id
binaries = [
# 1.2.x
'release/Mumble.app',
'release/Mumble.app/Contents/Plugins/liblink.dylib',
'release/Mumble.app/Contents/Plugins/libmanual.dylib',
'release/Mumble.app/Contents/Codecs/libcelt0.0.7.0.dylib',
'release/Mumble.app/Contents/Codecs/libcelt0.0.11.0.dylib',
]
g15path = 'release/Mumble.app/Contents/MacOS/mumble-g15-helper'
if os.path.exists(g15path):
binaries.append(g15path)
if not options.no_server:
binaries.append('release/Mumble.app/Contents/MacOS/murmurd')
codesign(binaries)
print ''
if options.only_appbundle:
sys.exit(0)
# Create diskimage
d = DiskImage(fn, title)
d.copy('macx/scripts/DS_Store', '/.DS_Store')
d.symlink('/Applications', '/Applications')
d.copy('release/Mumble.app')
d.create()
|
|
#!/usr/bin/env python
'''
Compliance Test Suite for testing the netcdf file name
http://www.imos.org.au/
'''
import sys
import argparse
import os.path
import datetime
import re
import yaml
from pkg_resources import resource_filename
from compliance_checker.base import BaseCheck, BaseNCCheck, Result
# Initial reference tables for the file name checks
facility_code_file = resource_filename('compliance_checker', 'imos/facility_code.yml')
stream = file(facility_code_file, 'r')  # facility_code.yml contains a single YAML document
facility_code_list = yaml.load(stream)
platform_code_file = resource_filename('compliance_checker', 'imos/platform_code.yml')
stream = file(platform_code_file, 'r')  # platform_code.yml contains a single YAML document
platform_code_list = yaml.load(stream)
class IMOSFileNameCheck(BaseNCCheck):
@classmethod
def beliefs(cls):
return {}
def setup(self, ds):
if hasattr(ds, 'ds_loc'):
dataset_name = ds.ds_loc
else:
parser = argparse.ArgumentParser()
parser.add_argument('--test', '-t', '--test=', '-t=', action='append')
parser.add_argument('--criteria', '-c', nargs='?', default='normal')
parser.add_argument('--verbose' , '-v', action="count")
parser.add_argument('-f', '--format', default='text')
parser.add_argument('-o', '--output', default='-', action='store')
parser.add_argument('dataset_location', nargs='+')
args = parser.parse_args()
dataset_name = args.dataset_location[0]
head, tail = os.path.split(dataset_name)
        file_names = tail.split('.')
file_names_length = len(file_names)
if file_names_length == 0:
self._file_name = ''
self._file_extension_name = ''
elif file_names_length == 1:
self._file_name = file_names[0]
self._file_extension_name = ''
else:
            self._file_name = '.'.join(file_names[:-1])
            self._file_extension_name = file_names[-1]
        self._file_names = self._file_name.split('_')
self._file_names_length = len(self._file_names)
def check_extension_name(self, ds):
'''
        Check the file extension name and ensure it is 'nc'
'''
ret_val = []
result_name = ['file_name','check_extension_name']
reasoning = ["File extension name is not equal to nc"]
if not self._file_extension_name == 'nc':
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
else:
result = Result(BaseCheck.HIGH, True, result_name, None)
ret_val.append(result)
return ret_val
def check_file_name(self, ds):
'''
Check file name and ensure it contains 6 to 10 fields, separated by '_'
'''
ret_val = []
result_name = ['file_name','check_file_name']
reasoning = ["File name doesn't contain 6 to 10 fields, separated by '_'"]
if self._file_names_length >= 6 and self._file_names_length <= 10:
result = Result(BaseCheck.HIGH, True, result_name, None)
else:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
ret_val.append(result)
return ret_val
def check_file_name_field1(self, ds):
'''
Check file name field1 and ensure it is "IMOS"
'''
ret_val = []
result_name = ['file_name','check_file_name_field1']
reasoning = ["File name field1 is not 'IMOS'"]
        if self._file_names_length >= 1:
if self._file_names[0] != 'IMOS':
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
else:
result = Result(BaseCheck.HIGH, True, result_name, None)
else:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
ret_val.append(result)
return ret_val
def check_file_name_field2(self, ds):
'''
        Check file name field2 and ensure it is a valid facility / sub-facility code
'''
ret_val = []
result_name = ['file_name','check_file_name_field2']
reasoning = ["File name field2 is not valid facility, sub facility code'"]
if self._file_names_length >= 2:
if self._file_names[1] not in facility_code_list:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
else:
result = Result(BaseCheck.HIGH, True, result_name, None)
else:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
ret_val.append(result)
return ret_val
def check_file_name_field3(self, ds):
'''
Check file name field3 and ensure it is made up of the characters "ABCEFGIKMOPRSTUVWZ"
'''
ret_val = []
result_name = ['file_name','check_file_name_field3']
reasoning = ["File name field3 is not made up of characters 'ABCEFGIKMOPRSTUVWZ'"]
if self._file_names_length >= 3:
if re.search('^[ABCEFGIKMOPRSTUVWZ]+$', self._file_names[2]) == None:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
else:
result = Result(BaseCheck.HIGH, True, result_name, None)
else:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
ret_val.append(result)
return ret_val
def check_file_name_field4(self, ds):
'''
Check file name field4 matches time_coverage_start attribute
'''
ret_val = []
result_name = ['file_name','check_file_name_field4']
reasoning = ["File name field4 doesn't match time_coverage_start attribute"]
time_coverage_start = getattr(ds.dataset, 'time_coverage_start', None)
passed = False
if time_coverage_start is not None:
# time_coverage_start format is yyyy-mm-ddTHH:MM:SSZ while
# field4 format is yyyymmddTHHMMSSZ
time_coverage_start = time_coverage_start.replace("-", "")
time_coverage_start = time_coverage_start.replace(":", "")
if self._file_names_length >= 4:
field4 = self._file_names[3]
if field4 != time_coverage_start:
passed = False
else:
passed = True
if passed:
result = Result(BaseCheck.HIGH, True, result_name, None)
else:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
ret_val.append(result)
return ret_val
def check_file_name_field5(self, ds):
'''
Check file name field5 matches platform_code or site_code attribute
Check file name field5 is valid platform code
'''
ret_val = []
result_name = ['file_name','check_file_name_field5']
if self._file_names_length >= 5:
reasoning = ["File name field5 is not valid platform code"]
if self._file_names[4] not in platform_code_list:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
else:
result = Result(BaseCheck.HIGH, True, result_name, None)
ret_val.append(result)
reasoning = ["File name field5 doesn't match platform_code or site_code attribute"]
platform_code = getattr(ds.dataset, 'platform_code', None)
site_code = getattr(ds.dataset, 'site_code', None)
check_values = []
check_values.append(platform_code)
check_values.append(site_code)
if any(check_values):
field5 = self._file_names[4]
if field5 in check_values:
result = Result(BaseCheck.HIGH, True, result_name, None)
else:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
ret_val.append(result)
return ret_val
def check_file_name_field6(self, ds):
'''
Check file name field6 is one of FV00, FV01, FV02 and consistent with
file_version attribute, if it exists
Field should be 'FV0X' where file_version starts with 'LEVEL X'
'''
ret_val = []
result_name = ['file_name','check_file_name_field6']
reasoning = ["File name field6 is not one of FV00, FV01, FV02"]
skip = False
passed = False
if self._file_names_length >= 6:
field6 = self._file_names[5]
if field6 == 'FV00' or field6 == 'FV01' or field6 == 'FV02':
passed = True
file_version = getattr(ds.dataset, 'file_version', None)
if file_version is not None:
passed = False
file_version_splits = [split for split in file_version.split(' ')]
if len(file_version_splits) >= 2:
if field6[3] == file_version_splits[1]:
passed = True
else:
passed = False
else:
passed = False
else:
skip = True
else:
passed = False
else:
passed = False
if not skip:
if passed:
result = Result(BaseCheck.HIGH, True, result_name, None)
else:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
ret_val.append(result)
return ret_val
def check_file_name_field7_to_field10(self, ds):
'''
        Check file name field7 to field10 to meet one of the below conditions:
1) is a non-empty string (product type)
2) matches time_coverage_end attribute, format "END-YYYYMMDDTHHMMSSZ"
3) matches date_created attribute, format "C-YYYYMMDDTHHMMSSZ"
4) matches regexp "PART\d+"
Each condition must only match one field
'''
ret_val = []
result = None
result_name = ['file_name','check_file_name_field7_to_field10']
reasoning = ["Some of values from filed 7 to filed 10 are not correct"]
success = [None, None, None, None]
for i in range(6, self._file_names_length):
field = self._file_names[i]
try:
if field.startswith('END-'):
field_date = field[4:]
datetime.datetime.strptime(field_date, '%Y%m%dT%H%M%SZ')
if success[0] == None:
success[0] = True
continue
if field.startswith('C-'):
field_date = field[2:]
datetime.datetime.strptime(field_date, '%Y%m%dT%H%M%SZ')
if success[1] == None:
success[1] = True
continue
except ValueError:
pass
pattern = r'^PART\d+'
if re.search(pattern, field):
if success[2] == None:
success[2] = True
continue
if isinstance(field, basestring):
if field and not field.isdigit():
if success[3] == None:
success[3] = True
continue
if self._file_names_length > 6:
trues = [suc for suc in success if suc]
if len(trues) == self._file_names_length-6:
result = Result(BaseCheck.HIGH, True, result_name, None)
else:
result = Result(BaseCheck.HIGH, False, result_name, reasoning)
ret_val.append(result)
return ret_val
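# Illustrative sketch (not part of the checker): setup() above derives the checked fields by
# stripping the extension and splitting the remainder on '_'. The standalone helper below mirrors
# that logic for names that include an extension, as a quick manual look at a candidate name; the
# example file name is hypothetical.
def _imos_name_fields(file_name):
    base, _, extension = file_name.rpartition('.')
    return extension, base.split('_')
# e.g. _imos_name_fields('IMOS_ANMN-NSW_TZ_20120101T000000Z_NRSPHB_FV01.nc')
# returns ('nc', ['IMOS', 'ANMN-NSW', 'TZ', '20120101T000000Z', 'NRSPHB', 'FV01']).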
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
from pathlib import Path
from typing import AsyncGenerator
from unittest import mock
from idb.common.direct_companion_manager import DirectCompanionManager
from idb.common.types import (
CompanionInfo,
DomainSocketAddress,
IdbException,
TCPAddress,
)
from idb.utils.testing import TestCase, ignoreTaskLeaks
@ignoreTaskLeaks
class CompanionManagerTests(TestCase):
async def _managers(self) -> AsyncGenerator[DirectCompanionManager, None]:
# Covers a fresh tempfile
with tempfile.NamedTemporaryFile() as f:
yield DirectCompanionManager(
logger=mock.MagicMock(), state_file_path=f.name
)
# Covers a missing state file
with tempfile.TemporaryDirectory() as dir:
yield DirectCompanionManager(
logger=mock.MagicMock(), state_file_path=str(Path(dir) / "state_file")
)
# Covers a garbage tempfile
with tempfile.TemporaryDirectory() as dir:
path = str(Path(dir) / "state_file")
with open(path, "w") as f:
f.write("GARBAGEASDASDASD")
yield DirectCompanionManager(logger=mock.MagicMock(), state_file_path=path)
async def test_add_multiple(self) -> None:
async for manager in self._managers():
companion_a = CompanionInfo(
udid="a", address=TCPAddress(host="ahost", port=123), is_local=False
)
replaced = await manager.add_companion(companion_a)
self.assertIsNone(replaced)
companions = await manager.get_companions()
self.assertEqual(companions, [companion_a])
companion_b = CompanionInfo(
udid="b", address=TCPAddress(host="bhost", port=123), is_local=False
)
replaced = await manager.add_companion(companion_b)
self.assertIsNone(replaced)
companions = await manager.get_companions()
self.assertEqual(companions, [companion_a, companion_b])
companion_c = CompanionInfo(
udid="c", address=TCPAddress(host="chost", port=123), is_local=False
)
replaced = await manager.add_companion(companion_c)
self.assertIsNone(replaced)
companions = await manager.get_companions()
self.assertEqual(companions, [companion_a, companion_b, companion_c])
removed = await manager.remove_companion(companion_b.address)
companions = await manager.get_companions()
self.assertEqual(companions, [companion_a, companion_c])
self.assertEqual(removed, [companion_b])
removed = await manager.remove_companion("a")
companions = await manager.get_companions()
self.assertEqual(companions, [companion_c])
async def test_add_then_remove_companion_by_tcp_address(self) -> None:
async for manager in self._managers():
companion = CompanionInfo(
udid="asdasda",
address=TCPAddress(host="foohost", port=123),
is_local=False,
)
replaced = await manager.add_companion(companion)
self.assertIsNone(replaced)
companions = await manager.get_companions()
self.assertEqual(companions, [companion])
removed = await manager.remove_companion(companion.address)
companions = await manager.get_companions()
self.assertEqual(companions, [])
self.assertEqual(removed, [companion])
async def test_add_then_remove_companion_by_uxd_address(self) -> None:
async for manager in self._managers():
companion = CompanionInfo(
udid="asdasda",
address=DomainSocketAddress(path="/tmp/foo.sock"),
is_local=False,
)
replaced = await manager.add_companion(companion)
self.assertIsNone(replaced)
companions = await manager.get_companions()
self.assertEqual(companions, [companion])
removed = await manager.remove_companion(companion.address)
companions = await manager.get_companions()
self.assertEqual(companions, [])
self.assertEqual(removed, [companion])
async def test_add_then_remove_companion_by_udid(self) -> None:
async for manager in self._managers():
companion = CompanionInfo(
udid="asdasda",
address=TCPAddress(host="foohost", port=123),
is_local=False,
)
replaced = await manager.add_companion(companion)
self.assertIsNone(replaced)
companions = await manager.get_companions()
self.assertEqual(companions, [companion])
removed = await manager.remove_companion("asdasda")
companions = await manager.get_companions()
self.assertEqual(companions, [])
self.assertEqual(removed, [companion])
async def test_add_then_clear(self) -> None:
async for manager in self._managers():
companion = CompanionInfo(
udid="asdasda",
address=TCPAddress(host="foohost", port=123),
is_local=False,
)
await manager.add_companion(companion)
companions = await manager.get_companions()
self.assertEqual(companions, [companion])
await manager.clear()
companions = await manager.get_companions()
self.assertEqual(companions, [])
async def test_ambiguity_when_no_udid_multiple_companions(self) -> None:
async for manager in self._managers():
companion_a = CompanionInfo(
udid="a", address=TCPAddress(host="ahost", port=123), is_local=False
)
replaced = await manager.add_companion(companion_a)
self.assertIsNone(replaced)
companions = await manager.get_companions()
self.assertEqual(companions, [companion_a])
companion_b = CompanionInfo(
udid="b", address=TCPAddress(host="ahost", port=123), is_local=False
)
replaced = await manager.add_companion(companion_b)
self.assertIsNone(replaced)
companions = await manager.get_companions()
self.assertEqual(companions, [companion_a, companion_b])
with self.assertRaises(IdbException) as cm:
await manager.get_companion_info(target_udid=None)
self.assertIn(
"No UDID provided and there's multiple companions", str(cm.exception)
)
async def test_ambiguity_when_no_udid_no_companions(self) -> None:
async for manager in self._managers():
companions = await manager.get_companions()
self.assertEqual(companions, [])
with self.assertRaises(IdbException) as cm:
await manager.get_companion_info(target_udid=None)
self.assertIn("No UDID provided and no companions exist", str(cm.exception))
async def test_selects_when_no_udid_single_companion(self) -> None:
async for manager in self._managers():
companion = CompanionInfo(
udid="a", address=TCPAddress(host="ahost", port=123), is_local=False
)
await manager.add_companion(companion)
self.assertEqual(
companion, await manager.get_companion_info(target_udid=None)
)
async def test_selects_by_udid(self) -> None:
async for manager in self._managers():
# Add two companions
companion_a = CompanionInfo(
udid="a", address=TCPAddress(host="ahost", port=123), is_local=False
)
await manager.add_companion(companion_a)
companion_b = CompanionInfo(
udid="b", address=TCPAddress(host="bhost", port=123), is_local=False
)
await manager.add_companion(companion_b)
self.assertEqual(
companion_a, await manager.get_companion_info(target_udid="a")
)
self.assertEqual(
companion_b, await manager.get_companion_info(target_udid="b")
)
async def test_replace_companion(self) -> None:
async for manager in self._managers():
companion_first = CompanionInfo(
udid="a", address=TCPAddress(host="ahost", port=123), is_local=False
)
replaced = await manager.add_companion(companion_first)
self.assertIsNone(replaced)
companions = await manager.get_companions()
self.assertEqual(companions, [companion_first])
companion_second = CompanionInfo(
udid="a",
address=TCPAddress(host="anotherhost", port=321),
is_local=False,
)
replaced = await manager.add_companion(companion_second)
self.assertEqual(replaced, companion_first)
companions = await manager.get_companions()
self.assertEqual(companions, [companion_second])
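# Illustrative sketch (not part of the test suite): driving DirectCompanionManager outside a test,
# using only the calls exercised above. The state file path, udid and host/port values are made up.
async def _example_direct_companion_usage() -> None:
    manager = DirectCompanionManager(
        logger=mock.MagicMock(), state_file_path="/tmp/idb_companions_example"
    )
    await manager.add_companion(
        CompanionInfo(
            udid="example-udid",
            address=TCPAddress(host="localhost", port=10880),
            is_local=True,
        )
    )
    info = await manager.get_companion_info(target_udid="example-udid")
    await manager.remove_companion(info.address)
# e.g. asyncio.run(_example_direct_companion_usage()) after "import asyncio".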
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testtools import testcase
from sahara.tests.integration.configs import config as cfg
from sahara.tests.integration.tests import base as b
from sahara.tests.integration.tests import edp
from sahara.tests.integration.tests import scaling
from sahara.tests.integration.tests import swift
from sahara.utils import edp as utils_edp
class HDP2GatingTest(swift.SwiftTest, scaling.ScalingTest,
edp.EDPTest):
config = cfg.ITConfig().hdp2_config
SKIP_EDP_TEST = config.SKIP_EDP_TEST
SKIP_SWIFT_TEST = config.SKIP_SWIFT_TEST
SKIP_SCALING_TEST = config.SKIP_SCALING_TEST
def setUp(self):
super(HDP2GatingTest, self).setUp()
self.cluster_id = None
self.cluster_template_id = None
self.ng_template_ids = []
def _prepare_test(self):
self.hdp2_config = cfg.ITConfig().hdp2_config
self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
self.internal_neutron_net = None
if self.common_config.NEUTRON_ENABLED:
self.internal_neutron_net = self.get_internal_neutron_net_id()
self.floating_ip_pool = (
self.get_floating_ip_pool_id_for_neutron_net())
self.hdp2_config.IMAGE_ID, self.hdp2_config.SSH_USERNAME = (
self.get_image_id_and_ssh_username(self.hdp2_config))
@b.errormsg("Failure while 'rm-nn' node group template creation: ")
def _create_rm_nn_ng_template(self):
template = {
'name': 'test-node-group-template-hdp2-rm-nn',
'plugin_config': self.hdp2_config,
'description': 'test node group template for HDP plugin',
'node_processes': self.hdp2_config.MASTER_NODE_PROCESSES,
'floating_ip_pool': self.floating_ip_pool,
'node_configs': {}
}
self.ng_tmpl_rm_nn_id = self.create_node_group_template(**template)
self.ng_template_ids.append(self.ng_tmpl_rm_nn_id)
@b.errormsg("Failure while 'nm-dn' node group template creation: ")
def _create_nm_dn_ng_template(self):
template = {
'name': 'test-node-group-template-hdp2-nm-dn',
'plugin_config': self.hdp2_config,
'description': 'test node group template for HDP plugin',
'node_processes': self.hdp2_config.WORKER_NODE_PROCESSES,
'floating_ip_pool': self.floating_ip_pool,
'node_configs': {}
}
self.ng_tmpl_nm_dn_id = self.create_node_group_template(**template)
self.ng_template_ids.append(self.ng_tmpl_nm_dn_id)
@b.errormsg("Failure while cluster template creation: ")
def _create_cluster_template(self):
template = {
'name': 'test-cluster-template-hdp2',
'plugin_config': self.hdp2_config,
'description': 'test cluster template for HDP plugin',
'cluster_configs': {
'YARN': {
'yarn.log-aggregation-enable': False
}
},
'node_groups': [
{
'name': 'master-node-dn',
'node_group_template_id': self.ng_tmpl_rm_nn_id,
'count': 1
},
{
'name': 'worker-node-nm',
'node_group_template_id': self.ng_tmpl_nm_dn_id,
'count': 3
}
],
'net_id': self.internal_neutron_net
}
self.cluster_template_id = self.create_cluster_template(**template)
@b.errormsg("Failure while cluster creation: ")
def _create_cluster(self):
cluster_name = '%s-%s-v2' % (self.common_config.CLUSTER_NAME,
self.hdp2_config.PLUGIN_NAME)
cluster = {
'name': cluster_name,
'plugin_config': self.hdp2_config,
'cluster_template_id': self.cluster_template_id,
'description': 'test cluster',
'cluster_configs': {}
}
cluster_id = self.create_cluster(**cluster)
self.poll_cluster_state(cluster_id)
self.cluster_info = self.get_cluster_info(self.hdp2_config)
self.await_active_workers_for_namenode(self.cluster_info['node_info'],
self.hdp2_config)
@b.errormsg("Failure during check of Swift availability: ")
def _check_swift(self):
self.check_swift_availability(self.cluster_info)
@b.errormsg("Failure while EDP testing: ")
def _check_edp(self):
self.poll_jobs_status(list(self._run_edp_test()))
def _run_edp_test(self):
# check pig
pig_job = self.edp_info.read_pig_example_script()
pig_lib = self.edp_info.read_pig_example_jar()
yield self.edp_testing(
job_type=utils_edp.JOB_TYPE_PIG,
job_data_list=[{'pig': pig_job}],
lib_data_list=[{'jar': pig_lib}],
swift_binaries=True,
hdfs_local_output=True)
# check mapreduce
mapreduce_jar = self.edp_info.read_mapreduce_example_jar()
mapreduce_configs = self.edp_info.mapreduce_example_configs()
yield self.edp_testing(
job_type=utils_edp.JOB_TYPE_MAPREDUCE,
job_data_list=[],
lib_data_list=[{'jar': mapreduce_jar}],
configs=mapreduce_configs,
swift_binaries=True,
hdfs_local_output=True)
# check mapreduce streaming
yield self.edp_testing(
job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
job_data_list=[],
lib_data_list=[],
configs=self.edp_info.mapreduce_streaming_configs())
# check java
java_jar = self.edp_info.read_java_example_lib(2)
java_configs = self.edp_info.java_example_configs(2)
yield self.edp_testing(
utils_edp.JOB_TYPE_JAVA,
job_data_list=[],
lib_data_list=[{'jar': java_jar}],
configs=java_configs)
@b.errormsg("Failure while cluster scaling: ")
def _check_scaling(self):
datanode_count_after_resizing = (
self.cluster_info['node_info']['datanode_count']
            + self.hdp2_config.SCALE_EXISTING_NG_COUNT)
change_list = [
{
'operation': 'resize',
'info': ['worker-node-nm',
datanode_count_after_resizing]
},
{
'operation': 'add',
'info': ['new-worker-node-tt-dn',
self.hdp2_config.SCALE_NEW_NG_COUNT,
'%s' % self.ng_tmpl_nm_dn_id]
}
]
self.cluster_info = self.cluster_scaling(self.cluster_info,
change_list)
self.await_active_workers_for_namenode(self.cluster_info['node_info'],
self.hdp2_config)
@b.errormsg(
"Failure during check of Swift availability after cluster scaling: ")
def _check_swift_after_scaling(self):
self.check_swift_availability(self.cluster_info)
@b.errormsg("Failure while EDP testing after cluster scaling: ")
def _check_edp_after_scaling(self):
self._check_edp()
@testcase.attr('hdp2')
@testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN,
'All tests for HDP2 plugin were skipped')
def test_hdp2_plugin_gating(self):
self._prepare_test()
self._create_rm_nn_ng_template()
self._create_nm_dn_ng_template()
self._create_cluster_template()
self._create_cluster()
self._check_swift()
self._check_edp()
if not self.hdp2_config.SKIP_SCALING_TEST:
self._check_scaling()
self._check_swift_after_scaling()
self._check_edp_after_scaling()
def tearDown(self):
self.delete_objects(self.cluster_id, self.cluster_template_id,
self.ng_template_ids)
super(HDP2GatingTest, self).tearDown()
|
|
#!/usr/bin/env python
'''
Created February, 2011
@author: Dr. Rainer Hessmer
DeadReckoning.py - A Python implementation of the tutorial
http://www.ros.org/wiki/pr2_controllers/Tutorials/Using%20the%20base%20controller%20with%20odometry%20and%20transform%20information
Copyright (c) 2011 Dr. Rainer Hessmer. All right reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Vanadium Labs LLC nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import roslib; roslib.load_manifest('ardros')
import rospy
import tf
from tf import transformations
import time
import math
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
class Driver(object):
'''
Implements the logic for driving a given distance or turning for a given amount
by monitoring the transform messages that contain the odometry based pose.
'''
def __init__(self):
rospy.init_node('DeadReckoning')
self._VelocityCommandPublisher = rospy.Publisher("cmd_vel", Twist)
self._TransformListener = tf.TransformListener()
# wait for the listener to get the first transform message
self._TransformListener.waitForTransform("/odom", "/base_link", rospy.Time(), rospy.Duration(4.0))
def DriveX(self, distance, speed):
'''
Drive in x direction a specified distance based on odometry information
distance [m]: the distance to travel in the x direction (>0: forward, <0: backwards)
speed [m/s]: the speed with which to travel; must be positive
'''
forward = (distance >= 0)
# record the starting transform from the odom to the base_link frame
# Note that here the 'from' frame precedes 'to' frame which is opposite to how they are
# ordered in tf.TransformBroadcaster's sendTransform function.
# startTranslation is a tuple holding the x,y,z components of the translation vector
# startRotation is a tuple holding the four components of the quaternion
(startTranslation, startRotation) = self._TransformListener.lookupTransform("/odom", "/base_link", rospy.Time(0))
done = False
velocityCommand = Twist()
if forward:
velocityCommand.linear.x = speed # going forward m/s
else:
velocityCommand.linear.x = -speed # going forward m/s
velocityCommand.angular.z = 0.0 # no angular velocity
while True:
try:
(currentTranslation, currentRotation) = self._TransformListener.lookupTransform("/odom", "/base_link", rospy.Time(0))
dx = currentTranslation[0] - startTranslation[0]
dy = currentTranslation[1] - startTranslation[1]
distanceMoved = math.sqrt(dx * dx + dy * dy)
print distanceMoved
if (forward):
arrived = distanceMoved >= distance
else:
arrived = distanceMoved >= -distance
if (arrived):
break
else:
# send the drive command
print("sending vel command" + str(velocityCommand))
self._VelocityCommandPublisher.publish(velocityCommand)
except (tf.LookupException, tf.ConnectivityException):
continue
rospy.sleep(0.1)
#stop
velocityCommand.linear.x = 0.0
velocityCommand.angular.z = 0.0
self._VelocityCommandPublisher.publish(velocityCommand)
return done
def Turn(self, angle, angularSpeed):
'''
Turn the robot based on odometry information
		angle [rad]: the angle to turn (positive angles mean counterclockwise rotation)
angularSpeed [rad/s]: the speed with which to turn; must be positive
'''
ccw = (angle >= 0) # counter clockwise rotation
# record the starting transform from the odom to the base frame
# Note that here the 'from' frame precedes 'to' frame which is opposite to how they are
# ordered in tf.TransformBroadcaster's sendTransform function.
(startTranslation, startRotation) = self._TransformListener.lookupTransform("/odom", "/base_link", rospy.Time(0))
startAngle = 2 * math.atan2(startRotation[2], startRotation[3])
print "start angle: " + str(startAngle)
previousAngle = startAngle
angleOffset = 0.0
done = False
velocityCommand = Twist()
velocityCommand.linear.x = 0.0 # going forward m/s
if ccw:
velocityCommand.angular.z = angularSpeed
else:
velocityCommand.angular.z = -angularSpeed
while not rospy.is_shutdown():
try:
(currentTranslation, currentRotation) = self._TransformListener.lookupTransform("/odom", "/base_link", rospy.Time(0))
currentAngle = 2 * math.atan2(currentRotation[2], currentRotation[3])
print "currentAngle: " + str(currentAngle)
# we need to handle roll over of the angle
if (currentAngle * previousAngle < 0 and math.fabs(currentAngle - previousAngle) > math.pi / 2):
if (currentAngle > previousAngle):
print "subtracting"
angleOffset = angleOffset - 2 * math.pi
else:
print "adding"
angleOffset = angleOffset + 2 * math.pi
angleTurned = currentAngle + angleOffset - startAngle
previousAngle = currentAngle
print "angleTurned: " + str(angleTurned)
if (ccw):
arrived = (angleTurned >= angle)
else:
arrived = (angleTurned <= angle)
print arrived
if (arrived):
break
else:
# send the drive command
print("sending vel command" + str(velocityCommand))
self._VelocityCommandPublisher.publish(velocityCommand)
except (tf.LookupException, tf.ConnectivityException):
continue
time.sleep(0.1)
#stop
velocityCommand.linear.x = 0.0
velocityCommand.angular.z = 0.0
self._VelocityCommandPublisher.publish(velocityCommand)
return done
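# Illustrative sketch (not part of the original node): the rollover handling inside Turn() above
# amounts to unwrapping successive atan2 headings so the accumulated turn angle stays continuous.
# The standalone helper below restates that single step on plain numbers for easier offline testing.
def _unwrapAngleOffset(previousAngle, currentAngle, angleOffset):
    '''Return the updated offset so that currentAngle + offset remains continuous with previousAngle.'''
    if currentAngle * previousAngle < 0 and math.fabs(currentAngle - previousAngle) > math.pi / 2:
        if currentAngle > previousAngle:
            angleOffset = angleOffset - 2 * math.pi
        else:
            angleOffset = angleOffset + 2 * math.pi
    return angleOffset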
if __name__ == '__main__':
try:
driver = Driver()
driver.DriveX(distance = 2, speed = 0.1);
driver.Turn(angle = math.pi, angularSpeed = 0.3);
driver.DriveX(distance = 2, speed = 0.1);
driver.Turn(angle = -math.pi, angularSpeed = 0.3)
#driver.Turn(angle = 3 * math.pi, angularSpeed = 0.3);
except rospy.ROSInterruptException:
pass
|
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' submit.py '''
import glob
import logging
import os
import tempfile
import requests
from heron.common.src.python.utils.log import Log
from heron.proto import topology_pb2
from heron.tools.cli.src.python.result import SimpleResult, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.cli.src.python.execute as execute
import heron.tools.cli.src.python.jars as jars
import heron.tools.cli.src.python.opts as opts
import heron.tools.cli.src.python.result as result
import heron.tools.cli.src.python.rest as rest
import heron.tools.common.src.python.utils.config as config
import heron.tools.common.src.python.utils.classpath as classpath
# pylint: disable=too-many-return-statements
################################################################################
def launch_mode_msg(cl_args):
'''
Depending on the mode of launching a topology provide a message
:param cl_args:
:return:
'''
if cl_args['dry_run']:
return "in dry-run mode"
return ""
################################################################################
def create_parser(subparsers):
'''
Create a subparser for the submit command
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'submit',
help='Submit a topology',
usage="%(prog)s [options] cluster/[role]/[env] " + \
"topology-file-name topology-class-name [topology-args]",
add_help=True
)
cli_args.add_titles(parser)
cli_args.add_cluster_role_env(parser)
cli_args.add_topology_file(parser)
cli_args.add_topology_class(parser)
cli_args.add_config(parser)
cli_args.add_deactive_deploy(parser)
cli_args.add_dry_run(parser)
cli_args.add_extra_launch_classpath(parser)
cli_args.add_service_url(parser)
cli_args.add_system_property(parser)
cli_args.add_verbose(parser)
parser.set_defaults(subcommand='submit')
return parser
################################################################################
def launch_a_topology(cl_args, tmp_dir, topology_file, topology_defn_file, topology_name):
'''
Launch a topology given topology jar, its definition file and configurations
:param cl_args:
:param tmp_dir:
:param topology_file:
:param topology_defn_file:
:param topology_name:
:return:
'''
# get the normalized path for topology.tar.gz
topology_pkg_path = config.normalized_class_path(os.path.join(tmp_dir, 'topology.tar.gz'))
# get the release yaml file
release_yaml_file = config.get_heron_release_file()
# create a tar package with the cluster configuration and generated config files
config_path = cl_args['config_path']
tar_pkg_files = [topology_file, topology_defn_file]
generated_config_files = [release_yaml_file, cl_args['override_config_file']]
config.create_tar(topology_pkg_path, tar_pkg_files, config_path, generated_config_files)
# pass the args to submitter main
args = [
"--cluster", cl_args['cluster'],
"--role", cl_args['role'],
"--environment", cl_args['environ'],
"--submit_user", cl_args['submit_user'],
"--heron_home", config.get_heron_dir(),
"--config_path", config_path,
"--override_config_file", cl_args['override_config_file'],
"--release_file", release_yaml_file,
"--topology_package", topology_pkg_path,
"--topology_defn", topology_defn_file,
"--topology_bin", os.path.basename(topology_file) # pex file if pex specified
]
if Log.getEffectiveLevel() == logging.DEBUG:
args.append("--verbose")
if cl_args["dry_run"]:
args.append("--dry_run")
if "dry_run_format" in cl_args:
args += ["--dry_run_format", cl_args["dry_run_format"]]
lib_jars = config.get_heron_libs(
jars.scheduler_jars() + jars.uploader_jars() + jars.statemgr_jars() + jars.packing_jars()
)
extra_jars = cl_args['extra_launch_classpath'].split(':')
# invoke the submitter to submit and launch the topology
main_class = 'com.twitter.heron.scheduler.SubmitterMain'
res = execute.heron_class(
class_name=main_class,
lib_jars=lib_jars,
extra_jars=extra_jars,
args=args,
java_defines=[])
err_ctxt = "Failed to launch topology '%s' %s" % (topology_name, launch_mode_msg(cl_args))
succ_ctxt = "Successfully launched topology '%s' %s" % (topology_name, launch_mode_msg(cl_args))
res.add_context(err_ctxt, succ_ctxt)
return res
################################################################################
def launch_topology_server(cl_args, topology_file, topology_defn_file, topology_name):
'''
Launch a topology given topology jar, its definition file and configurations
:param cl_args:
:param topology_file:
:param topology_defn_file:
:param topology_name:
:return:
'''
service_apiurl = cl_args['service_url'] + rest.ROUTE_SIGNATURES['submit'][1]
service_method = rest.ROUTE_SIGNATURES['submit'][0]
data = dict(
name=topology_name,
cluster=cl_args['cluster'],
role=cl_args['role'],
environment=cl_args['environ'],
user=cl_args['submit_user'],
)
if cl_args['dry_run']:
data["dry_run"] = True
files = dict(
definition=open(topology_defn_file, 'rb'),
topology=open(topology_file, 'rb'),
)
err_ctxt = "Failed to launch topology '%s' %s" % (topology_name, launch_mode_msg(cl_args))
succ_ctxt = "Successfully launched topology '%s' %s" % (topology_name, launch_mode_msg(cl_args))
try:
r = service_method(service_apiurl, data=data, files=files)
    ok = r.status_code == requests.codes.ok
    created = r.status_code == requests.codes.created
s = Status.Ok if created or ok else Status.HeronError
if s is Status.HeronError:
Log.error(r.json().get('message', "Unknown error from api server %d" % r.status_code))
elif ok:
# this case happens when we request a dry_run
print r.json().get("response")
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as err:
Log.error(err)
return SimpleResult(Status.HeronError, err_ctxt, succ_ctxt)
return SimpleResult(s, err_ctxt, succ_ctxt)
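################################################################################
# Illustrative sketch (not part of the original module): launch_topology_server() above posts the
# definition and package as one multipart request. Assuming the resolved service method behaves
# like requests.post, the call is roughly equivalent to the standalone helper below; the field
# values shown are made-up examples.
def _example_submit_post(api_url, topology_defn_file, topology_file):
  with open(topology_defn_file, 'rb') as defn, open(topology_file, 'rb') as topo:
    return requests.post(
        api_url,
        data=dict(name='example-topology', cluster='local', role='heron',
                  environment='default', user='heron'),
        files=dict(definition=defn, topology=topo))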
################################################################################
def launch_topologies(cl_args, topology_file, tmp_dir):
'''
Launch topologies
:param cl_args:
:param topology_file:
:param tmp_dir:
:return: list(Responses)
'''
# the submitter would have written the .defn file to the tmp_dir
defn_files = glob.glob(tmp_dir + '/*.defn')
if len(defn_files) == 0:
return SimpleResult(Status.HeronError, "No topologies found under %s" % tmp_dir)
results = []
for defn_file in defn_files:
# load the topology definition from the file
topology_defn = topology_pb2.Topology()
try:
handle = open(defn_file, "rb")
topology_defn.ParseFromString(handle.read())
handle.close()
except Exception as e:
err_context = "Cannot load topology definition '%s': %s" % (defn_file, e)
return SimpleResult(Status.HeronError, err_context)
# launch the topology
Log.info("Launching topology: \'%s\'%s", topology_defn.name, launch_mode_msg(cl_args))
# check if we have to do server or direct based deployment
if cl_args['deploy_mode'] == config.SERVER_MODE:
res = launch_topology_server(
cl_args, topology_file, defn_file, topology_defn.name)
else:
res = launch_a_topology(
cl_args, tmp_dir, topology_file, defn_file, topology_defn.name)
results.append(res)
return results
################################################################################
def submit_fatjar(cl_args, unknown_args, tmp_dir):
'''
We use the packer to make a package for the jar and dump it
to a well-known location. We then run the main method of class
with the specified arguments. We pass arguments as an environment variable HERON_OPTIONS.
This will run the jar file with the topology_class_name. The submitter
inside will write out the topology defn file to a location that
we specify. Then we write the topology defn file to a well known
location. We then write to appropriate places in zookeeper
and launch the scheduler jobs
:param cl_args:
:param unknown_args:
:param tmp_dir:
:return:
'''
# execute main of the topology to create the topology definition
topology_file = cl_args['topology-file-name']
main_class = cl_args['topology-class-name']
res = execute.heron_class(
class_name=main_class,
lib_jars=config.get_heron_libs(jars.topology_jars()),
extra_jars=[topology_file],
args=tuple(unknown_args),
java_defines=cl_args['topology_main_jvm_property'])
result.render(res)
if not result.is_successful(res):
err_context = ("Failed to create topology definition " \
"file when executing class '%s' of file '%s'") % (main_class, topology_file)
res.add_context(err_context)
return res
results = launch_topologies(cl_args, topology_file, tmp_dir)
return results
################################################################################
def submit_tar(cl_args, unknown_args, tmp_dir):
'''
Extract and execute the java files inside the tar and then add topology
definition file created by running submitTopology
We use the packer to make a package for the tar and dump it
to a well-known location. We then run the main method of class
with the specified arguments. We pass arguments as an environment variable HERON_OPTIONS.
This will run the jar file with the topology class name.
The submitter inside will write out the topology defn file to a location
that we specify. Then we write the topology defn file to a well known
packer location. We then write to appropriate places in zookeeper
and launch the aurora jobs
:param cl_args:
:param unknown_args:
:param tmp_dir:
:return:
'''
# execute main of the topology to create the topology definition
topology_file = cl_args['topology-file-name']
java_defines = cl_args['topology_main_jvm_property']
main_class = cl_args['topology-class-name']
res = execute.heron_tar(
main_class,
topology_file,
tuple(unknown_args),
tmp_dir,
java_defines)
result.render(res)
if not result.is_successful(res):
err_context = ("Failed to create topology definition " \
"file when executing class '%s' of file '%s'") % (main_class, topology_file)
res.add_context(err_context)
return res
return launch_topologies(cl_args, topology_file, tmp_dir)
################################################################################
# Execute the pex file to create topology definition file by running
# the topology's main class.
################################################################################
# pylint: disable=unused-argument
def submit_pex(cl_args, unknown_args, tmp_dir):
# execute main of the topology to create the topology definition
topology_file = cl_args['topology-file-name']
topology_class_name = cl_args['topology-class-name']
res = execute.heron_pex(
topology_file, topology_class_name, tuple(unknown_args))
result.render(res)
if not result.is_successful(res):
err_context = ("Failed to create topology definition " \
"file when executing class '%s' of file '%s'") % (topology_class_name, topology_file)
res.add_context(err_context)
return res
return launch_topologies(cl_args, topology_file, tmp_dir)
################################################################################
# pylint: disable=unused-argument
def run(command, parser, cl_args, unknown_args):
'''
  Submits the topology to the scheduler.
  * Depending on the topology file name extension, we treat the file as a
    fatjar (if the ext is .jar), a tar file (if the ext is .tar/.tar.gz),
    or a pex file (if the ext is .pex).
  * We upload the topology file to the packer, update zookeeper and launch
    scheduler jobs representing that topology.
  * You can see your topology in Heron UI.
:param command:
:param parser:
:param cl_args:
:param unknown_args:
:return:
'''
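  # Illustrative invocation only -- the path, class name, and cluster below are
  # made up and the CLI surface is defined elsewhere, not in this function:
  #   heron submit local /path/to/topology.jar com.acme.MyTopology
  # would arrive here with cl_args['topology-file-name'] set to the jar path
  # and cl_args['topology-class-name'] set to the class, and the .jar
  # extension would route it to submit_fatjar() above.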
Log.debug("Submit Args %s", cl_args)
# get the topology file name
topology_file = cl_args['topology-file-name']
# check to see if the topology file exists
if not os.path.isfile(topology_file):
err_context = "Topology file '%s' does not exist" % topology_file
return SimpleResult(Status.InvocationError, err_context)
# check if it is a valid file type
jar_type = topology_file.endswith(".jar")
tar_type = topology_file.endswith(".tar") or topology_file.endswith(".tar.gz")
pex_type = topology_file.endswith(".pex")
if not jar_type and not tar_type and not pex_type:
    ext_name = os.path.splitext(topology_file)[1]
    err_context = "Unknown file type '%s'. Please use .tar or .tar.gz or .jar or .pex file"\
        % ext_name
return SimpleResult(Status.InvocationError, err_context)
# check if extra launch classpath is provided and if it is validate
if cl_args['extra_launch_classpath']:
valid_classpath = classpath.valid_java_classpath(cl_args['extra_launch_classpath'])
if not valid_classpath:
err_context = "One of jar or directory in extra launch classpath does not exist: %s" % \
cl_args['extra_launch_classpath']
return SimpleResult(Status.InvocationError, err_context)
# create a temporary directory for topology definition file
tmp_dir = tempfile.mkdtemp()
opts.cleaned_up_files.append(tmp_dir)
  # if the topology needs to be launched in a deactivated state, do so
if cl_args['deploy_deactivated']:
initial_state = topology_pb2.TopologyState.Name(topology_pb2.PAUSED)
else:
initial_state = topology_pb2.TopologyState.Name(topology_pb2.RUNNING)
# set the tmp dir and deactivated state in global options
opts.set_config('cmdline.topologydefn.tmpdirectory', tmp_dir)
opts.set_config('cmdline.topology.initial.state', initial_state)
# check the extension of the file name to see if it is tar/jar file.
if jar_type:
return submit_fatjar(cl_args, unknown_args, tmp_dir)
elif tar_type:
return submit_tar(cl_args, unknown_args, tmp_dir)
else:
return submit_pex(cl_args, unknown_args, tmp_dir)
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from ... import units as u
from .py3_test_quantity_annotations import *
# list of pairs (target unit/physical type, input unit)
x_inputs = [(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s),
([u.arcsec, u.km], u.deg), ([u.arcsec, u.km], u.km), # multiple allowed
(['angle', 'length'], u.deg), (['angle', 'length'], u.km)]
y_inputs = [(u.arcsec, u.deg), ('angle', u.deg),
(u.kpc/u.Myr, u.km/u.s), ('speed', u.km/u.s)]
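# Each pair reads as (target the decorated argument must be convertible to,
# unit actually passed by the test). For example, (u.arcsec, u.deg) decorates
# an argument with u.arcsec and then feeds it a value in degrees, which is
# convertible; ('angle', u.deg) expresses the same constraint through a
# physical type instead of a concrete unit.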
@pytest.fixture(scope="module",
params=list(range(len(x_inputs))))
def x_input(request):
return x_inputs[request.param]
@pytest.fixture(scope="module",
params=list(range(len(y_inputs))))
def y_input(request):
return y_inputs[request.param]
# ---- Tests that use the fixtures defined above ----
def test_args(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 1*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_args_nonquantity(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, 100)
assert isinstance(x, u.Quantity)
assert isinstance(y, int)
assert x.unit == x_unit
def test_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(u.UnitsError) as e:
        x, y = myfunc_args(1*x_unit, 100*u.Joule)  # Joule is not among the allowed units for y
str_to = str(y_target)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to)
def test_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, 100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
def test_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg, y=1*y_unit):
return x, my_arg, y
x, my_arg, y = myfunc_args(1*x_unit, 100, y=100*y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg, int)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
def test_unused_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg1, y=y_unit, my_arg2=1000):
return x, my_arg1, y, my_arg2
x, my_arg1, y, my_arg2 = myfunc_args(1*x_unit, 100,
y=100*y_unit, my_arg2=10)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg1, int)
assert isinstance(y, u.Quantity)
assert isinstance(my_arg2, int)
assert y.unit == y_unit
assert my_arg2 == 10
def test_kwarg_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(u.UnitsError) as e:
x, y = myfunc_args(1*x_unit, y=100*u.Joule)
str_to = str(y_target)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' must be in units convertible to '{0}'.".format(str_to)
def test_kwarg_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
with pytest.raises(TypeError) as e:
x, y = myfunc_args(1*x_unit, y=100)
assert str(e.value) == "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. You may want to pass in an astropy Quantity instead."
def test_kwarg_default(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10*y_unit):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_input(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x=1*x_unit, y=1*y_unit):
return x, y
kwargs = {'x': 10*x_unit, 'y': 10*y_unit}
x, y = myfunc_args(**kwargs)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_extra(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, **kwargs):
return x
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
# ---- Tests that don't use the fixtures ----
@pytest.mark.parametrize("x_unit,y_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_arg_equivalencies(x_unit, y_unit):
@u.quantity_input(x=x_unit, y=y_unit,
equivalencies=u.mass_energy())
def myfunc_args(x, y):
return x, y+(10*u.J) # Add an energy to check equiv is working
x, y = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == u.arcsec
assert y.unit == u.gram
@pytest.mark.parametrize("x_unit,energy_unit", [
(u.arcsec, u.eV),
('angle', 'energy')])
def test_kwarg_equivalencies(x_unit, energy_unit):
@u.quantity_input(x=x_unit, energy=energy_unit, equivalencies=u.mass_energy())
def myfunc_args(x, energy=10*u.eV):
return x, energy+(10*u.J) # Add an energy to check equiv is working
x, energy = myfunc_args(1*u.arcsec, 100*u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(energy, u.Quantity)
assert x.unit == u.arcsec
assert energy.unit == u.gram
def test_no_equivalent():
class test_unit:
pass
class test_quantity:
unit = test_unit()
@u.quantity_input(x=u.arcsec)
def myfunc_args(x):
return x
with pytest.raises(TypeError) as e:
x, y = myfunc_args(test_quantity())
assert str(e.value) == "Argument 'x' to function 'myfunc_args' has a 'unit' attribute without an 'is_equivalent' method. You may want to pass in an astropy Quantity instead."
def test_kwarg_invalid_physical_type():
@u.quantity_input(x='angle', y='africanswallow')
def myfunc_args(x, y=10*u.deg):
return x, y
with pytest.raises(ValueError) as e:
x, y = myfunc_args(1*u.arcsec, y=100*u.deg)
assert str(e.value) == "Invalid unit or physical type 'africanswallow'."
def test_default_value_check():
x_target = u.deg
x_unit = u.arcsec
with pytest.raises(TypeError):
@u.quantity_input(x=x_target)
def myfunc_args(x=1.):
return x
x = myfunc_args()
x = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
def test_args_None():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
y_unit = u.kpc
@u.quantity_input(x=[x_target, None], y=[None, y_target])
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(None, 1*y_unit)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
assert x is None
def test_args_None_kwarg():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=None):
return x, y
x, y = myfunc_args(1*x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(1*x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
with pytest.raises(TypeError):
x, y = myfunc_args(None, None)
|
|
import logging
import numpy as np
import pydart
from fileinfoworld import FileInfoWorld
from controller import Controller
import gltools
from spring_stair import SpringStair
from motion import *
from hyper import *
from plotter_torque import PlotterTorque
# from guppy import hpy
class Simulation(object):
def __init__(self, step_activation=None):
self.prefix = ''
self.postfix = ''
self.logger = logging.getLogger(__name__)
logger = self.logger
# Init pydart
pydart.init()
logger.info('pydart initialization OK')
# Create world
# if step_activation is None:
# skel_filename = 'data/skel/fullbody_baselineStairs2.skel'
# else:
step_activation = 0.0
skel_filename = 'data/skel/fullbody_springStair.skel'
# skel_filename = 'data/skel/soft_springStair.skel'
self.world = pydart.create_world(1.0 / 1000.0, skel_filename)
logger.info('pydart create_world OK: dt = %f' % self.world.dt)
# Configure human
self.skel = self.world.skels[0]
print 'Skeleton mass = ', self.skel.m
for i, body in enumerate(self.skel.bodies):
print i, body.name
for i, dof in enumerate(self.skel.dofs):
print i, dof.name
# # Configure stair: disable the movement of the first step
# self.stair = self.world.skels[1]
# self.stair.set_mobile(False)
self.stair = SpringStair(self.world, self)
logger.info('set step activation: %s' % step_activation)
self.stair.set_activation(step_activation)
# Load the reference motion
self.ref = FileInfoWorld()
self.ref.load('data/other/halfCycle.txt')
logger.info('load reference motions OK: # %d' % self.ref.num_frames)
self.ref.modify_pose(self.skel)
self.ref.append_mirrored_motion(self.skel)
self.ref.append_shifted_motion(self.skel)
self.ref.add_offset()
logger.info('modify reference motions OK: # %d' % self.ref.num_frames)
        # Construct the mutable motion
# self.motion = StepOffsetMotion(self.skel, self.ref, self.stair)
# self.motion = RadialBasisMotion(self.skel, self.ref, self.stair)
# self.motion = NNFeedbackMotion(self.skel, self.ref, self.stair)
# self.motion = FeedbackMotion(self.skel, self.ref, self.stair)
# self.motion = SinglePoseMotion(self.skel, self.ref, self.stair)
# self.motion = WindowedMotion(self.skel, self.ref, self.stair)
# self.motion = GlobalWindowedMotion(self.skel, self.ref, self.stair)
self.motion = AdaptiveWindowedMotion(self.skel, self.ref, self.stair)
self.motion.sim = self
# Create the controller
self.skel.controller = Controller(self,
self.skel,
self.world.dt,
self.motion)
        # To check the target
self.target_index = 0
# Reset the scene
self.random_force = np.array([0.0, 0.0, 0.0])
self.reset_counter = 0
self.reset()
self.begin_time = 0.0
logger.info('set the initial pose OK')
def reset(self):
self.skel.controller.reset()
self.stair.reset()
self.world.reset()
q = self.motion.pose_at_frame(0, isRef=True)
q = pydart.SkelVector(q, self.skel)
q['j_heel_right_1'] += 0.05
self.skel.q = q
self.skel.qdot = self.motion.velocity_at_frame(0, isRef=True)
# self.random_force = 400.0 * (np.random.rand(3) - 0.5)
# self.random_force[1] = 0.0
# self.random_force[2] *= 0.1
# self.random_force = np.array([200.0, 0.0, 0.0])
# self.random_force = np.array([0.0, 0.0, 0.0])
# self.logger.info('force: %s' % self.random_force)
# if self.reset_counter % 50 == 0:
# h = hpy()
# print h.heap()
# self.reset_counter += 1
def get_time(self):
return self.world.t + self.begin_time - 0.0
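    # The world timestep set in __init__ is 1 ms, so frame indices correspond
    # to simulated milliseconds; begin_time (in seconds) therefore maps to
    # int(1000.0 * begin_time) frames in get_frame() below.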
def get_frame(self):
begin_index = int(1000.0 * self.begin_time)
return self.world.frame + begin_index - 0
def step(self):
self.stair.apply_force()
# i = max(self.world.frame, -200)
i = max(self.get_frame(), 0)
# if 800 < self.get_frame() < 900:
# self.skel.body('h_head').add_ext_force(self.random_force)
c = self.skel.controller
c.qhat = self.motion.pose_at_frame(i, isRef=False)
c.q_ref = self.motion.pose_at_frame(i, isRef=True)
c.qdhat = self.motion.velocity_at_frame(i, isRef=True)
# print i, self.skel.contacted_body_names()
# print self.world.t
# print self.skel.body('h_heel_left').C
# print [self.stair.step_height(j) for j in range(3)]
self.world.step()
# # Debug purpose
# self.skel.q = self.motion.pose_at_frame(i, isRef=True)
def num_frames(self):
return self.world.num_frames()
def set_frame(self, idx):
self.world.set_frame(idx)
def render(self):
gltools.render_COM(self.skel)
self.world.render()
# self.render_target()
if 800 < self.get_frame() < 900:
C = self.skel.body('h_head').C
f = self.random_force
gltools.render_line(C, C + 0.1 * f, (1, 0, 0))
# self.evaluator.render()
# self.planner.render()
def render_target(self):
# frame = min(self.world.frame, self.ref.num_frames - 1)
frame = min(self.get_frame(), self.ref.num_frames - 1)
frame = max(frame, 0)
self.render_target_at_frame(frame)
def render_target_at_frame(self, frame):
x = self.skel.x
qhat = self.ref.pose_at(frame, self.skel.id)
self.skel.q = qhat
self.skel.render_with_color(0.3, 0.3, 0.3, 0.5)
self.skel.x = x
def contacts(self):
return self.world.contacts()
def update_to_target(self):
q = self.ref.pose_at(self.target_index, self.skel.id)
# q = self.motion.pose_at_frame(self.target_index, self.skel.id)
self.skel.q = q
def key_pressed(self, key):
self.logger.info('key pressed: [%s]' % key)
if key == ']':
print self.target_index
self.target_index = (self.target_index + 10) % self.ref.num_frames
self.update_to_target()
elif key == '[':
print self.target_index
self.target_index = (self.target_index - 10) % self.ref.num_frames
self.update_to_target()
else:
if hasattr(self.motion, 'key_pressed'):
self.motion.key_pressed(key)
def optimize(self):
# self.solver = Optimizer(self, self.motion)
# self.solver.launch()
self.motion.launch(self)
def optimize_hyper(self):
print('optimize hyper parameters')
h = StairLatch(self)
h.launch()
def kill_optimizer(self):
self.solver.to_be_killed = True
def title(self, full=True):
if self.stair.num_steps() == 0:
title = 'Normal Stair'
else:
title = 'Stair_%.3f' % self.stair._activation
if not full:
return title
if len(self.prefix) > 0:
title = '%s_%s' % (self.prefix, title)
if len(self.postfix) > 0:
title = '%s_%s' % (title, self.postfix)
return title
def plot_torques(self):
pl = PlotterTorque()
pl.prefix = self.prefix
pl.postfix = self.postfix
pl.plot(self.skel.controller, self.title(False))
def change_solution(self, x, y):
self.logger.info('change solution x, y = %d, %d' % (x, y))
self.motion.set_solution(x, y)
def refresh_solutions(self):
(nx, ny) = self.motion.num_solutions()
self.logger.info('nx, ny = %d, %d' % (nx, ny))
return (nx, ny)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import time
import numpy as np
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors import InvalidArgumentError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _OptimizerOptions():
for cse in [False, True]:
for inline in [False, True]:
for cfold in [False, True]:
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=cse,
do_function_inlining=inline,
do_constant_folding=cfold)))
if cse:
cfg.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
else:
cfg.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
if inline:
cfg.graph_options.rewrite_options.function_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
else:
cfg.graph_options.rewrite_options.function_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
if cfold:
cfg.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.ON)
else:
cfg.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
yield cfg
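# Typical usage, mirroring the tests below: every yielded ConfigProto pins the
# legacy optimizer to opt_level L0 and independently toggles common
# subexpression elimination, function inlining, and constant folding, with the
# matching grappler rewrite options switched ON/OFF in lockstep.
#   for cfg in _OptimizerOptions():
#     with session.Session(config=cfg) as sess:
#       ...  # run the graph under this optimizer configuration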
class FunctionTest(test.TestCase):
"""Test methods for verifying Function support.
These test methods are used as mix-ins in two test cases: with
and without C API support.
"""
def testIdentity(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], self.evaluate(call))
@test_util.run_deprecated_v1
def testIdentityImplicitDeref(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
var = variables.VariableV1([18.0])
call = MyIdentityFunc(var._ref()) # pylint: disable=protected-access
self.assertEqual("MyIdentity", call.op.name)
for cfg in _OptimizerOptions():
with session.Session(config=cfg) as sess:
self.evaluate(var.initializer)
self.assertAllEqual([18.0], self.evaluate(call))
def testIdentityOutputName(self):
@function.Defun(
dtypes.float32, func_name="MyIdentity", out_names=["my_result_name"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], self.evaluate(call))
def testTooManyOutputNames(self):
@function.Defun(
dtypes.float32,
func_name="MyIdentity",
out_names=["my_result1", "my_result2"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
(r"output names must be either empty or equal in size to outputs. "
"output names size = 2 outputs size = 1")):
MyIdentityFunc([18.0])
def testDefineFunction2Args(self):
@function.Defun(dtypes.float32, dtypes.float32, func_name="APlus2B")
def APlus2B(a, b):
return a + b * 2
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], self.evaluate(call))
def testFunctionWithNoOutput(self):
@function.Defun(dtypes.float32, dtypes.float32)
def APlus2B(a, b):
c = a + b * 2 # Create some ops to have nodes in the body
print(c) # Using 'print' to make lint happy
with ops.Graph().as_default():
# Call function. There should be no exceptions.
APlus2B([1.0], [2.0])
def testDefineFunction2ArgsOutputName(self):
@function.Defun(
dtypes.float32,
dtypes.float32,
func_name="APlus2B",
out_names=["my_result_name"])
def APlus2B(a, b):
return a + b * 2
# APlus2B is stateless.
self.assertEqual([], APlus2B.stateful_ops)
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], self.evaluate(call))
def testDefineFunctionDuplicateOutputs(self):
@function.Defun(dtypes.float32, func_name="Duplicate")
def Duplicate(a):
b = a + 1.0
return b, b
g = ops.Graph()
with g.as_default():
Duplicate([3.0])
func_sig = g.as_graph_def().library.function[0].signature
# The names given to both outputs should be different
# even though the same tensor is emitted to both.
out_names = [a.name for a in func_sig.output_arg]
self.assertEqual(2, len(out_names))
self.assertNotEqual(out_names[0], out_names[1])
def testGradientFunc(self):
@function.Defun(dtypes.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
return x * x + 1.0
@function.Defun(dtypes.float32, dtypes.float32)
def XSquarePlusOneGrad(x, dy):
dx = functional_ops.symbolic_gradient(
input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx")
return dx
g = ops.Graph()
with g.as_default():
call_f = XSquarePlusOne([2.0])
call_g = XSquarePlusOneGrad([2.0], [0.1])
with session.Session() as sess:
self.assertAllClose([5.0], self.evaluate(call_f))
self.assertAllClose([0.4], self.evaluate(call_g))
def testTanhSymGrad(self):
@function.Defun(dtypes.float32)
def Forward(x):
return math_ops.reduce_sum(math_ops.tanh(x))
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y = Forward(x)
dx = gradients_impl.gradients([y], [x])
inp = np.array([-1, 1, 2, -2], dtype=np.float32)
feed = {x: inp}
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True)))
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dx, feed)
self.assertAllClose(1 - np.square(np.tanh(inp)), out)
def testCustomGradient(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def XentLossGrad(logits, labels, dloss):
dlogits = array_ops.reshape(dloss, [-1, 1]) * (
nn_ops.softmax(logits) - labels)
dlabels = array_ops.zeros_like(labels)
# Takes exp(dlogits) to differentiate it from the "correct" gradient.
return math_ops.exp(dlogits), dlabels
@function.Defun(dtype, dtype, grad_func=XentLossGrad)
def XentLoss(logits, labels):
return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)),
1)
g = ops.Graph()
with g.as_default():
logits = array_ops.placeholder(dtype)
labels = array_ops.placeholder(dtype)
loss = XentLoss(logits, labels)
dlogits = gradients_impl.gradients([loss], [logits])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
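    # Expected value check: XentLossGrad returns exp(dlogits) where
    # dlogits = dloss * (softmax(logits) - labels), and gradients() supplies
    # dloss = 1, so the output should equal exp(prob - y) as asserted below.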
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dlogits, {logits: x, labels: y})
self.assertAllClose(out, np.exp(prob - y))
@test_util.disable_xla("This test never passed for XLA")
def testCustomGradientError(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def Grad(x, dy, dz):
# Should have returned 1 result.
return x, dy + dz
@function.Defun(dtype, grad_func=Grad)
def Forward(x):
return x, x
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(dtype)
out = math_ops.add_n(Forward(inp))
dinp = gradients_impl.gradients(out, [inp])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"SymGrad expects to return 1.*but get 2.*instead"):
_ = sess.run(dinp, {inp: x})
def testSymGradShape(self):
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, [25, 4])
y = array_ops.placeholder(dtypes.float32, [200, 100])
dz = array_ops.placeholder(dtypes.float32, [1])
      # We assume Foo is a function of (x, y) -> (z). Then, Foo's
# gradient function is (x, y, dz) -> (dx, dy). dx's shape
# should be the same as x's; and dy's shape should be the same
# as y's.
dx, dy = functional_ops.symbolic_gradient(
input=[x, y, dz], Tout=[dtypes.float32] * 2, f="Foo")
self.assertEqual(x.get_shape(), dx.get_shape())
self.assertEqual(y.get_shape(), dy.get_shape())
@test_util.run_deprecated_v1
def testSymGradAttr(self):
@function.Defun(noinline=True)
def Foo(x):
return x * 2
self.assertTrue(
Foo.instantiate([dtypes.float32]).definition.attr["_noinline"].b)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(3.0)
y = Foo(x)
dx, = gradients_impl.gradients(y, [x])
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
with self.session(graph=g, config=cfg):
self.assertAllClose(y.eval(), 6.)
self.assertAllClose(dx.eval(), 2.)
def _testZNoDepOnY(self, use_const_grad_ys):
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(x, y): # pylint: disable=unused-argument
return x * 2
with ops.Graph().as_default():
      # z = Foo(x, y). z does not depend on y.
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
z = Foo(x, y)
if use_const_grad_ys:
dx, dy = gradients_impl.gradients([z], [x, y], grad_ys=[1.0])
else:
dx, dy = gradients_impl.gradients([z], [x, y])
with session.Session() as sess:
dx_val, dy_val = self.evaluate([dx, dy])
self.assertEqual([2.0], dx_val)
self.assertEqual([0.0], dy_val)
def testZNoDepOnY(self):
self._testZNoDepOnY(False)
def testZNoDepOnYConstGradYs(self):
# Tests for constant folding of grad_ys
self._testZNoDepOnY(True)
def testDefineFunctionNoArgs(self):
@function.Defun(func_name="AConstant")
def AConstant():
return constant_op.constant([42])
with ops.Graph().as_default():
call = AConstant()
self.assertEqual("AConstant", call.op.name)
with session.Session() as sess:
self.assertAllEqual([42], self.evaluate(call))
def testDefineFunctionNames(self):
@function.Defun(dtypes.float32, func_name="Foo")
def Foo(a):
return a + 1
with ops.Graph().as_default():
call1 = Foo([1.0])
self.assertEqual("Foo", call1.op.name)
call2 = Foo([1.0])
self.assertEqual("Foo_1", call2.op.name)
# pylint: disable=unexpected-keyword-arg
call3 = Foo([1.0], name="mine")
self.assertEqual("mine", call3.op.name)
with ops.name_scope("my"):
call4 = Foo([1.0], name="precious")
self.assertEqual("my/precious", call4.op.name)
def testNoOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
y = logging_ops.Print(x, [], "Hello")
with ops.control_dependencies([y]):
z = control_flow_ops.no_op()
with ops.control_dependencies([z]):
return x * 2
with ops.Graph().as_default(), self.cached_session():
z = Foo(constant_op.constant(3.0))
self.assertAllEqual(z.eval(), 6.0)
def testAssertOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
check = gen_logging_ops._assert(math_ops.greater(x, 0), [x])
with ops.control_dependencies([check]):
return x * 2
# Foo contains a stateful op (Assert).
self.assertEqual([("Assert", "Assert")], Foo.stateful_ops)
g = ops.Graph()
with g.as_default(), self.cached_session():
self.assertAllEqual(Foo(constant_op.constant(3.0)).eval(), 6.0)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion failed.*-3"):
self.assertAllEqual(Foo(constant_op.constant(-3.0)).eval(), 6.0)
@test_util.run_deprecated_v1
def testAssertWrapper(self):
@function.Defun(dtypes.float32)
def MyFn(x):
with ops.control_dependencies(
[control_flow_ops.Assert(math_ops.less_equal(x, 10.0), [x])]):
return array_ops.identity(x)
with self.cached_session():
self.assertEqual(1.0, MyFn(1.0).eval())
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
_ = MyFn(100.0).eval()
@test_util.run_deprecated_v1
def testWhileLoopCallsFunc(self):
with self.session(use_gpu=True) as sess:
@function.Defun(dtypes.float32)
def Times2(x):
constant_two = constant_op.constant(2, dtypes.int32)
two_on_gpu = math_ops.cast(constant_two, dtypes.float32)
return x * two_on_gpu
def Body(x):
x2 = Times2(x)
x2.set_shape([])
return x2
loop = control_flow_ops.while_loop(lambda x: x < 1e5, Body, [1.0])
ans = self.evaluate(loop)
self.assertAllClose(ans, 131072.)
@test_util.run_deprecated_v1
def testControlFlowStrictness(self):
"""Inlined functions must not execute in a untaken control flow branch."""
@function.Defun(dtypes.int32)
def AssertFail(x):
# Assertion that always fails and does not have a data dependency on `x`.
assert_false = control_flow_ops.Assert(False, [42])
with ops.control_dependencies([assert_false]):
return array_ops.identity(x)
with ops.device("CPU"):
pred = array_ops.placeholder(dtypes.bool)
x = array_ops.placeholder(dtypes.int32)
cond = control_flow_ops.cond(pred, lambda: x + 1, lambda: AssertFail(x))
# pylint: disable=unnecessary-lambda
loop = control_flow_ops.while_loop(lambda y: pred,
lambda y: AssertFail(y), [x])
# pylint: enable=unnecessary-lambda
rewriter_config = rewriter_config_pb2.RewriterConfig(
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
# Enables inlining.
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True),
rewrite_options=rewriter_config))
with session.Session(config=config) as sess:
# Since the 'False' branch is not taken, the assertion should not fire.
self.assertEqual(4, sess.run(cond, {pred: True, x: 3}))
# The assertion should still fire if the False branch is taken.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(cond, {pred: False, x: 3})
# Similarly for loops.
self.assertEqual(3, sess.run(loop, {pred: False, x: 3}))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(loop, {pred: True, x: 3})
@test_util.run_deprecated_v1
def testVar(self):
@function.Defun(dtypes.float32)
def Foo(x):
return x * x + 1
g = ops.Graph()
with g.as_default():
v = variables.Variable(constant_op.constant(10.0))
z = Foo(v)
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllEqual(z.eval(), 101.)
@test_util.run_deprecated_v1
def testResourceVarAsImplicitInput(self):
g = ops.Graph()
with g.as_default(), ops.device("cpu:0"):
expected_type = dtypes.float32
expected_shape = tensor_shape.TensorShape((4, 4))
v = variable_scope.get_variable(
"var", expected_shape, expected_type, use_resource=True)
@function.Defun()
def Foo():
captured = array_ops.identity(v)
self.assertEqual(expected_type, captured.dtype)
self.assertEqual(expected_shape, captured.shape)
return captured, array_ops.shape(captured)
expected_val = v.value()
actual_val, actual_shape = Foo()
with self.session(graph=g):
v.initializer.run()
self.assertAllEqual(expected_val.eval(), self.evaluate(actual_val))
self.assertAllEqual(expected_shape, self.evaluate(actual_shape))
def testDefineErrors(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "can not return None"):
@function.Defun()
def TwoNone():
return None, None
_ = TwoNone.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def DefaultArg(unused_a=12):
return constant_op.constant([1])
_ = DefaultArg.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def KwArgs(**unused_kwargs):
return constant_op.constant([1])
_ = KwArgs.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32)
def PlusMinusV2(a, b):
return a + b, b - a
_ = PlusMinusV2.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32, dtypes.float32, dtypes.float32)
def PlusMinusV3(a, b):
return a + b, b - a
_ = PlusMinusV3.definition
def testCallErrors(self):
@function.Defun()
def Const():
return constant_op.constant(1)
@function.Defun(dtypes.int32)
def PlusOne(a):
return a + 1
@function.Defun(dtypes.int32, dtypes.int32)
def PlusMinus(a, b):
return a + b, b - a
with ops.Graph().as_default():
_ = Const()
# pylint: disable=too-many-function-args
# pylint: disable=unexpected-keyword-arg
# pylint: disable=no-value-for-parameter
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1)
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne()
_ = PlusOne(1)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus()
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus(1)
_ = PlusMinus(1, 2)
_ = PlusOne(1, name="p1")
with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
_ = PlusOne(1, device="/device:GPU:0")
def testFunctionDecorator(self):
@function.Defun(dtypes.float32, func_name="Minus1")
def Minus1(b):
return b - 1.0
with ops.Graph().as_default():
call1 = Minus1([2.])
self.assertTrue(isinstance(Minus1, function._DefinedFunction))
self.assertEqual(Minus1.name, "Minus1")
# pylint: disable=unexpected-keyword-arg
call2 = Minus1(call1, name="next")
# pylint: enable=unexpected-keyword-arg
self.assertEqual("next", call2.op.name)
with session.Session() as sess:
self.assertAllEqual([1], self.evaluate(call1))
self.assertAllEqual([0], self.evaluate(call2))
def testNestedFunction(self):
@function.Defun(dtypes.float32)
def Cube(x):
return x * x * x
@function.Defun(dtypes.float32, dtypes.float32)
def CubeXPlusY(x, y):
return Cube(x) + y
with ops.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.cached_session():
self.assertAllEqual(z.eval(), 25.0)
def testNestedDefinedFunction(self):
@function.Defun(dtypes.float32, dtypes.float32)
def CubeXPlusY(x, y):
@function.Defun(dtypes.float32)
def Cube(x):
return x * x * x
return Cube(x) + y
with ops.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.cached_session():
self.assertAllEqual(z.eval(), 25.0)
def testUnusedFunction(self):
invoked = False
# pylint: disable=unused-variable
@function.Defun()
def Unused():
invoked = True
return constant_op.constant(42.)
self.assertFalse(invoked)
g = ops.Graph()
with g.as_default():
@function.Defun()
def Unused2():
invoked = True
return constant_op.constant(7.)
constant_op.constant(3.)
# pylint: enable=unused-variable
self.assertFalse(invoked)
gdef = g.as_graph_def()
self.assertEqual(0, len(gdef.library.function))
@test_util.run_deprecated_v1
def testReduction(self):
g = ops.Graph()
    # BN0 computes a batch-normalized matrix along rows.
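    # Concretely: mean = reduce_mean(x, axis=0), var = reduce_mean((x - mean)**2)
    # (a scalar, biased estimate), and BN0(x) = (x - mean) / sqrt(var + 1e-8).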
def BN0(x):
mean = math_ops.reduce_mean(x, [0])
var = math_ops.reduce_mean(math_ops.square(x - mean)) # biased var
rstd = math_ops.rsqrt(var + 1e-8)
return (x - mean) * rstd
# Wraps BatchNorm in a tf function.
@function.Defun(dtypes.float32)
def BN1(x):
return BN0(x)
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y0 = BN0(x) # A plain graph
y1 = BN1(x) # A tf function
dx0, = gradients_impl.gradients([y0], [x])
dx1, = gradients_impl.gradients([y1], [x])
# Both should produce the same result and gradient.
with self.session(graph=g) as sess:
vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
self.assertAllClose(vals[0], vals[1])
self.assertAllClose(vals[2], vals[3])
@test_util.run_deprecated_v1
def testCapture(self):
g = ops.Graph()
with g.as_default():
w = variables.Variable(constant_op.constant([[1.0]]))
b = variables.Variable(constant_op.constant([2.0]))
# Foo() captures w and b.
@function.Defun(dtypes.float32)
def Foo(x):
# Plus() captures b.
@function.Defun(dtypes.float32)
def Plus(y):
return y + b
return Plus(math_ops.matmul(w, x))
y = Foo(constant_op.constant([[10.]]))
@function.Defun()
def Bar():
return w
z = Bar()
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllEqual(y.eval(), [[12.0]])
self.assertAllEqual(z.eval(), [[1.0]])
def testCaptureControls(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([10.0])
x = logging_ops.Print(x, [x], "outer")
@function.Defun(dtypes.float32)
def Foo(y):
with ops.control_dependencies([x]):
y = logging_ops.Print(y, [y], "inner")
return y
with self.assertRaisesRegexp(ValueError, "not an element of this graph."):
# NOTE: We still do not support capturing control deps.
_ = Foo(x)
@test_util.run_deprecated_v1
def testCaptureInWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
@function.Defun()
def Foo():
return control_flow_ops.while_loop(lambda i: i < 10, lambda i: i + x,
[0])
y = Foo()
with self.session(graph=g) as sess:
self.assertEqual(self.evaluate(y), 10)
@test_util.run_deprecated_v1
def testCaptureInCond(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
@function.Defun(dtypes.bool)
def Foo(pred):
return control_flow_ops.cond(pred, lambda: x, lambda: x + 1)
y = Foo(True)
z = Foo(False)
with self.session(graph=g) as sess:
self.assertEqual(self.evaluate(y), 1)
self.assertEqual(self.evaluate(z), 2)
def testStableName(self):
@function.Defun()
def Foo(x, y, z):
return math_ops.tanh(math_ops.matmul(x, y) + z)
if sys.byteorder == "big":
self.assertEqual("Foo_kEdkAG8SJvg",
Foo.instantiate([dtypes.float32] * 3).name)
else:
self.assertEqual("Foo_aCYSbwBkR5A",
Foo.instantiate([dtypes.float32] * 3).name)
@test_util.run_deprecated_v1
def testSignatureHash(self):
# Foo.Inner and Bar.Inner have identical function body but have
# different signatures. They should be treated as two different functions.
@function.Defun()
def Foo(x):
@function.Defun()
def Inner(x):
return x + 10.
return Inner(x)
@function.Defun()
def Bar(x):
@function.Defun()
def Inner(x, unused_y, unused_z):
return x + 10.
return Inner(x, 2., 3.)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(10.0)
y = Foo(x)
z = Bar(x)
with self.session(graph=g) as sess:
v0, v1 = self.evaluate([y, z])
self.assertAllEqual(v0, 20.)
self.assertAllEqual(v1, 20.)
def testShapeFunction(self):
@function.Defun(
dtypes.float32, shape_func=lambda op: [op.inputs[0].get_shape()])
def Foo(x):
return x + 1.0
@function.Defun(
shape_func=lambda op: [[1] + op.inputs[0].get_shape().as_list()])
def Bar(x):
return array_ops.stack([x])
g = ops.Graph()
with g.as_default():
x = Foo([1.0, 2.0])
self.assertEqual(x.get_shape().as_list(), [2])
y = Bar(array_ops.zeros([1, 2, 3]))
self.assertAllEqual(y.get_shape().as_list(), [1, 1, 2, 3])
@test_util.run_deprecated_v1
def testVariableReuse(self):
def LinearWithReuse(input_tensor, reuse=None):
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=reuse):
w = variable_scope.get_variable(
"w", shape=[size, size], dtype=input_tensor.dtype)
return math_ops.matmul(input_tensor, w)
@function.Defun(dtypes.float32)
def Foo(inputs):
inputs = array_ops.reshape(inputs, [32, 100])
hidden = LinearWithReuse(inputs)
return LinearWithReuse(hidden, reuse=True)
input_op = array_ops.placeholder(shape=[32, 100], dtype=dtypes.float32)
output_op = Foo(input_op)
global_vars = variables.global_variables()
self.assertEqual(len(global_vars), 1)
self.assertEqual(global_vars[0].name, "linear/w:0")
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
output_val = sess.run(
output_op, feed_dict={input_op: np.random.rand(32, 100)})
self.assertEqual(output_val.shape, (32, 100))
@test_util.run_deprecated_v1
def testFunctionCallInDifferentVariableScopes(self):
@function.Defun(dtypes.float32)
def Foo(inputs):
var = variable_scope.get_variable(
"var",
shape=[10],
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
return inputs + var
input_op = array_ops.placeholder(shape=[10], dtype=dtypes.float32)
with variable_scope.variable_scope("vs1"):
out1_op = Foo(input_op)
with variable_scope.variable_scope("vs2"):
out2_op = Foo(input_op)
global_vars = variables.global_variables()
self.assertEqual(len(global_vars), 1)
self.assertEqual(global_vars[0].name, "vs1/var:0")
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
out1, out2 = sess.run(
[out1_op, out2_op], feed_dict={input_op: np.linspace(1, 10, 10)})
self.assertAllEqual(out1, np.linspace(2, 11, 10))
self.assertAllEqual(out2, np.linspace(2, 11, 10))
def testTwoInputsSameOp(self):
g = ops.Graph()
with g.as_default():
m = array_ops.placeholder(dtypes.float32)
s, u, v = linalg_ops.svd(m)
ss = math_ops.reduce_sum(s)
uu = math_ops.reduce_sum(u)
vv = math_ops.reduce_sum(v)
result = ss + uu + vv
f = graph_to_function_def.graph_to_function_def(
g,
g.get_operations()[1:], # skip the placeholder
[s, u, v],
[result])
self.assertEqual(len(f.signature.input_arg), 3)
def testGradientWithIntegerFunctionArgument(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(t, x):
return x[t]
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(dtypes.float32)
t = constant_op.constant(0, dtypes.int32)
out = Foo(t, inp)
dinp, = gradients_impl.gradients(out, [inp])
x = np.zeros((2,)).astype(np.float32)
with session.Session(graph=g) as sess:
self.assertAllClose(
np.array([1.0, 0.0]).astype(np.float32), sess.run(dinp, {inp: x}))
@test_util.run_deprecated_v1
def testFunctionMarkedStateful(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(t, x):
return x[t]
@function.Defun(dtypes.int64)
def Bar(x):
return x
# NOTE(mrry): All functions are currently considered stateless by the
# runtime, so we simulate a "stateful" function.
# TODO(b/70565970): Remove this hack when we are able to build stateful
# functions using the API.
# pylint: disable=protected-access
Foo._signature.is_stateful = True
Bar._signature.is_stateful = True
# pylint: enable=protected-access
result_1 = Foo(3, [1.0, 2.0, 3.0, 4.0])
result_2 = Bar(constant_op.constant(100, dtype=dtypes.int64))
with session.Session() as sess:
self.assertEqual(4.0, self.evaluate(result_1))
self.assertEqual(100, self.evaluate(result_2))
self.assertEqual((4.0, 100), sess.run((result_1, result_2)))
@test_util.run_deprecated_v1
def testStatefulFunction(self):
@function.Defun()
def FunctionWithStatelessOp():
return constant_op.constant(42.0)
@function.Defun()
def FunctionWithStatefulOp():
return random_ops.random_uniform([100], maxval=10, dtype=dtypes.int32)
@function.Defun()
def FunctionWithStatelessFunctionCall():
return FunctionWithStatelessOp()
@function.Defun()
def FunctionWithStatefulFunctionCall():
return FunctionWithStatefulOp()
# Test that the `is_stateful` bit is propagated.
self.assertFalse(FunctionWithStatelessOp.definition.signature.is_stateful)
self.assertTrue(FunctionWithStatefulOp.definition.signature.is_stateful)
self.assertFalse(
FunctionWithStatelessFunctionCall.definition.signature.is_stateful)
self.assertTrue(
FunctionWithStatefulFunctionCall.definition.signature.is_stateful)
# Ensure that two invocations of the same random-number-generating
# function produce different results.
result1 = FunctionWithStatefulFunctionCall()
result2 = FunctionWithStatefulFunctionCall()
# Statefulness affects how the function is treated by the various
# optimization passes, so run the test in each optimizer
# configuration.
for config in _OptimizerOptions():
with session.Session(config=config) as sess:
val1, val2 = sess.run((result1, result2))
self.assertFalse(all(val1 == val2))
val3, val4 = sess.run((result1, result2))
self.assertFalse(all(val3 == val1))
self.assertFalse(all(val4 == val2))
def testStatefulFunctionWithWhitelisting(self):
t = random_ops.random_uniform([100], maxval=10, dtype=dtypes.int32)
@function.Defun(capture_by_value=True)
def StatefulFn():
return t + constant_op.constant(3, dtype=dtypes.int32)
# First time we try to capture a stateful RandomUniform op.
with self.assertRaisesRegexp(ValueError, "Cannot capture a stateful node"):
res = StatefulFn()
    # This time we whitelist this op, so that it's recreated.
@function.Defun(capture_by_value=True, whitelisted_stateful_ops=set([t.op]))
def StatefulFn2():
return t + constant_op.constant(3, dtype=dtypes.int32)
res = StatefulFn2()
with session.Session() as sess:
r = sess.run(res)
for i in r:
self.assertGreaterEqual(i, 3)
@test_util.run_deprecated_v1
def testSameFunctionOnTwoDevices(self):
@function.Defun(dtypes.float32)
def AddOne(x):
return x + 1.0
with ops.device("/cpu:0"):
f_0 = AddOne(41.0)
with ops.device("/cpu:1"):
f_1 = AddOne(43.0)
for config in _OptimizerOptions():
config.device_count["CPU"] = 2
with session.Session(config=config) as sess:
self.assertEqual(42.0, self.evaluate(f_0))
self.assertEqual(44.0, self.evaluate(f_1))
self.assertEqual((42.0, 44.0), sess.run((f_0, f_1)))
@test_util.run_deprecated_v1
def testGuaranteedConstsAreCaptured(self):
var = variables.Variable(1.0)
const = array_ops.guarantee_const(var)
also_const = array_ops.identity(const)
still_const = array_ops.identity(also_const)
not_const = still_const + var
also_not_const = array_ops.placeholder(dtypes.float32)
@function.Defun()
def CapturesGuaranteedConst():
output = const + also_const + still_const + not_const + also_not_const
first, second, third, fourth, fifth = function.get_extra_args()
self.assertEqual("GuaranteeConst", first.consumers()[0].node_def.op)
self.assertEqual("GuaranteeConst", second.consumers()[0].node_def.op)
self.assertEqual("GuaranteeConst", third.consumers()[0].node_def.op)
self.assertNotEqual("GuaranteeConst", fourth.consumers()[0].node_def.op)
self.assertNotEqual("GuaranteeConst", fifth.consumers()[0].node_def.op)
return output
with self.session(use_gpu=False) as sess:
self.evaluate(var.initializer)
_ = sess.run(CapturesGuaranteedConst(), {also_not_const: 1.0})
@test_util.run_deprecated_v1
def testSameFunctionDifferentGrads(self):
def PartOne(x):
# Default grad is dx = dy * 2
@function.Defun(dtypes.float32)
def Foo(x):
return x * 2
return Foo(x)
def PartTwo(x):
@function.Defun(dtypes.float32, dtypes.float32)
def Bar(x, dy):
return x + dy # crazy backprop
@function.Defun(dtypes.float32, grad_func=Bar)
def Foo(x):
return x * 2
return Foo(x)
def PartThree(x):
def Bar(op, dy):
return op.inputs[0] * dy / 2 # crazy backprop
@function.Defun(dtypes.float32, python_grad_func=Bar)
def Foo(x):
return x * 2
return Foo(x)
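    # With x = 100 and dy = 1: PartOne uses the default grad (dy * 2 = 2),
    # PartTwo's Bar gives x + dy = 101, and PartThree's Bar gives
    # x * dy / 2 = 50, matching the three assertions at the end of this test.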
g = ops.Graph()
with g.as_default():
x = constant_op.constant(100.)
x0 = x
y0 = PartOne(x0)
dx0, = gradients_impl.gradients(ys=[y0], xs=[x0])
x1 = x
y1 = PartTwo(x1)
dx1, = gradients_impl.gradients(ys=[y1], xs=[x1])
x2 = x
y2 = PartThree(x2)
dx2, = gradients_impl.gradients(ys=[y2], xs=[x2])
with self.session(graph=g) as sess:
v0, v1, v2 = self.evaluate([dx0, dx1, dx2])
self.assertAllEqual(v0, 2.)
self.assertAllEqual(v1, 101.)
self.assertAllEqual(v2, 50.)
class FunctionsFromProtos(test.TestCase):
def expectFunctionsEqual(self, func, grad_func=None, new_func=None):
if new_func is None:
# Make a copy of func.definition to avoid any bugs masked by using the
# same object
serialized_fdef = func.definition.SerializeToString()
# Serialize and then deserialize `func` to create `new_func`
fdef = function_pb2.FunctionDef.FromString(serialized_fdef)
new_func = function._from_definition(fdef, grad_func=grad_func)
self.assertEqual(func.name, new_func.name)
self.assertEqual(func.definition, new_func.definition)
self.assertEqual(func.grad_func_name, new_func.grad_func_name)
self.assertEqual(func.declared_input_types, new_func.declared_input_types)
self.assertEqual(func.captured_inputs, new_func.captured_inputs)
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(x, y):
return x + y
self.expectFunctionsEqual(Foo)
def testGradFunc(self):
@function.Defun(dtypes.float32, dtypes.float32)
def G(x, dy):
return x * dy
@function.Defun(dtypes.float32, grad_func=G)
def F(x):
return math_ops.exp(x) - math_ops.exp(-x)
self.expectFunctionsEqual(F, grad_func=G)
def testCapturedInputs(self):
c = constant_op.constant(10, dtypes.int64)
@function.Defun(dtypes.int64)
def Foo(x):
return x + c
new_func = function._from_definition(Foo.definition)
self.assertEqual(Foo.name, new_func.name)
self.assertEqual(Foo.definition, new_func.definition)
self.assertEqual(Foo.grad_func_name, new_func.grad_func_name)
# Captured inputs are added as regular inputs to the function definition
self.assertEqual(new_func.declared_input_types,
Foo.declared_input_types + (dtypes.int64,))
self.assertEqual(len(new_func.captured_inputs), 0)
def testNestedFunctions(self):
@function.Defun(dtypes.float32)
def Outer(x):
@function.Defun(dtypes.float32)
def Inner(y):
return y + 1
return Inner(Inner(x))
self.expectFunctionsEqual(Outer)
def testFromLibrary(self):
# Define some functions with different gradient functions. Note that many of
# the below functions are identical since function bodies don't matter for
# this test.
@function.Defun(dtypes.float32, dtypes.float32)
def G1(x, dy):
return x * dy
@function.Defun(dtypes.float32, dtypes.float32)
def G2(x, dy):
return x * dy
# F1 and F2 have the same gradient function
@function.Defun(dtypes.float32, grad_func=G1)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
@function.Defun(dtypes.float32, grad_func=G1)
def F2(x):
return math_ops.exp(x) - math_ops.exp(-x)
# F3 has a different gradient function
@function.Defun(dtypes.float32, grad_func=G2)
def F3(x):
return math_ops.exp(x) - math_ops.exp(-x)
# F4 has no gradient function
@function.Defun(dtypes.float32)
def F4(x):
return math_ops.exp(x) - math_ops.exp(-x)
# Instantiate all functions
g = ops.Graph()
with g.as_default():
c = constant_op.constant(1.0, dtypes.float32)
f1 = F1(c)
f2 = F2(c)
f3 = F3(c)
f4 = F4(c)
gradients_impl.gradients([f1, f2, f3, f4], c)
library = g.as_graph_def().library
new_funcs = function.from_library(library)
def CheckNewFunc(func):
new_func = [f for f in new_funcs if f.name == func.name]
self.assertEqual(len(new_func), 1)
self.expectFunctionsEqual(func, new_func=new_func[0])
CheckNewFunc(G1)
CheckNewFunc(G2)
CheckNewFunc(F1)
CheckNewFunc(F2)
CheckNewFunc(F3)
CheckNewFunc(F4)
def testFromLibraryEmptyLib(self):
library = function_pb2.FunctionDefLibrary()
self.assertEqual(len(function.from_library(library)), 0)
def testFromLibraryMissingFuncDef(self):
@function.Defun(dtypes.float32, dtypes.float32)
def G1(x, dy):
return x * dy
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
gradient = function_pb2.GradientDef()
gradient.function_name = F1.name
gradient.gradient_func = G1.name
# Create invalid function def that is missing G1 function def
library = function_pb2.FunctionDefLibrary()
library.gradient.extend([gradient])
library.function.extend([F1.definition])
with self.assertRaisesRegexp(
ValueError,
"FunctionDefLibrary missing 'G1_[0-9a-zA-Z]{8,11}' FunctionDef"):
function.from_library(library)
# Create invalid function def that is missing F1 function def
library = function_pb2.FunctionDefLibrary()
library.gradient.extend([gradient])
library.function.extend([G1.definition])
with self.assertRaisesRegexp(
ValueError,
"FunctionDefLibrary missing 'F1_[0-9a-zA-Z]{8,11}' FunctionDef"):
function.from_library(library)
def testFromLibraryCyclicGradFuncs(self):
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
@function.Defun(dtypes.float32)
def F2(x):
return math_ops.exp(x) - math_ops.exp(-x)
# Create invalid function def library where F1 has gradient function F2 and
# F2 has gradient function F1
library = function_pb2.FunctionDefLibrary()
library.function.extend([F1.definition, F2.definition])
gradient1 = function_pb2.GradientDef()
gradient1.function_name = F1.name
gradient1.gradient_func = F2.name
gradient2 = function_pb2.GradientDef()
gradient2.function_name = F2.name
gradient2.gradient_func = F1.name
library.gradient.extend([gradient1, gradient2])
with self.assertRaisesRegexp(
ValueError, "FunctionDefLibrary contains cyclic gradient functions!"):
function.from_library(library)
def testExperimentalAttrs(self):
@function.Defun(dtypes.int32, experimental_tag="tag_value")
def FunctionWithStrAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=123)
def FunctionWithIntAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=123.0)
def FunctionWithFloatAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=True)
def FunctionWithBoolAttr(i):
return array_ops.identity(i)
self.assertTrue("experimental_tag" in FunctionWithStrAttr.definition.attr)
self.assertEqual(FunctionWithStrAttr.definition.attr["experimental_tag"].s,
b"tag_value")
self.assertTrue("experimental_tag" in FunctionWithIntAttr.definition.attr)
self.assertEqual(FunctionWithIntAttr.definition.attr["experimental_tag"].i,
123)
self.assertTrue("experimental_tag" in FunctionWithFloatAttr.definition.attr)
self.assertEqual(
FunctionWithFloatAttr.definition.attr["experimental_tag"].f, 123.0)
self.assertTrue("experimental_tag" in FunctionWithBoolAttr.definition.attr)
self.assertEqual(FunctionWithBoolAttr.definition.attr["experimental_tag"].b,
True)
class FunctionOverloadTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun()
def Sinh(x):
return 1 / 2. * (math_ops.exp(x) - math_ops.exp(-x))
g = ops.Graph()
with g.as_default():
x = Sinh(constant_op.constant(0.25, dtypes.float32))
y = Sinh(constant_op.constant(0.25, dtypes.float64))
with self.session(graph=g):
self.assertAllClose(x.eval(), np.sinh(0.25))
self.assertAllClose(y.eval(), np.sinh(0.25))
def testGradient(self):
@function.Defun(func_name="Spec")
def G(x, dy):
return x * dy
@function.Defun(grad_func=G)
def F(x):
return math_ops.exp(x) - math_ops.exp(-x)
for dtype in [dtypes.float32, dtypes.float64]:
g = ops.Graph()
with g.as_default():
x = constant_op.constant(0.25, dtype)
y = F(x)
dx, = gradients_impl.gradients(y, x)
with self.session(graph=g):
self.assertAllClose(dx.eval(), 0.25)
def testDocString(self):
@function.Defun()
def Foo(x):
"""Successor of x."""
return x + 1
g = ops.Graph()
with g.as_default():
_ = Foo(1)
self.assertEqual(g.as_graph_def().library.function[0].signature.description,
"Successor of x.")
class FunctionCaptureByValueTest(test.TestCase):
@test_util.run_deprecated_v1
def testCaptureByValue(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant([[1.0]])
b = constant_op.constant([2.0])
# Foo() captures w and b.
@function.Defun(dtypes.float32, capture_by_value=True)
def Foo(x):
# Plus() captures b.
@function.Defun(dtypes.float32, capture_by_value=True)
def Plus(y):
return y + b
self.assertEqual(0, len(Plus.captured_inputs))
return Plus(math_ops.matmul(w, x))
y = Foo(constant_op.constant([[10.]]))
self.assertEqual(0, len(Foo.captured_inputs))
with self.session(graph=g):
self.assertAllEqual(y.eval(), [[12.0]])
class UnrollLSTMTest(test.TestCase):
BATCH_SIZE = 16
LSTM_DIMS = 32
NUM_UNROLL = 20
def _Weights(self):
dims = self.LSTM_DIMS
return random_ops.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456)
def _Input(self):
return random_ops.random_uniform(
[self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=654321)
# Helper to construct a LSTM cell graph.
@classmethod
def LSTMCell(cls, x, mprev, cprev, weights):
xm = array_ops.concat([x, mprev], 1)
i_i, i_g, f_g, o_g = array_ops.split(
value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1)
new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid(
i_g) * math_ops.tanh(i_i)
new_c = math_ops.maximum(math_ops.minimum(new_c, 50.0), -50.0)
new_m = math_ops.sigmoid(o_g) * math_ops.tanh(new_c)
return new_m, new_c
def _BuildForward(self, weights, inp, mode="cell"):
def Loop(cell, w, i):
x = array_ops.unstack(i, self.NUM_UNROLL)
m = array_ops.zeros_like(x[0])
c = array_ops.zeros_like(x[0])
for i in range(self.NUM_UNROLL):
m, c = cell(x[i], m, c, w)
return m
cell = UnrollLSTMTest.LSTMCell
if mode == "complete":
# Constructs the complete graph in python.
return Loop(cell, weights, inp)
cell = function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
dtypes.float32)(
cell)
if mode == "cell":
# Just represent the LSTM as a function.
return Loop(cell, weights, inp)
if mode == "loop":
# Wraps the whole loop as a function.
@function.Defun(dtypes.float32, dtypes.float32)
def LSTMLoop(w, i):
return Loop(cell, w, i)
return LSTMLoop(weights, inp)
if mode == "loop10":
      # Wraps 10 lstm steps into one function, and wraps the whole loop
      # into another function that calls the former.
# Groups 10 steps at a time.
@function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
*([dtypes.float32] * 10))
def Loop10(w, m, c, *args):
for x in args:
m, c = cell(x, m, c, w)
return m, c
@function.Defun(dtypes.float32, dtypes.float32)
def LSTMLoop10(weights, inp):
x = array_ops.unstack(inp, self.NUM_UNROLL)
m = array_ops.zeros_like(x[0])
c = array_ops.zeros_like(x[0])
assert self.NUM_UNROLL % 10 == 0
for i in range(0, self.NUM_UNROLL, 10):
m, c = Loop10(weights, m, c, *x[i:i + 10])
return m
return LSTMLoop10(weights, inp)
def testUnrollLSTM(self):
# Run one step of the unrolled lstm graph.
def RunForward(mode, cfg=None):
tf_logging.info("mode = %s", mode)
g = ops.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
gdef = g.as_graph_def()
finish = time.time()
tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
len(str(gdef)), len(gdef.SerializeToString()))
with g.as_default(), session.Session(config=cfg) as sess:
return self.evaluate(m)
mv0 = RunForward("complete")
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
mv1 = RunForward("cell", cfg)
mv2 = RunForward("loop", cfg)
mv3 = RunForward("loop10", cfg)
self.assertAllClose(mv0, mv1, rtol=1e-4)
self.assertAllClose(mv0, mv2, rtol=1e-4)
self.assertAllClose(mv0, mv3, rtol=1e-4)
def testUnrollLSTMGrad(self):
# Run one step of the unrolled lstm graph.
def RunForwardBackward(mode, cfg=None):
tf_logging.info("mode = %s", mode)
g = ops.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
loss = math_ops.reduce_sum(math_ops.square(m))
dw = gradients_impl.gradients([loss], [weights])
gdef = g.as_graph_def()
finish = time.time()
tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
len(str(gdef)), len(gdef.SerializeToString()))
with g.as_default(), session.Session(config=cfg) as sess:
return self.evaluate(dw)
d0 = RunForwardBackward("complete")
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
d1 = RunForwardBackward("cell", cfg)
d2 = RunForwardBackward("loop", cfg)
d3 = RunForwardBackward("loop10", cfg)
self.assertAllClose(d0, d1, rtol=1e-4, atol=1e-4)
self.assertAllClose(d0, d2, rtol=1e-4, atol=1e-4)
self.assertAllClose(d0, d3, rtol=1e-4, atol=1e-4)
class FunctionInlineControlTest(test.TestCase):
def testFoo(self):
dtype = dtypes.float32
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
cell_func_call_pattern = re.compile(r"Cell[^/]*\(")
for noinline in [False, True]:
@function.Defun(dtype, noinline=noinline)
def Cell(v):
# If v is a vector [n, 1], x is a big square matrix.
x = math_ops.tanh(v + array_ops.transpose(v, [1, 0]))
return math_ops.reduce_sum(x, 1, keepdims=True)
@function.Defun(dtype)
def Forward(x):
for _ in range(10):
# pylint: disable=cell-var-from-loop
x = Cell(x)
return math_ops.reduce_sum(x, [0, 1])
self.assertEqual(noinline, Cell.definition.attr["_noinline"].b)
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtype)
y = Forward(x)
dx, = gradients_impl.gradients([y], [x])
np.random.seed(321)
inp = np.random.uniform(-1, 1, [16, 1]).astype(np.float32)
run_metadata = config_pb2.RunMetadata()
with session.Session(graph=g, config=cfg) as sess:
ans = sess.run(
[y, dx], {x: inp},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
print(ans[0], np.sum(ans[1]))
self.assertAllClose(ans[0], 255.971, rtol=1e-3)
self.assertAllClose(np.sum(ans[1]), 13.0408, rtol=1e-3)
def MetadataHasCell(run_metadata):
for dev_stats in run_metadata.step_stats.dev_stats:
for node_stats in dev_stats.node_stats:
if cell_func_call_pattern.search(node_stats.timeline_label):
return True
return False
self.assertEqual(MetadataHasCell(run_metadata), noinline)
class ModuleFunctionTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun(*[dtypes.float32] * 3)
def LinearWithCApi(w, b, x):
return nn_ops.relu(math_ops.matmul(x, w) + b)
@function.Defun(*[dtypes.float32] * 5)
def Linear2WithCApi(w1, b1, w2, b2, x):
return LinearWithCApi(w2, b2, LinearWithCApi(w1, b1, x))
with ops.Graph().as_default():
a, b, c, d, e = [
constant_op.constant([[_]], dtype=dtypes.float32) for _ in range(5)
]
y = LinearWithCApi(a, b, c)
z = Linear2WithCApi(a, b, c, d, e)
with session.Session() as sess:
self.assertAllEqual([[1]], self.evaluate(y))
self.assertAllEqual([[5]], self.evaluate(z))
class VariableHoistingTest(test.TestCase):
def _testSimpleModel(self, use_forward_func, use_resource=False):
def _Model(x):
w = variable_scope.get_variable(
"w", (64, 64),
initializer=init_ops.random_uniform_initializer(seed=312),
use_resource=use_resource)
b = variable_scope.get_variable(
"b", (64),
initializer=init_ops.zeros_initializer(),
          use_resource=use_resource)
return math_ops.sigmoid(math_ops.matmul(x, w) + b)
@function.Defun()
def Model(x):
return _Model(x)
cvars = []
@function.Defun()
def Grad(x, y0):
if use_forward_func:
y = Model(x)
else:
y = _Model(x)
loss = math_ops.reduce_mean(
math_ops.reduce_sum(y0 * math_ops.log(y), 1), 0)
arg_w, arg_b = function.get_extra_args()
self.assertEqual(arg_w.get_shape(), tensor_shape.TensorShape([64, 64]))
self.assertEqual(arg_b.get_shape(), tensor_shape.TensorShape([64]))
dw, db = gradients_impl.gradients(loss, [arg_w, arg_b])
cvars.extend(function.get_extra_vars())
return loss, dw, db
g = ops.Graph()
with g.as_default():
x = random_ops.random_normal([64, 64], seed=100)
y0 = random_ops.random_normal([64, 64], seed=200)
with variable_scope.variable_scope("Foo"):
loss, dw, db = Grad(x, y0)
self.assertEqual(2, len(cvars))
w, b = cvars[:2]
self.assertEqual("Foo/w", w.op.name)
self.assertEqual("Foo/b", b.op.name)
with self.session(graph=g) as sess:
self.evaluate(variables.global_variables_initializer())
w, b, x, y0, loss, dw, db = self.evaluate([w, b, x, y0, loss, dw, db])
self.assertAllEqual(w.shape, (64, 64))
self.assertAllClose(np.sum(w), 2050.44)
self.assertAllEqual(b.shape, (64,))
self.assertAllClose(np.sum(b), 0.0)
self.assertAllClose(loss, -2.27, rtol=1e-2)
self.assertAllEqual(dw.shape, (64, 64))
self.assertAllClose(np.sum(dw), -1.04, rtol=1e-2)
self.assertAllEqual(db.shape, (64,))
self.assertAllClose(np.sum(db), 0.509, rtol=1e-2)
@test_util.run_deprecated_v1
def testBasic(self):
self._testSimpleModel(True)
self._testSimpleModel(False)
@test_util.run_deprecated_v1
def testBasicResource(self):
self._testSimpleModel(True, use_resource=True)
self._testSimpleModel(False, use_resource=True)
class DevicePlacementTest(test.TestCase):
def testNoDeviceGraph(self):
with ops.Graph().as_default():
@function.Defun(*[dtypes.float32] * 2)
def Matmul(a, b):
return math_ops.matmul(a, b)
Matmul(1., 2.)
gdef = ops.get_default_graph().as_graph_def()
self.assertAllEqual(len(gdef.library.function), 1)
fdef = gdef.library.function[0]
for node in fdef.node_def:
self.assertAllEqual(node.device, "")
def testNestedDevices(self):
with ops.Graph().as_default(), ops.device("CPU:0"):
@function.Defun(*[dtypes.float32] * 2)
def Matmul(a, b):
return math_ops.matmul(a, b)
with ops.device("CPU:1"):
@function.Defun(*[dtypes.float32] * 2)
def Divide(a, b):
return math_ops.divide(a, b)
Divide(Matmul(1., 2.), 3.)
gdef = ops.get_default_graph().as_graph_def()
matmul_fdef = [
f for f in gdef.library.function if "Matmul" in f.signature.name
]
divide_fdef = [
f for f in gdef.library.function if "Divide" in f.signature.name
]
self.assertAllEqual(len(matmul_fdef), 1)
self.assertAllEqual(len(divide_fdef), 1)
for node in matmul_fdef[0].node_def:
self.assertAllEqual(node.device, "/device:CPU:0")
for node in divide_fdef[0].node_def:
self.assertAllEqual(node.device, "/device:CPU:1")
def _testNestedDeviceWithSameFunction(self, func_name):
def MatmulWrap(a, b):
@function.Defun(
func_name=func_name, *[dtypes.int32] * 2)
def Matmul(a, b):
return math_ops.matmul(a, b)
return Matmul(a, b)
with ops.Graph().as_default(), ops.device("CPU:0"):
c = MatmulWrap(1, 2)
with ops.device("CPU:1"):
MatmulWrap(c, 3)
gdef = ops.get_default_graph().as_graph_def()
devices = []
for node in gdef.library.function[0].node_def:
devices.append(node.device)
for node in gdef.library.function[1].node_def:
devices.append(node.device)
self.assertAllEqual(sorted(devices), ["/device:CPU:0", "/device:CPU:1"])
def testFunctionWithName(self):
with self.assertRaises(InvalidArgumentError) as cm:
self._testNestedDeviceWithSameFunction("MatmulTest")
self.assertEqual(
cm.exception.message,
"Cannot add function \'MatmulTest\' because a different "
"function with the same name already exists.")
def testFunctionWithoutName(self):
self._testNestedDeviceWithSameFunction(None)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Loop nodes.
There are "for" and "while" loops in Python, but both are re-formulated into a
generic loop with break/continue statements. This re-formulation requires the
optimization of loops to be very general, yet the loop node type itself stays
very simple.
"""
from nuitka.optimizations.TraceCollections import ConstraintCollectionBranch
from nuitka.tree.Extractions import getVariablesWritten
from .Checkers import checkStatementsSequenceOrNone
from .NodeBases import NodeBase, StatementChildrenHavingBase
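# A rough illustrative sketch (not part of this module) of the re-formulation
# the module docstring above describes: a Python "for" loop such as
#
#     for x in iterable:
#         do_something(x)
#
# is treated roughly as if it had been written as the generic loop below, so
# that only this one loop node kind needs to be optimized:
#
#     _iter = iter(iterable)
#     while 1:                      # becomes STATEMENT_LOOP
#         try:
#             x = next(_iter)
#         except StopIteration:
#             break                 # becomes STATEMENT_BREAK_LOOP
#         do_something(x)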
class StatementLoop(StatementChildrenHavingBase):
kind = "STATEMENT_LOOP"
named_children = (
"body",
)
checkers = {
"body" : checkStatementsSequenceOrNone
}
def __init__(self, body, source_ref):
StatementChildrenHavingBase.__init__(
self,
values = {
"body" : body
},
source_ref = source_ref
)
self.loop_variables = None
getLoopBody = StatementChildrenHavingBase.childGetter("body")
setLoopBody = StatementChildrenHavingBase.childSetter("body")
def mayReturn(self):
loop_body = self.getLoopBody()
if loop_body is not None and loop_body.mayReturn():
return True
return False
def mayBreak(self):
# The loop itself may never break another loop.
return False
def mayContinue(self):
# The loop itself may never continue another loop.
return False
def isStatementAborting(self):
loop_body = self.getLoopBody()
if loop_body is None:
return True
else:
return not loop_body.mayBreak()
def computeLoopBody(self, constraint_collection):
abort_context = constraint_collection.makeAbortStackContext(
catch_breaks = True,
catch_continues = True,
catch_returns = False
)
with abort_context:
loop_body = self.getLoopBody()
if loop_body is not None:
                # Look ahead: determine what will be written in the loop body
                # and degrade our knowledge of those variables for the first
                # iteration; later iterations will have more precise knowledge.
if self.loop_variables is None:
self.loop_variables = getVariablesWritten(
loop_body
)
loop_entry_traces = set()
                # Mark all variables that are written in the loop and may hit
                # a 'continue' as loop wrap-around.
for variable in self.loop_variables:
loop_entry_traces.add(
constraint_collection.markActiveVariableAsLoopMerge(
variable = variable
)
)
result = loop_body.computeStatementsSequence(
constraint_collection = constraint_collection
)
# Might be changed.
if result is not loop_body:
self.setLoopBody(result)
loop_body = result
if loop_body is not None:
# Emulate terminal continue if not aborting.
if not loop_body.isStatementAborting():
constraint_collection.onLoopContinue()
continue_collections = constraint_collection.getLoopContinueCollections()
self.loop_variables = set()
for loop_entry_trace in loop_entry_traces:
variable = loop_entry_trace.getVariable()
loop_end_traces = set()
for continue_collection in continue_collections:
loop_end_trace = continue_collection.getVariableCurrentTrace(variable)
if loop_end_trace is not loop_entry_trace:
loop_end_traces.add(loop_end_trace)
if loop_end_traces:
loop_entry_trace.addLoopContinueTraces(loop_end_traces)
self.loop_variables.add(variable)
            # If we break, the outer collection becomes a merge of all those
            # breaks, or just the one, if there is only one.
break_collections = constraint_collection.getLoopBreakCollections()
return loop_body, break_collections
def computeStatement(self, constraint_collection):
outer_constraint_collection = constraint_collection
constraint_collection = ConstraintCollectionBranch(
parent = constraint_collection,
name = "loop"
)
loop_body, break_collections = self.computeLoopBody(constraint_collection)
        # Consider trailing "continue" statements; these have no effect, so we
        # can remove them.
if loop_body is not None:
assert loop_body.isStatementsSequence()
statements = loop_body.getStatements()
assert statements # Cannot be empty
# If the last statement is a "continue" statement, it can simply
# be discarded.
last_statement = statements[-1]
if last_statement.isStatementContinueLoop():
if len(statements) == 1:
self.setLoopBody(None)
loop_body = None
else:
last_statement.replaceWith(None)
constraint_collection.signalChange(
"new_statements",
last_statement.getSourceReference(),
"""\
Removed useless terminal 'continue' as last statement of loop."""
)
if break_collections:
outer_constraint_collection.mergeMultipleBranches(break_collections)
        # Consider a leading "break" statement: if it is the only statement,
        # the whole loop statement can be removed. Trailing "break" statements
        # could also be handled, but that would need to consider whether there
        # are other "break" statements too; numbering loop exits is nothing we
        # have yet.
if loop_body is not None:
assert loop_body.isStatementsSequence()
statements = loop_body.getStatements()
assert statements # Cannot be empty
if len(statements) == 1 and statements[-1].isStatementBreakLoop():
return None, "new_statements", """\
Removed useless loop with immediate 'break' statement."""
return self, None, None
class StatementContinueLoop(NodeBase):
kind = "STATEMENT_CONTINUE_LOOP"
def __init__(self, source_ref):
NodeBase.__init__(self, source_ref = source_ref)
def isStatementAborting(self):
return True
def mayRaiseException(self, exception_type):
return False
def mayContinue(self):
return True
def computeStatement(self, constraint_collection):
        # This statement is aborting; that already tells everything needed.
constraint_collection.onLoopContinue()
return self, None, None
class StatementBreakLoop(NodeBase):
kind = "STATEMENT_BREAK_LOOP"
def __init__(self, source_ref):
NodeBase.__init__(self, source_ref = source_ref)
def isStatementAborting(self):
return True
def mayRaiseException(self, exception_type):
return False
def mayBreak(self):
return True
def computeStatement(self, constraint_collection):
        # This statement is aborting; that already tells everything needed.
constraint_collection.onLoopBreak()
return self, None, None
|
|
# Temporary version of simplejson in a single file to get around older packaging
# restrictions in previous releases of Access Grid 2.
# ---------------------------------------------------------------------------------------
# simplejson/scanner.py
"""
Iterator based sre token scanner
"""
import sre_parse, sre_compile, sre_constants
from sre_constants import BRANCH, SUBPATTERN
from re import VERBOSE, MULTILINE, DOTALL
import re
__all__ = ['Scanner', 'pattern']
FLAGS = (VERBOSE | MULTILINE | DOTALL)
class Scanner(object):
def __init__(self, lexicon, flags=FLAGS):
self.actions = [None]
# combine phrases into a compound pattern
s = sre_parse.Pattern()
s.flags = flags
p = []
for idx, token in enumerate(lexicon):
phrase = token.pattern
try:
subpattern = sre_parse.SubPattern(s,
[(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
except sre_constants.error:
raise
p.append(subpattern)
self.actions.append(token)
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def iterscan(self, string, idx=0, context=None):
"""
Yield match, end_idx for each match
"""
match = self.scanner.scanner(string, idx).match
actions = self.actions
lastend = idx
end = len(string)
while True:
m = match()
if m is None:
break
matchbegin, matchend = m.span()
if lastend == matchend:
break
action = actions[m.lastindex]
if action is not None:
rval, next_pos = action(m, context)
if next_pos is not None and next_pos != matchend:
# "fast forward" the scanner
matchend = next_pos
match = self.scanner.scanner(string, matchend).match
yield rval, matchend
lastend = matchend
def pattern(pattern, flags=FLAGS):
def decorator(fn):
fn.pattern = pattern
fn.regex = re.compile(pattern, flags)
return fn
return decorator
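# Hedged usage sketch (illustrative only, not used by the code below): the
# Scanner is driven by callables decorated with ``pattern``; each callable
# receives the regex match and a context object and returns a
# ``(value, next_position)`` pair. All names in this demo are made up.
def _scanner_usage_example():
    @pattern(r'\d+')
    def Number(match, context):
        return int(match.group(0)), None
    @pattern(r'[a-z]+')
    def Word(match, context):
        return match.group(0), None
    demo_scanner = Scanner([Number, Word])
    # Yields (42, 2) and then ('abc', 5) for the input below.
    return list(demo_scanner.iterscan("42abc"))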
# ---------------------------------------------------------------------------------------
# simplejson/encoder.py
"""
Implementation of JSONEncoder
"""
import re
#try:
# from simplejson import _speedups
#except ImportError:
# _speedups = None
_speedups = None
ESCAPE = re.compile(r'[\x00-\x19\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])')
ESCAPE_DCT = {
# escape all forward slashes to prevent </script> attack
'/': '\\/',
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return str(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def encode_basestring_ascii(s):
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
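# Worked example (a sketch, not exercised by this module): for a non-BMP
# character such as U+1D11E MUSICAL SYMBOL G CLEF, the surrogate-pair branch
# above computes s1 = 0xd834 and s2 = 0xdd1e, so
# encode_basestring_ascii(u'\U0001d11e') returns the plain str
# '"\\ud834\\udd1e"' (literal backslash escapes, not the character itself).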
try:
encode_basestring_ascii = _speedups.encode_basestring_ascii
_need_utf8 = True
except AttributeError:
_need_utf8 = False
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible, and otherwise calls the superclass implementation (to raise
    ``TypeError``).
"""
__all__ = ['__init__', 'default', 'encode', 'iterencode']
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8'):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
        ensure_ascii is False, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
yield separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
key_separator = self.key_separator
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = dct.keys()
keys.sort()
items = [(k, dct[k]) for k in keys]
else:
items = dct.iteritems()
_encoding = self.encoding
_do_decode = (_encoding is not None
and not (_need_utf8 and _encoding == 'utf-8'))
for key, value in items:
if isinstance(key, str):
if _do_decode:
key = key.decode(_encoding)
elif isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield encoder(key)
yield key_separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, basestring):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
_encoding = self.encoding
if (_encoding is not None and isinstance(o, str)
and not (_need_utf8 and _encoding == 'utf-8')):
o = o.decode(_encoding)
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks...
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8' and _need_utf8)):
o = o.decode(_encoding)
return encode_basestring_ascii(o)
# This doesn't pass the iterator directly to ''.join() because it
# sucks at reporting exceptions. It's going to do this internally
# anyway because it uses PySequence_Fast or similar.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)
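# Hedged usage sketch (illustrative only, not used elsewhere in this module):
# extending the encoder above by overriding ``default`` for an otherwise
# unserializable type, here ``set``. The class and function names are made up.
def _encoder_usage_example():
    class SetEncoder(JSONEncoder):
        def default(self, o):
            if isinstance(o, set):
                return sorted(o)  # encode sets as sorted JSON arrays
            return JSONEncoder.default(self, o)  # otherwise raise TypeError
    # Returns '{"tags": [1, 2, 3]}'
    return SetEncoder().encode({"tags": set([3, 1, 2])})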
# ---------------------------------------------------------------------------------------
# simplejson/decoder.py
"""
Implementation of JSONDecoder
"""
import re
# from simplejson.scanner import Scanner, pattern
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
import struct
import sys
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
'true': True,
'false': False,
'null': None,
}
def JSONConstant(match, context, c=_CONSTANTS):
return c[match.group(0)], None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
match = JSONNumber.regex.match(match.string, *match.span())
integer, frac, exp = match.groups()
if frac or exp:
res = float(integer + (frac or '') + (exp or ''))
else:
res = int(integer)
return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
STRINGCHUNK = re.compile(r'(.*?)(["\\])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def scanstring(s, end, encoding=None, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
if terminator == '"':
break
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
esc = s[end + 1:end + 5]
try:
m = unichr(int(esc, 16))
if len(esc) != 4 or not esc.isalnum():
raise ValueError
except ValueError:
raise ValueError(errmsg("Invalid \\uXXXX escape", s, end))
end += 5
_append(m)
return u''.join(chunks), end
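# Calling convention sketch (illustrative only): ``end`` points just past the
# opening quote, so scanstring('"abc"', 1) returns (u'abc', 5), i.e. the
# decoded unicode content and the index one past the closing quote.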
def JSONString(match, context):
encoding = getattr(context, 'encoding', None)
return scanstring(match.string, match.end(), encoding)
pattern(r'"')(JSONString)
WHITESPACE = re.compile(r'\s*', FLAGS)
def JSONObject(match, context, _w=WHITESPACE.match):
pairs = {}
s = match.string
end = _w(s, match.end()).end()
nextchar = s[end:end + 1]
# trivial empty object
if nextchar == '}':
return pairs, end + 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
encoding = getattr(context, 'encoding', None)
iterscan = JSONScanner.iterscan
while True:
key, end = scanstring(s, end, encoding)
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end = _w(s, end + 1).end()
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == '}':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
object_hook = getattr(context, 'object_hook', None)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
pattern(r'{')(JSONObject)
def JSONArray(match, context, _w=WHITESPACE.match):
values = []
s = match.string
end = _w(s, match.end()).end()
# look-ahead for trivial empty array
nextchar = s[end:end + 1]
if nextchar == ']':
return values, end + 1
iterscan = JSONScanner.iterscan
while True:
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
values.append(value)
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
end = _w(s, end).end()
return values, end
pattern(r'\[')(JSONArray)
ANYTHING = [
JSONObject,
JSONArray,
JSONString,
JSONConstant,
JSONNumber,
]
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object):
"""
Simple JSON <http://json.org> decoder
Performs the following translations in decoding:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
_scanner = Scanner(ANYTHING)
__all__ = ['__init__', 'decode', 'raw_decode']
def __init__(self, encoding=None, object_hook=None):
"""
``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
"""
self.encoding = encoding
self.object_hook = object_hook
def decode(self, s, _w=WHITESPACE.match):
"""
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, **kw):
"""
Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
kw.setdefault('context', self)
try:
obj, end = self._scanner.iterscan(s, **kw).next()
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
__all__ = ['JSONDecoder']
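# Hedged usage sketch (illustrative only, not used elsewhere in this module):
# decoding with an ``object_hook`` that rewrites every decoded JSON object,
# mirroring the class docstring above. The hook and function names are made up.
def _decoder_usage_example():
    def _as_sorted_pairs(dct):
        # Replace each decoded dict with a sorted list of (key, value) pairs.
        return sorted(dct.items())
    decoder = JSONDecoder(object_hook=_as_sorted_pairs)
    # Returns [(u'a', 1), (u'b', [True, None])]
    return decoder.decode('{"a": 1, "b": [true, null]}')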
# ---------------------------------------------------------------------------------------
# simplejson/__init__.py
r"""
A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson
>>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> simplejson.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
    >>> simplejson.dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
__version__ = '1.7.1'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
# from decoder import JSONDecoder
# from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8'
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, **kw):
"""
Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
    be specified. Encodings that are not ASCII based (such as UCS-2) are
    not allowed; such files should be wrapped with
    ``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, **kw):
"""
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if cls is None and encoding is None and object_hook is None and not kw:
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
return cls(encoding=encoding, **kw).decode(s)
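# Hedged usage sketch (illustrative only): a simple round trip through the
# module-level ``dumps``/``loads`` helpers defined above. The data values are
# made up.
def _roundtrip_usage_example():
    doc = dumps({"id": 7, "tags": ["a", "b"]}, sort_keys=True)
    # doc == '{"id": 7, "tags": ["a", "b"]}'
    return loads(doc)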
def read(s):
"""
json-py API compatibility hook. Use loads(s) instead.
"""
import warnings
warnings.warn("simplejson.loads(s) should be used instead of read(s)",
DeprecationWarning)
return loads(s)
def write(obj):
"""
json-py API compatibility hook. Use dumps(s) instead.
"""
import warnings
warnings.warn("simplejson.dumps(s) should be used instead of write(s)",
DeprecationWarning)
return dumps(obj)
simplejson_dumps = dumps
simplejson_loads = loads
# ---------------------------------------------------------------------------------------
# simplejson/jsonfilter
import cgi
class JSONFilter(object):
def __init__(self, app, mime_type='text/x-json'):
self.app = app
self.mime_type = mime_type
def __call__(self, environ, start_response):
# Read JSON POST input to jsonfilter.json if matching mime type
response = {'status': '200 OK', 'headers': []}
def json_start_response(status, headers):
response['status'] = status
response['headers'].extend(headers)
environ['jsonfilter.mime_type'] = self.mime_type
if environ.get('REQUEST_METHOD', '') == 'POST':
if environ.get('CONTENT_TYPE', '') == self.mime_type:
args = [_ for _ in [environ.get('CONTENT_LENGTH')] if _]
data = environ['wsgi.input'].read(*map(int, args))
                environ['jsonfilter.json'] = simplejson_loads(data)
res = simplejson_dumps(self.app(environ, json_start_response))
jsonp = cgi.parse_qs(environ.get('QUERY_STRING', '')).get('jsonp')
if jsonp:
content_type = 'text/javascript'
res = ''.join(jsonp + ['(', res, ')'])
elif 'Opera' in environ.get('HTTP_USER_AGENT', ''):
# Opera has bunk XMLHttpRequest support for most mime types
content_type = 'text/plain'
else:
content_type = self.mime_type
headers = [
('Content-type', content_type),
            ('Content-length', str(len(res))),
]
headers.extend(response['headers'])
start_response(response['status'], headers)
return [res]
def factory(app, global_conf, **kw):
return JSONFilter(app, **kw)
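# Hedged usage sketch (illustrative only): wrapping a WSGI app with JSONFilter
# so the wrapped app's return value is serialized to JSON for the client. The
# inner app and function names are made up.
def _jsonfilter_usage_example():
    def echo_app(environ, start_response):
        start_response('200 OK', [])
        # The filter serializes whatever the wrapped app returns; a JSON POST
        # body, if any, has already been parsed into environ['jsonfilter.json'].
        return {'json_body': environ.get('jsonfilter.json')}
    return JSONFilter(echo_app, mime_type='text/x-json')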
# ---------------------------------------------------------------------------------------
|
|
"""Example running MemN2N on a single bAbI task.
Download tasks from facebook.ai/babi """
from __future__ import absolute_import
from __future__ import print_function
import random
from itertools import chain
from six.moves import range, reduce
import logging
import sys
from sklearn import metrics
import tensorflow as tf
import numpy as np
from dialog_data_utils import (
load_task,
vectorize_data_dialog,
get_candidates_list
)
from memn2n import MemN2N
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
logger = logging.getLogger(__file__)
tf.flags.DEFINE_float(
"learning_rate",
0.01,
"Learning rate for Adam Optimizer."
)
tf.flags.DEFINE_float("epsilon", 1e-8, "Epsilon value for Adam Optimizer.")
tf.flags.DEFINE_float("max_grad_norm", 40.0, "Clip gradients to this norm.")
tf.flags.DEFINE_integer(
"evaluation_interval",
1,
"Evaluate and print results every x epochs"
)
tf.flags.DEFINE_integer("batch_size", 1, "Batch size for training.")
tf.flags.DEFINE_integer("hops", 3, "Number of hops in the Memory Network.")
tf.flags.DEFINE_integer("epochs", 100, "Number of epochs to train for.")
tf.flags.DEFINE_integer(
"embedding_size",
20,
"Embedding size for embedding matrices."
)
tf.flags.DEFINE_integer("memory_size", 20, "Maximum size of memory.")
tf.flags.DEFINE_integer("task_id", 1, "bAbI task id, 1 <= id <= 6")
tf.flags.DEFINE_integer("random_state", 273, "Random state.")
tf.flags.DEFINE_string(
"data_dir",
"../babi_tools/dialog-bAbI-tasks/",
"Directory containing bAbI tasks"
)
FLAGS = tf.flags.FLAGS
random.seed(FLAGS.random_state)
print("Started Task:", FLAGS.task_id)
# task data
train, dev, test, oov = load_task(FLAGS.data_dir, FLAGS.task_id)
all_dialogues = train + dev + test + oov
data = reduce(lambda x, y: x + y, all_dialogues, [])
vocab = sorted(
reduce(
lambda x, y: x | y,
(set(list(chain.from_iterable(s)) + q + a) for s, q, a in data)
)
)
answer_candidates = get_candidates_list(FLAGS.data_dir)
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
answer_idx = dict(
(candidate, i + 1)
for i, candidate in enumerate(answer_candidates)
)
max_story_size = max(map(len, (s for s, _, _ in data)))
mean_story_size = int(np.mean([len(s) for s, _, _ in data]))
sentence_size = max(map(len, chain.from_iterable(s for s, _, _ in data)))
query_size = max(map(len, (q for _, q, _ in data)))
memory_size = min(FLAGS.memory_size, max_story_size)
vocab_size = len(word_idx) + 1 # +1 for nil word
answer_vocab_size = len(answer_idx) + 1
sentence_size = max(query_size, sentence_size) # for the position
print("Longest sentence length", sentence_size)
print("Longest story length", max_story_size)
print("Average story length", mean_story_size)
def train_model(in_model, in_train_sqa, in_test_sqa, in_batches):
best_train_accuracy, best_test_accuracy = 0.0, 0.0
for t in range(1, FLAGS.epochs+1):
s_train, q_train, a_train = in_train_sqa
s_test, q_test, a_test = in_test_sqa
train_labels = np.argmax(a_train, axis=1)
test_labels = np.argmax(a_test, axis=1)
np.random.shuffle(in_batches)
total_cost = 0.0
for start, end in in_batches:
s = s_train[start:end]
q = q_train[start:end]
a = a_train[start:end]
# back-propagating each batch
cost_t = in_model.batch_fit(s, q, a)
total_cost += cost_t
if t % FLAGS.evaluation_interval == 0:
# evaluate on the whole trainset
train_preds = in_model.predict(s_train, q_train)
train_acc = metrics.accuracy_score(train_preds, train_labels)
# evaluating on the whole testset
test_preds = in_model.predict(s_test, q_test)
test_acc = metrics.accuracy_score(test_preds, test_labels)
logger.info('-----------------------')
logger.info('Epoch:\t{}'.format(t))
logger.info('Total Cost:\t{}'.format(total_cost))
logger.info('Training Accuracy:\t{}'.format(train_acc))
logger.info('Testing Accuracy:\t{}'.format(test_acc))
logger.info('-----------------------')
best_train_accuracy, best_test_accuracy = max(
(best_train_accuracy, best_test_accuracy),
(train_acc, test_acc)
)
return best_train_accuracy, best_test_accuracy
def main(in_trainset_size, in_split_number):
# train/validation/test sets
    all_dialogues_idx = list(range(len(all_dialogues)))
random.shuffle(all_dialogues_idx)
trainset_idx = all_dialogues_idx[
in_split_number * in_trainset_size:
in_split_number * in_trainset_size + in_trainset_size
]
    testset_idx = [x for x in all_dialogues_idx if x not in trainset_idx]
    dialogues_train = [all_dialogues[i] for i in trainset_idx]
    dialogues_test = [all_dialogues[i] for i in testset_idx]
data_train = reduce(lambda x, y: x + y, dialogues_train, [])
data_test = reduce(lambda x, y: x + y, dialogues_test, [])
train_s, train_q, train_a = vectorize_data_dialog(
data_train,
word_idx,
answer_idx,
sentence_size,
memory_size
)
test_s, test_q, test_a = vectorize_data_dialog(
data_test,
word_idx,
answer_idx,
sentence_size,
memory_size
)
print("Training Size (dialogues)", len(dialogues_train))
print("Testing Size (dialogues)", len(dialogues_test))
print("Training Size (stories)", len(data_train))
print("Testing Size (stories)", len(data_test))
tf.set_random_seed(FLAGS.random_state)
batch_size = FLAGS.batch_size
optimizer = tf.train.AdamOptimizer(
learning_rate=FLAGS.learning_rate,
epsilon=FLAGS.epsilon
)
batches = zip(
range(0, len(data_train) - batch_size, batch_size),
range(batch_size, len(data_train), batch_size)
)
batches = [(start, end) for start, end in batches]
with tf.Session() as sess:
model = MemN2N(
batch_size,
vocab_size,
sentence_size,
memory_size,
FLAGS.embedding_size,
answer_vocab_size=answer_vocab_size,
session=sess,
hops=FLAGS.hops,
max_grad_norm=FLAGS.max_grad_norm,
optimizer=optimizer
)
best_accuracy_per_epoch = train_model(
model,
(train_s, train_q, train_a),
(test_s, test_q, test_a),
batches
)
return best_accuracy_per_epoch
if __name__ == '__main__':
if len(sys.argv) != 3:
print (
'Usage: dialog_cross_validation.py '
'<#training dialogues> <split number>'
        )
        sys.exit(1)
trainset_size, split_number = map(int, sys.argv[1:3])
accuracies = main(trainset_size, split_number)
print ('train: {0:.3f}, test: {1:.3f}'.format(*accuracies))
|
|
#!/usr/bin/python
# Copyright (c) 2013 Dropbox, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from collections import defaultdict, namedtuple
import itertools
import textwrap
import sys
import os
import re
# Helper for flattening nested lists
flatten = itertools.chain.from_iterable
CodepointInfo = namedtuple("CodepointInfo",
[ "codepoint", "name", "category", "ccc", "bidi_category", "decomposition",
"decimal_value", "digit_value", "numeric_value", "mirrored", "old_name", "comment",
"uppercase", "lowercase", "titlecase" ])
Decomposition = namedtuple("Decomposition", [ "type", "mapping" ])
# The highest possible codepoint is 0x10FFFF, so we need 21 bits to represent a codepoint.
UNICODE_CODE_SPACE_BITS = 21
def from_hex(s):
"""Parse a hex string.
"""
return int(s, 16) if s else None
def file_lines(*path_components):
"""Helper for reading all lines out of a file, ignoring comments.
"""
with open(os.path.join(*path_components)) as f:
for line in f.readlines():
line = line.strip().split("#", 1)[0]
if line and line[0] != '@':
yield line
def parse_data(base_path):
"""Parse the Unicode character data and composition exclusion files.
"""
codepoints = {}
for line in file_lines(base_path, "UnicodeData.txt"):
fields = line.strip().split(";")
if fields[5]:
decomp_fields = fields[5].split(" ")
if decomp_fields[0].startswith("<"):
decomp = Decomposition(decomp_fields[0], map(from_hex, decomp_fields[1:]))
else:
decomp = Decomposition("canonical", map(from_hex, decomp_fields))
else:
decomp = Decomposition("none", None)
info = CodepointInfo(
from_hex(fields[0]), fields[1], fields[2], int(fields[3]), fields[4], decomp,
fields[6], fields[7], fields[8], fields[9], fields[10], fields[11],
from_hex(fields[12]), from_hex(fields[13]), from_hex(fields[14]))
codepoints[info.codepoint] = info
exclusions = set(map(from_hex, file_lines(base_path, "CompositionExclusions.txt")))
return codepoints, exclusions
def parse_collation(base_path):
"""Parse the DUCET file allkeys.txt
"""
collation_elements = {}
def parse_element(el_string):
        # treat '*' (variable-weight) elements the same as non-ignorable ones;
        # variable weighting is not actually used for sorting here
        return map(from_hex, re.split(r'\.|\*', el_string))
for line in file_lines(base_path, "allkeys.txt"):
        fields = line.strip().split(";")
        if fields[0]:
            codepoints = tuple(map(from_hex, fields[0].strip().split()))
            fields = re.split(r'\]\[(?:\.|\*)', fields[1].strip(' []*.'))
collation_elements[codepoints] = map(parse_element, fields)
return collation_elements
def recursive_decompose(data, pt):
"""Return the full decomposition for codepoint pt.
"""
info = data.get(pt, None)
if info and info.decomposition.type == "canonical":
return flatten(recursive_decompose(data, pt) for pt in info.decomposition.mapping)
else:
return (pt, )
def bytes_needed(data):
"""Find an appropriate type to represent all values in data.
"""
if any(d < 0 for d in data):
prefix, nbits = "", max(max(data).bit_length(), (-1 - min(data)).bit_length()) + 1
else:
prefix, nbits = "u", max(data).bit_length()
return prefix, next(v for v in (1, 2, 4) if v * 8 >= nbits)
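# Illustrative examples (added; not from the original file):
#   bytes_needed([0, 200, 70000]) -> ("u", 4), since 70000 needs 17 bits;
#   bytes_needed([-5, 100])       -> ("", 1), since both fit a signed 8-bit int.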
def try_split(arr, shift):
"""Try splitting arr into a 2-level trie with chunks of size 2**shift.
Return the two levels of the tree as dicts, as well as shift.
"""
table1, table2 = [], []
size = 2 ** shift
chunks = {}
for i in range(0, len(arr), size):
this_chunk = tuple(arr[i:i+size])
if this_chunk not in chunks:
chunks[this_chunk] = len(table2) >> shift
table2.extend(this_chunk)
table1.append(chunks[this_chunk])
return table1, table2, shift
def split_array(arr):
"""Split arr into a 2-level trie.
"""
return min(
( try_split(arr, shift) for shift in xrange(len(arr).bit_length()) ),
key = lambda (t1, t2, shift): bytes_needed(t1)[1] * len(t1)
+ bytes_needed(t2)[1] * len(t2)
)
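# Illustrative note (added for exposition): after
#   t1, t2, shift = split_array(arr)
# each original value can be recovered as
#   arr[i] == t2[(t1[i >> shift] << shift) + (i & ((1 << shift) - 1))]
# which is exactly the lookup emitted into the generated C functions below.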
def dump_table(name, data):
"""Dump data as a C array called 'name'.
"""
prefix, nbytes = bytes_needed(data)
typ = "%sint%s_t" % (prefix, nbytes * 8)
return len(data) * nbytes, "static const %s %s[] = {\n %s\n};\n" % (
typ, name, "\n ".join(textwrap.wrap(", ".join(map(str, data)))))
def sublist_index(haystack, needle):
n = len(needle)
for i in xrange(len(haystack) - n + 1):
if haystack[i:i+n] == needle:
return i
def make_translation_map(name, translation_func):
translation_map = [ 0 ] * 0x110000
value_table = []
value_index_cache = {}
for codepoint, info in data.iteritems():
value = translation_func(info)
if value not in value_index_cache:
value_index_cache[value] = len(value_table)
value_table.append(value)
translation_map[codepoint] = value_index_cache[value]
# End the table at the highest non-zero value
translation_map = translation_map[:max(i for i, v in enumerate(translation_map) if v) + 1]
index1, index2, shift = split_array(translation_map)
vb, v = dump_table(name + "_values", value_table)
t1b, t1 = dump_table(name + "_t1", index1)
t2b, t2 = dump_table(name + "_t2", index2)
out = "%s\n%s\n%s\n" % (v, t1, t2)
out += """int32_t %s(int32_t codepoint) {
int offset_index;
if (codepoint >= %d) return 0;
offset_index = %s_t2[(%s_t1[codepoint >> %d] << %d) + (codepoint & %d)];
return %s_values[offset_index];
}""" % (name, len(translation_map), name, name, shift, shift, (1 << shift) - 1, name)
return vb + t1b + t2b, out
def make_direct_map(name, func):
out_map = [ func(data[codepoint]) if codepoint in data else 0
for codepoint in xrange(0x110000) ]
# End the table at the highest non-zero value
out_map = out_map[:max(i for i, v in enumerate(out_map) if v) + 1]
index1, index2, shift = split_array(out_map)
t1b, t1 = dump_table(name + "_t1", index1)
t2b, t2 = dump_table(name + "_t2", index2)
out = "%s\n%s\n" % (t1, t2)
out += """int32_t %s(int32_t codepoint) {
if (codepoint >= %d) return 0;
return %s_t2[(%s_t1[codepoint >> %d] << %d) + (codepoint & %d)];
}""" % (name, len(out_map), name, name, shift, shift, (1 << shift) - 1)
return t1b + t2b, out
def make_collation_element_table(collation_elements):
    # The Default Unicode Collation Element Table (DUCET) is a mapping from sequences of
# codepoints to sequences of collation elements. We only implement "level 1" (see
# Unicode TR10 for more detail), so a collation element is the same as a "weight",
# a 16-bit integer. We use 32-bit integers throughout to represent weights.
#
# This function produces a hash table mapping sequences of codepoints to sequences of
    # collation elements. The actual collation algorithm is implemented in
# minutf_collation.cpp; it takes an input string and performs a number of lookups in the
# hash table to produce a sort key. We use a simple hash function, defined below and
# also in C++, to hash sequences of codepoints into buckets.
#
# The DUCET is serialized as a sequence of records, of variable length. Each record is
# simply a key (nonempty sequence of codepoints) followed by a value (sequence of
# weights; values may be empty). These records are variable-length, so the high-order
# bits of the first word of the key contain metadata:
#
# Bit 31 Set if this is the *last* record in its bucket
# Bits 30:29 Length of key
# Bits 28:24 Length of value
    #   Bits 20:0   First codepoint in key
#
# These records are serialized into an array called "ducet_data". A second array called
# ducet_bucket_indexes maps hash buckets to the index in ducet_data of the first record
# for that bucket. So, the lookup algorithm is:
#
    # - Given a sequence of codepoints, hash them to find which bucket any mappings for that
# key would be in.
# - Read ducet_bucket_indexes[bucket] to find where in ducet_data to start reading
# - Process variable-length records starting at ducet_data[ducet_bucket_indexes[bucket]]
    #   and see if any key matches the input. Stop after processing the record marked
    #   as the last one in its bucket.
def get_level_1_elements(elements):
return [el[0] for el in elements if el[0] != 0]
level1_elements = { key: get_level_1_elements(all_levels)
for key, all_levels
in collation_elements.iteritems() }
# How many bits do we need to store key and value lengths?
longest_key = max(len(key) for key in level1_elements.iterkeys())
longest_value = max(len(value) for value in level1_elements.itervalues())
KEY_BITS = longest_key.bit_length()
VALUE_BITS = longest_value.bit_length()
BUCKETS = len(level1_elements)
HASH_MULTIPLIER = 1031
DUCET_DATA_HIGH_BIT = 31
bucket_to_data = defaultdict(list)
def bucket(seq):
out = 0
for i in seq:
out = (out * HASH_MULTIPLIER + i) % BUCKETS
return out # % BUCKETS
for key, value in sorted(level1_elements.iteritems()):
header_word = (len(key) << (DUCET_DATA_HIGH_BIT - KEY_BITS)) \
| (len(value) << (DUCET_DATA_HIGH_BIT - KEY_BITS - VALUE_BITS))
assert (header_word & ~(~0 << UNICODE_CODE_SPACE_BITS)) == 0
data = [ header_word | key[0] ] + list(key[1:]) + list(value)
bucket_to_data[bucket(key)].append(data)
# First, figure out what the total length of data_array should be, so we know where
# to point empty buckets.
data_array_len = 0
for b in range(BUCKETS):
if b in bucket_to_data:
for d in bucket_to_data[b]:
data_array_len += len(d)
bucket_to_offset = []
data_array = []
collision_count = defaultdict(int)
for b in range(BUCKETS):
if b in bucket_to_data:
bucket_to_offset.append(len(data_array))
collision_count[len(bucket_to_data[b])] += 1
# Set the high bit of the first word of the last record in this bucket.
bucket_to_data[b][-1][0] |= (1 << DUCET_DATA_HIGH_BIT)
for d in bucket_to_data[b]:
data_array.extend(d)
else:
bucket_to_offset.append(data_array_len)
assert len(data_array) == data_array_len
header = "// %r\n" % (collision_count, )
dd_bytes, dd = dump_table("ducet_data", data_array)
off_bytes, off = dump_table("ducet_bucket_indexes", bucket_to_offset)
footer = "#define DUCET_HASH_BUCKETS %d\n" % (BUCKETS, )
footer += "#define DUCET_HASH_MULTIPLIER %d\n" % (HASH_MULTIPLIER, )
footer += "#define DUCET_LONGEST_KEY %d\n" % (longest_key, )
footer += "#define DUCET_KEY_BITS %d\n" % (KEY_BITS, )
footer += "#define DUCET_VALUE_BITS %d\n" % (VALUE_BITS, )
footer += "#define DUCET_DATA_HIGH_BIT %d\n" % (DUCET_DATA_HIGH_BIT, )
return dd_bytes + off_bytes, header + dd + off + footer
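# --- Illustrative sketch (added for exposition; not used by this generator) ---
# A pure-Python rendering of the lookup algorithm described in the comment inside
# make_collation_element_table() above; the real implementation lives in
# minutf_collation.cpp. The constants are passed in explicitly here rather than
# read from the generated #defines.
def _ducet_lookup_sketch(ducet_data, bucket_indexes, key,
                         key_bits, value_bits, buckets, hash_multiplier,
                         high_bit=31):
    """Return the level-1 weights stored for `key` (a tuple of codepoints), or None."""
    # Hash the codepoint sequence into a bucket (same hash as bucket() above).
    h = 0
    for cp in key:
        h = (h * hash_multiplier + cp) % buckets
    i = bucket_indexes[h]
    while True:
        header = ducet_data[i]
        is_last = bool(header & (1 << high_bit))
        key_len = (header >> (high_bit - key_bits)) & ((1 << key_bits) - 1)
        value_len = (header >> (high_bit - key_bits - value_bits)) & ((1 << value_bits) - 1)
        first_cp = header & ((1 << UNICODE_CODE_SPACE_BITS) - 1)
        record_key = (first_cp,) + tuple(ducet_data[i + 1:i + key_len])
        if record_key == tuple(key):
            return ducet_data[i + key_len:i + key_len + value_len]
        if is_last:
            return None
        i += key_len + value_len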
data, exclusions = parse_data("data-6.3.0")
collation_elements = parse_collation("data-6.3.0")
ccc = { codepoint: info.ccc for (codepoint, info) in data.iteritems() }
# Recursively calculate decomposition mappings and reorder combining characters
decomposition_map = {
pt: sorted(recursive_decompose(data, pt), key = lambda pt: ccc.get(pt, 0))
for pt, info in data.iteritems()
if info.decomposition.type == "canonical"
}
composition_map = {
tuple(info.decomposition.mapping): codepoint
for codepoint, info in data.items()
if codepoint not in exclusions
and info.decomposition.type == "canonical"
and len(info.decomposition.mapping) == 2
and info.ccc == 0
and ccc.get(info.decomposition.mapping[0], 0) == 0
}
# Make a shorter list of all interesting codepoints
interesting_codepoints = [0] + sorted(
set(flatten([ cp ] + dc for cp, dc in decomposition_map.iteritems()))
| set(flatten((k1, k2, v) for ((k1, k2), v) in composition_map.iteritems()))
)
interesting_codepoint_map = { pt: idx for idx, pt in enumerate(interesting_codepoints) }
# Assemble decomposition sequences
decomposition_sequences = [ 0 ]
decomposition_starts = {}
for codepoint, decomposition in decomposition_map.iteritems():
decomposition = [ interesting_codepoint_map[cp] for cp in decomposition ]
idx = sublist_index(decomposition_sequences, decomposition)
if idx is None:
idx = len(decomposition_sequences)
decomposition_sequences.extend(decomposition)
assert len(decomposition) in (1, 2, 3, 4)
assert idx < (1 << 14)
decomposition_starts[codepoint] = idx | ((len(decomposition) - 1) << 14)
k2map = defaultdict(set)
for (k1, k2), v in composition_map.iteritems():
k2map[k1].add((k2, v))
comp_seqs = []
comp_map = {}
for k1, k2vs in k2map.iteritems():
comp_map[k1] = len(comp_seqs) / 2
last_k2, last_v = k2vs.pop()
for k2, v in k2vs:
comp_seqs.append(interesting_codepoint_map[k2])
comp_seqs.append(interesting_codepoint_map[v])
comp_seqs.append(interesting_codepoint_map[last_k2] | 0x8000)
comp_seqs.append(interesting_codepoint_map[last_v])
if len(sys.argv) >= 2 and sys.argv[1] == "--collation":
out = {
"ducet_level1": make_collation_element_table(collation_elements)
}
else:
out = {
"lower_offset": make_translation_map("lowercase_offset", lambda info: info.lowercase - info.codepoint if info.lowercase else 0),
# "upper_offset": make_translation_map("uppercase_offset", lambda info: info.uppercase - info.codepoint if info.uppercase else 0),
"ccc": make_direct_map("ccc", lambda info: info.ccc),
"xref": dump_table("xref", interesting_codepoints),
"decomp_seq": dump_table("decomp_seq", decomposition_sequences),
"decomp_idx": make_direct_map("decomp_idx", lambda info: decomposition_starts.get(info.codepoint, 0)),
"comp_seq": dump_table("comp_seq", comp_seqs),
"comp_idx": make_direct_map("comp_idx", lambda info: comp_map.get(info.codepoint, 0)),
}
# for k in sorted(out.keys()):
# (nbytes, defs) = out[k]
for k, (nbytes, defs) in out.iteritems():
print defs
print >>sys.stderr, "%s: %d" % (k, nbytes)
print >>sys.stderr, "total: %s" % sum(nbytes for nbytes, defs in out.values())
|
|
"""
old_api.py: adapter for nengo_theano and [Jython] nengo-1.4
The purpose of this emulation layer is to run scripts and tests that were
written for nengo-1.4 and nengo_theano. Authors are encouraged to use
the "api.py" file instead of this file for their current work.
"""
import logging
import random
import numpy as np
from .model import Model
from .objects import ShapeMismatch, Filter, Transform
from .objects import Constant, Decoder, Encoder, Signal
from .objects import Probe as _Probe
from .objects import LIF, Direct, is_constant
from . import simulator
logger = logging.getLogger(__name__)
def compute_transform(dim_pre, dim_post, array_size_post, array_size_pre,
weight=1, index_pre=None, index_post=None, transform=None):
"""Helper function used by :func:`nef.Network.connect()` to create
the `dim_post` by `dim_pre` transform matrix.
Values are either 0 or *weight*. *index_pre* and *index_post*
are used to determine which values are non-zero, and indicate
which dimensions of the pre-synaptic ensemble should be routed
to which dimensions of the post-synaptic ensemble.
:param int dim_pre: first dimension of transform matrix
:param int dim_post: second dimension of transform matrix
    :param int array_size_pre: number of sub-populations in the pre ensemble
    :param int array_size_post: number of sub-populations in the post ensemble
:param float weight: the non-zero value to put into the matrix
:param index_pre: the indexes of the pre-synaptic dimensions to use
:type index_pre: list of integers or a single integer
:param index_post:
the indexes of the post-synaptic dimensions to use
:type index_post: list of integers or a single integer
    :returns:
        a transform array of shape
        (array_size_pre, dim_pre, array_size_post, dim_post)
        performing the requested routing
"""
all_pre = dim_pre * array_size_pre
if transform is None:
# create a matrix of zeros
transform = [[0] * all_pre for i in range(dim_post * array_size_post)]
# default index_pre/post lists set up *weight* value
# on diagonal of transform
# if dim_post * array_size_post != all_pre,
# then values wrap around when edge hit
if index_pre is None:
index_pre = range(all_pre)
elif isinstance(index_pre, int):
index_pre = [index_pre]
if index_post is None:
index_post = range(dim_post * array_size_post)
elif isinstance(index_post, int):
index_post = [index_post]
for i in range(max(len(index_pre), len(index_post))):
pre = index_pre[i % len(index_pre)]
post = index_post[i % len(index_post)]
transform[post][pre] = weight
transform = np.asarray(transform)
# reformulate to account for post.array_size_post
if transform.shape == (dim_post * array_size_post, all_pre):
rval = np.zeros((array_size_pre, dim_pre, array_size_post, dim_post))
for i in range(array_size_post):
for j in range(dim_post):
rval[:, :, i, j] = transform[i * dim_post + j].reshape(
array_size_pre, dim_pre)
transform = rval
else:
raise NotImplementedError()
rval = np.asarray(transform)
return rval
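# Hedged usage example (values invented for illustration): routing a 1 x 2 pre
# ensemble onto a 1 x 2 post ensemble with weight 0.5 produces a (1, 2, 1, 2)
# array whose [0, :, 0, :] slice is 0.5 * identity, i.e.
#   t = compute_transform(dim_pre=2, dim_post=2, array_size_pre=1,
#                         array_size_post=1, weight=0.5)
#   t[0, :, 0, :] -> [[0.5, 0.0], [0.0, 0.5]]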
def sample_unit_signal(dimensions, num_samples, rng):
"""Generate sample points uniformly distributed within the sphere.
Returns float array of sample points: dimensions x num_samples
"""
samples = rng.randn(num_samples, dimensions)
# normalize magnitude of sampled points to be of unit length
norm = np.sum(samples * samples, axis=1)
samples /= np.sqrt(norm)[:, None]
    # draw radii as a uniform sample raised to the power 1/dimensions: volume
    # grows as r**dimensions, so this makes the points uniformly dense in the ball
    scale = rng.rand(num_samples, 1) ** (1.0 / dimensions)
# scale sample points
samples *= scale
return samples.T
def filter_coefs(pstc, dt):
"""
Use like: fcoef, tcoef = filter_coefs(pstc=pstc, dt=dt)
transform(tcoef, a, b)
filter(fcoef, b, b)
"""
pstc = max(pstc, dt)
decay = np.exp(-dt / pstc)
return decay, (1.0 - decay)
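# Added note (exposition only): with fcoef, tcoef = filter_coefs(pstc, dt), the
# pair implements the discrete first-order low-pass recurrence
#   y[t + dt] = fcoef * y[t] + tcoef * x[t]
# where fcoef = exp(-dt / pstc) and tcoef = 1 - fcoef, so y approaches x with
# time constant pstc.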
# -- James and Terry arrived at this by eyeballing some graphs.
# Not clear if this should be a constant at all, it
# may depend on fn being estimated, number of neurons, etc...
DEFAULT_RCOND=0.01
class EnsembleOrigin(object):
def __init__(self, ensemble, name, func=None,
pts_slice=slice(None, None, None),
rcond=DEFAULT_RCOND,
):
"""The output from a population of neurons (ensemble),
performing a transformation (func) on the represented value.
:param Ensemble ensemble:
the Ensemble to which this origin is attached
:param function func:
the transformation to perform to the ensemble's
represented values to get the output value
"""
eval_points = ensemble.babbling_signal
# compute the targets at the sampled points
if func is None:
# if no function provided, use identity function as default
targets = eval_points.T
else:
# otherwise calculate targets using provided function
# scale all our sample points by ensemble radius,
# calculate function value, then scale back to unit length
# this ensures that we accurately capture the shape of the
# function when the radius is > 1 (think for example func=x**2)
targets = np.array([func(s) for s in eval_points.T])
if len(targets.shape) < 2:
targets.shape = targets.shape[0], 1
n, = targets.shape[1:]
dt = ensemble.model.dt
self.sigs = []
self.decoders = []
self.transforms = []
for ii in range(ensemble.array_size):
# -- N.B. this is only accurate for models firing well
# under the simulator's dt.
A = ensemble.neurons[ii].babbling_rate * dt
b = targets
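            # Solve the linear least-squares problem A * weights ~= b: A holds each
            # neuron's (rate * dt) response at the sampled evaluation points and b
            # holds the target function values, so the resulting decoders map neural
            # activity back to the represented (or transformed) value.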
weights, res, rank, s = np.linalg.lstsq(A, b, rcond=rcond)
sig = ensemble.model.add(Signal(n=n, name='%s[%i]' % (name, ii)))
decoder = ensemble.model.add(Decoder(
sig=sig,
pop=ensemble.neurons[ii],
weights=weights.T))
# set up self.sig as an unfiltered signal
transform = ensemble.model.add(Transform(1.0, sig, sig))
self.sigs.append(sig)
self.decoders.append(decoder)
self.transforms.append(transform)
class Ensemble:
"""An ensemble is a collection of neurons representing a vector space.
"""
def __init__(self, model, name, neurons, dimensions, dt, tau_ref=0.002, tau_rc=0.02,
max_rate=(200, 300), intercept=(-1.0, 1.0), radius=1.0,
encoders=None, seed=None, neuron_type='lif',
array_size=1, eval_points=None, decoder_noise=0.1,
noise_type='uniform', noise=None, mode='spiking'):
"""Construct an ensemble composed of the specific neuron model,
with the specified neural parameters.
:param int neurons: number of neurons in this population
:param int dimensions:
number of dimensions in the vector space
that these neurons represent
:param float tau_ref: length of refractory period
:param float tau_rc:
RC constant; approximately how long until 2/3
of the threshold voltage is accumulated
:param tuple max_rate:
lower and upper bounds on randomly generated
firing rates for each neuron
:param tuple intercept:
lower and upper bounds on randomly generated
x offsets for each neuron
:param float radius:
the range of input values (-radius:radius)
per dimension this population is sensitive to
:param list encoders: set of possible preferred directions
:param int seed: seed value for random number generator
:param string neuron_type:
type of neuron model to use, options = {'lif'}
:param int array_size: number of sub-populations for network arrays
:param list eval_points:
specific set of points to optimize decoders over by default
:param float decoder_noise: amount of noise to assume when computing
decoder
:param string noise_type:
the type of noise added to the input current.
Possible options = {'uniform', 'gaussian'}.
Default is 'uniform' to match the Nengo implementation.
:param float noise:
noise parameter for noise added to input current,
sampled at every timestep.
If noise_type = uniform, this is the lower and upper
bound on the distribution.
If noise_type = gaussian, this is the variance.
"""
if seed is None:
seed = np.random.randint(1000)
self.n_neurons = neurons
del neurons # neurons usually means the nonlinearities
n_neurons = self.n_neurons
self.dimensions = dimensions
self.array_size = array_size
self.radius = radius
self.noise = noise
self.noise_type = noise_type
self.decoder_noise = decoder_noise
self.mode = mode
self.model = model
self.name = name
# make sure that eval_points is the right shape
if eval_points is not None:
eval_points = np.array(eval_points)
if len(eval_points.shape) == 1:
eval_points.shape = [1, eval_points.shape[0]]
self.eval_points = eval_points
# make sure intercept is the right shape
if isinstance(intercept, (int,float)):
intercept = [intercept, 1]
elif len(intercept) == 1:
intercept.append(1)
# make dictionary for origins
self.origin = {}
# set up a dictionary for decoded_input
self.decoded_input = {}
self.input_signals = [
model.add(Signal(n=dimensions,
name='%s.input_signals[%i]' % (name, ii)))
for ii in range(array_size)]
# if we're creating a spiking ensemble
if self.mode == 'spiking':
self.rng = np.random.RandomState(seed)
self.max_rate = max_rate
self._make_encoders(encoders)
self.encoders /= radius
self.babbling_signal = sample_unit_signal(
self.dimensions, 500, self.rng) * radius
self.neurons = []
for ii in range(array_size):
neurons_ii = self.model.add(
# TODO: handle different neuron types,
LIF(n_neurons, tau_rc=tau_rc, tau_ref=tau_ref,
name=name + '[%i]' % ii)
)
self.neurons.append(neurons_ii)
max_rates = self.rng.uniform(
size=self.n_neurons,
low=max_rate[0], high=max_rate[1])
threshold = self.rng.uniform(
size=self.n_neurons,
low=intercept[0], high=intercept[1])
neurons_ii.set_gain_bias(max_rates, threshold)
# pre-multiply encoder weights by gain
self.encoders[ii] *= neurons_ii.gain[:, None]
# -- alias self.encoders to the matrices
# in the model encoders (objects)
self.model.add(Encoder(
self.input_signals[ii],
neurons_ii,
weights=self.encoders[ii]))
neurons_ii.babbling_rate = neurons_ii.rates(
np.dot(
self.encoders[ii],
self.babbling_signal).T)
# set up a dictionary for encoded_input connections
self.encoded_input = {}
# list of learned terminations on ensemble
self.learned_terminations = []
# make default origin
self.add_origin('X')
elif self.mode == 'direct':
# make default origin
self.add_origin('X',
dimensions=self.dimensions*self.array_size)
# reset n_neurons to 0
self.n_neurons = 0
def add_termination(self, name, pstc,
decoded_input=None, encoded_input=None):
"""Accounts for a new termination that takes the given input
(a theano object) and filters it with the given pstc.
Adds its contributions to the set of decoded, encoded,
or learn input with the same pstc. Decoded inputs
are represented signals, encoded inputs are
decoded_output * weight matrix, learn input is
activities * weight_matrix.
Can only have one of decoded OR encoded OR learn input != None.
:param float pstc: post-synaptic time constant
:param decoded_input:
theano object representing the decoded output of
the pre population multiplied by this termination's
transform matrix
:param encoded_input:
theano object representing the encoded output of
the pre population multiplied by a connection weight matrix
:param learn_input:
theano object representing the learned output of
the pre population multiplied by a connection weight matrix
"""
raise NotImplementedError()
# make sure one and only one of
# (decoded_input, encoded_input) is specified
if decoded_input is not None: assert (encoded_input is None)
elif encoded_input is not None: assert (decoded_input is None)
else: assert False
if decoded_input:
if self.mode is not 'direct':
# rescale decoded_input by this neuron's radius
source = TT.true_div(decoded_input, self.radius)
# ignore radius in direct mode
else: source = decoded_input
name = self.get_unique_name(name, self.decoded_input)
self.decoded_input[name] = filter.Filter(
name=name, pstc=pstc, source=source,
shape=(self.array_size, self.dimensions))
elif encoded_input:
name = self.get_unique_name(name, self.encoded_input)
self.encoded_input[name] = filter.Filter(
name=name, pstc=pstc, source=encoded_input,
shape=(self.array_size, self.n_neurons))
def add_learned_termination(self, name, pre, error, pstc,
learned_termination_class=None,
**kwargs):
"""Adds a learned termination to the ensemble.
Input added to encoded_input, and a learned_termination object
is created to keep track of the pre and post
(self) spike times, and adjust the weight matrix according
to the specified learning rule.
:param Ensemble pre: the pre-synaptic population
:param Ensemble error: the Origin that provides the error signal
:param float pstc:
:param learned_termination_class:
"""
raise NotImplementedError()
#TODO: is there ever a case we wouldn't want this?
assert error.dimensions == self.dimensions * self.array_size
# generate an initial weight matrix if none provided,
# random numbers between -.001 and .001
if 'weight_matrix' not in kwargs.keys():
# XXX use self.rng
weight_matrix = np.random.uniform(
size=(self.array_size * pre.array_size,
self.n_neurons, pre.n_neurons),
low=-.001, high=.001)
kwargs['weight_matrix'] = weight_matrix
else:
# make sure it's an np.array
#TODO: error checking to make sure it's the right size
kwargs['weight_matrix'] = np.array(kwargs['weight_matrix'])
learned_term = learned_termination_class(
pre=pre, post=self, error=error, **kwargs)
learn_projections = [TT.dot(
pre.neurons.output[learned_term.pre_index(i)],
learned_term.weight_matrix[i % self.array_size])
for i in range(self.array_size * pre.array_size)]
# now want to sum all the output to each of the post ensembles
# going to reshape and sum along the 0 axis
learn_output = TT.sum(
TT.reshape(learn_projections,
(pre.array_size, self.array_size, self.n_neurons)), axis=0)
# reshape to make it (array_size x n_neurons)
learn_output = TT.reshape(learn_output,
(self.array_size, self.n_neurons))
# the input_current from this connection during simulation
self.add_termination(name=name, pstc=pstc, encoded_input=learn_output)
self.learned_terminations.append(learned_term)
return learned_term
def add_origin(self, name, func=None, **kwargs):
"""Create a new origin to perform a given function
on the represented signal.
:param string name: name of origin
:param function func:
desired transformation to perform over represented signal
:param list eval_points:
specific set of points to optimize decoders over for this origin
"""
# if we're in spiking mode create an ensemble_origin with decoders
# and the whole shebang for interpreting the neural activity
if self.mode == 'spiking':
self.origin[name] = EnsembleOrigin(
ensemble=self,
func=func,
name='%s.%s' % (self.name, name),
**kwargs)
# if we're in direct mode then this population is just directly
# performing the specified function, use a basic origin
elif self.mode == 'direct':
raise NotImplementedError()
if func is not None:
if 'initial_value' not in kwargs.keys():
# [func(np.zeros(self.dimensions)) for i in range(self.array_size)]
init = func(np.zeros(self.dimensions))
init = np.array([init for i in range(self.array_size)])
kwargs['initial_value'] = init.flatten()
if kwargs.has_key('dt'): del kwargs['dt']
self.origin[name] = origin.Origin(func=func, **kwargs)
def get_unique_name(self, name, dic):
"""A helper function that runs through a dictionary
and checks for the key name, adds a digit to the end
until a unique key has been created.
:param string name: desired key name
:param dict dic: the dictionary to search through
:returns string: a unique key name for dic
"""
i = 0
while dic.has_key(name + '_' + str(i)):
i += 1
return name + '_' + str(i)
def _make_encoders(self, encoders):
"""Generates a set of encoders.
:param int neurons: number of neurons
:param int dimensions: number of dimensions
:param theano.tensor.shared_randomstreams snrg:
theano random number generator function
:param list encoders:
set of possible preferred directions of neurons
"""
if encoders is None:
# if no encoders specified, generate randomly
encoders = self.rng.randn(
self.array_size, self.n_neurons, self.dimensions)
else:
# if encoders were specified, cast list as array
encoders = np.array(encoders).T
# repeat array until 'encoders' is the same length
# as number of neurons in population
encoders = np.tile(encoders,
(self.n_neurons / len(encoders) + 1)
).T[:self.n_neurons, :self.dimensions]
encoders = np.tile(encoders, (self.array_size, 1, 1))
# normalize encoders across represented dimensions
norm = np.sum(encoders * encoders, axis=2)[:, :, None]
self.encoders = encoders / np.sqrt(norm)
def theano_tick(self):
if self.mode == 'direct':
# set up matrix to store accumulated decoded input
X = np.zeros((self.array_size, self.dimensions))
# updates is an ordered dictionary of theano variables to update
for di in self.decoded_input.values():
# add its values to the total decoded input
X += di.value.get_value()
# if we're calculating a function on the decoded input
for o in self.origin.values():
if o.func is not None:
val = np.float32([o.func(X[i]) for i in range(len(X))])
o.decoded_output.set_value(val.flatten())
def update(self, dt):
"""Compute the set of theano updates needed for this ensemble.
Returns a dictionary with new neuron state,
termination, and origin values.
:param float dt: the timestep of the update
"""
### find the total input current to this population of neurons
# set up matrix to store accumulated decoded input
X = None
# updates is an ordered dictionary of theano variables to update
updates = OrderedDict()
for ii, di in enumerate(self.decoded_input.values()):
# add its values to the total decoded input
if ii == 0:
X = di.value
else:
X += di.value
updates.update(di.update(dt))
# if we're in spiking mode, then look at the input current and
# calculate new neuron activities for output
if self.mode == 'spiking':
# apply respective biases to neurons in the population
J = TT.as_tensor_variable(np.array(self.bias))
for ei in self.encoded_input.values():
# add its values directly to the input current
J += (ei.value.T * self.alpha.T).T
updates.update(ei.update(dt))
# only do this if there is decoded_input
if X is not None:
# add to input current for each neuron as
# represented input signal x preferred direction
J = map_gemv(1.0, self.shared_encoders, X, 1.0, J)
# if noise has been specified for this neuron,
if self.noise:
                # generate random noise values, one for each input_current element.
                # self.noise is the variance, so the standard deviation is sqrt(self.noise).
                # When simulating white noise, the noise process must be scaled by
                # sqrt(dt) instead of dt; hence we divide the std by sqrt(dt).
if self.noise_type.lower() == 'gaussian':
J += self.srng.normal(
size=self.bias.shape, std=np.sqrt(self.noise/dt))
elif self.noise_type.lower() == 'uniform':
J += self.srng.uniform(
size=self.bias.shape,
low=-self.noise/np.sqrt(dt),
high=self.noise/np.sqrt(dt))
# pass that total into the neuron model to produce
# the main theano computation
updates.update(self.neurons.update(J, dt))
for l in self.learned_terminations:
# also update the weight matrices on learned terminations
updates.update(l.update(dt))
# and compute the decoded origin decoded_input from the neuron output
for o in self.origin.values():
updates.update(o.update(dt, updates[self.neurons.output]))
if self.mode == 'direct':
# if we're in direct mode then just directly pass the decoded_input
# to the origins for decoded_output
for o in self.origin.values():
if o.func is None:
if len(self.decoded_input) > 0:
updates.update(OrderedDict({o.decoded_output:
TT.flatten(X).astype('float32')}))
return updates
class Probe(object):
def __init__(self, probe, net):
self.probe = probe
self.net = net
def get_data(self):
sim = self.net.model.sim_obj
lst = sim.probe_data(self.probe)
rval = np.asarray(lst).reshape(len(lst), -1)
return rval
class Network(object):
def __init__(self, name,
seed=None,
fixed_seed=None,
dt=0.001,
simulator=simulator.Simulator):
self.random = random.Random()
if seed is not None:
self.random.seed(seed)
self.fixed_seed = fixed_seed
self.model = Model(name, dt=dt, simulator=simulator)
self.ensembles = {}
self.inputs = {}
@property
def dt(self):
return self.model.dt
def make_input(self, name, value):
if callable(value):
rval = self.model.add(Signal(name=name))
fval = np.asarray(value(0))
pop = self.model.add(Direct(n_in=1, n_out=fval.size, fn=value))
self.model.add(Encoder(
self.model.simtime, pop, weights=np.asarray([[1]])))
self.inputs[name] = pop.output_signal
# move from signals_tmp -> signals
self.model.add(Transform(1.0,
pop.output_signal,
pop.output_signal))
else:
value = np.asarray(value, dtype='float')
N, = value.shape
rval = self.model.add(Constant(n=N, value=value, name=name))
self.inputs[name] = rval
return rval
def make_array(self, name, neurons, array_size, dimensions=1, **kwargs):
"""Generate a network array specifically.
        This function is deprecated; use it only for legacy code
        or for compatibility with the non-theano API.
"""
return self.make(
name=name, neurons=neurons, dimensions=dimensions,
array_size=array_size, **kwargs)
def make(self, name, *args, **kwargs):
if 'seed' not in kwargs.keys():
if self.fixed_seed is not None:
kwargs['seed'] = self.fixed_seed
else:
# if no seed provided, get one randomly from the rng
kwargs['seed'] = self.random.randrange(0x7fffffff)
kwargs['dt'] = self.dt
rval = Ensemble(self.model, name, *args, **kwargs)
self.ensembles[name] = rval
return rval
def connect(self, name1, name2, func=None, pstc=0.005, **kwargs):
if name1 in self.ensembles:
src = self.ensembles[name1]
dst = self.ensembles[name2]
if func is None:
oname = 'X'
else:
oname = func.__name__
if oname not in src.origin:
src.add_origin(oname, func)
decoded_origin = src.origin[oname]
transform = compute_transform(
array_size_pre=src.array_size,
dim_pre=decoded_origin.sigs[0].n,
array_size_post=dst.array_size,
dim_post=dst.dimensions,
**kwargs)
if pstc > self.dt:
smoothed_signals = []
for ii in range(src.array_size):
filtered_signal = self.model.add(Signal(
n=decoded_origin.sigs[ii].n, #-- views not ok here
name=decoded_origin.sigs[ii].name + '::pstc=%s' % pstc))
fcoef, tcoef = filter_coefs(pstc, dt=self.dt)
self.model.add(Transform(tcoef,
decoded_origin.sigs[ii],
filtered_signal))
self.model.add(Filter(
fcoef, filtered_signal, filtered_signal))
smoothed_signals.append(filtered_signal)
for jj in range(dst.array_size):
for ii in range(src.array_size):
if np.all(transform[ii, :, jj] == 0):
continue
self.model.add(Filter(
transform[ii, :, jj].T,
smoothed_signals[ii],
dst.input_signals[jj]))
else:
smoothed_signals = decoded_origin.sigs
for ii in range(src.array_size):
for jj in range(dst.array_size):
if np.all(transform[ii, :, jj] == 0):
continue
self.model.add(Transform(
transform[ii, :, jj].T,
smoothed_signals[ii],
dst.input_signals[jj]))
elif name1 in self.inputs:
assert func is None, "Cannot compute a function on an input"
src = self.inputs[name1]
dst_ensemble = self.ensembles[name2]
dst_len = dst_ensemble.array_size * dst_ensemble.dimensions
transform = np.array(kwargs.get('transform', 1.0))
if transform.ndim == 2 and (transform.shape[1],) != src.shape:
raise ShapeMismatch((transform.shape[1],), src.shape)
if transform.ndim == 2 and (transform.shape[0],) != (dst_len,):
raise ShapeMismatch((transform.shape[0],), (dst_len,))
for ii in range(0, dst_len, dst_ensemble.dimensions):
src_ii = src[ii:ii+dst_ensemble.dimensions]
                dst_ii = dst_ensemble.input_signals[ii // dst_ensemble.dimensions]
if pstc > self.dt:
src_filtered = self.model.add(Signal(
n=src_ii.size, #-- views not ok here
name=src.name + '::d=%d,pstc=%s' % (ii,pstc)))
fcoef, tcoef = filter_coefs(pstc, dt=self.dt)
if is_constant(src_ii):
self.model.add(Filter(tcoef, src_ii, src_filtered))
else:
self.model.add(Transform(tcoef, src_ii, src_filtered))
self.model.add(Filter(fcoef, src_filtered, src_filtered))
src_ii = src_filtered
self.model.add(Filter(transform, src_ii, dst_ii))
else:
raise ValueError(
"\"%s\" is not an ensemble or an input name" % name1)
def _raw_probe(self, sig, dt_sample):
"""
Create an un-filtered probe of the named signal,
without constructing any filters or transforms.
"""
return Probe(self.model.add(_Probe(sig, dt_sample)), self)
def _probe_signals(self, srcs, dt_sample, pstc):
"""
set up a probe for signals (srcs) that will record
their value *after* decoding, transforming, filtering.
This is appropriate for inputs, constants, non-linearities, etc.
But if you want to probe the part of a filter that is purely the
decoded contribution from a population, then use
_probe_decoded_signals.
"""
src_n = srcs[0].size
probe_sig = self.model.add(Signal(
n=len(srcs) * src_n,
name='probe(%s)' % srcs[0].name
))
if pstc > self.dt:
# -- create a new smoothed-out signal
fcoef, tcoef = filter_coefs(pstc=pstc, dt=self.dt)
self.model.add(Filter(fcoef, probe_sig, probe_sig))
for ii, src in enumerate(srcs):
self.model.add(Filter(
tcoef, src, probe_sig[ii * src_n: (ii + 1) * src_n]))
return Probe(
self.model.add(_Probe(probe_sig, dt_sample)),
self)
else:
for ii, src in enumerate(srcs):
self.model.add(Filter(
1.0, src, probe_sig[ii * src_n: (ii + 1) * src_n]))
return Probe(self.model.add(_Probe(probe_sig, dt_sample)),
self)
def _probe_decoded_signals(self, srcs, dt_sample, pstc):
"""
set up a probe for signals (srcs) that will record
their value just from being decoded.
This is appropriate for functions decoded from nonlinearities.
"""
src_n = srcs[0].size
probe_sig = self.model.add(Signal(
n=len(srcs) * src_n,
name='probe(%s,pstc=%f)' % (srcs[0].name, pstc)
))
if pstc > self.dt:
# -- create a new smoothed-out signal
fcoef, tcoef = filter_coefs(pstc=pstc, dt=self.dt)
self.model.add(Filter(fcoef, probe_sig, probe_sig))
for ii, src in enumerate(srcs):
self.model.add(Transform(
tcoef, src, probe_sig[ii * src_n: (ii + 1) * src_n]))
return Probe(
self.model.add(_Probe(probe_sig, dt_sample)),
self)
else:
for ii, src in enumerate(srcs):
self.model.add(Transform(
1.0, src, probe_sig[ii * src_n: (ii + 1) * src_n]))
return Probe(self.model.add(_Probe(probe_sig, dt_sample)),
self)
def make_probe(self, name, dt_sample, pstc):
if name in self.ensembles:
ens = self.ensembles[name]
srcs = ens.origin['X'].sigs
return self._probe_decoded_signals(srcs, dt_sample, pstc)
elif name in self.inputs:
src = self.inputs[name]
return self._probe_signals([src], dt_sample, pstc)
else:
raise NotImplementedError()
def run(self, simtime, verbose=False):
self.model.run(simtime)
|
|
__author__ = 'Administrator'
import json
import time
import random
from cinder.volume import driver
from hwcloud.database_manager import DatabaseManager
from hwcloud.hws_service.client import HWSClient
from cinder.openstack.common import log as logging
from cinder.volume.drivers.hws import sshutils as sshclient
from oslo.config import cfg
from keystoneclient.v2_0 import client as kc
from cinder.openstack.common import fileutils
from cinder.openstack.common import excutils
from cinder.image import image_utils
import traceback
import string
import os
hws_opts = [cfg.StrOpt('project_id', help='project_id'),
cfg.StrOpt('flavor_id', help='flavor id'),
cfg.StrOpt('vpc_id', help='vpc_id'),
cfg.StrOpt('subnet_id', help='subnet_id'),
cfg.StrOpt('image_id', help='image_id'),
cfg.StrOpt('gong_yao', help='gong yao'),
cfg.StrOpt('si_yao', help='si yao'),
            cfg.StrOpt('service_region', help='region of the HWS service endpoint'),
            cfg.StrOpt('resource_region', help='region in which to create resources'),
            cfg.StrOpt('service_protocol', help='protocol', default='https'),
            cfg.StrOpt('service_port', help='port', default='443'),
            cfg.StrOpt('volume_type', help='default volume_type', default='SATA')]
CONF = cfg.CONF
hws_group = 'hws'
CONF.register_opts(hws_opts, hws_group)
remote_vgw_keystone_opts = [
cfg.StrOpt('tenant_name',
default='admin',
help='tenant name for connecting to keystone in admin context'),
cfg.StrOpt('user_name',
default='cloud_admin',
               help='username for connecting to keystone in admin context'),
cfg.StrOpt('keystone_auth_url',
default='https://identity.cascading.hybrid.huawei.com:443/identity-admin/v2.0',
help='value of keystone url'),
]
remote_vgw_keystone_group = 'keystone_authtoken'
CONF.register_opts(remote_vgw_keystone_opts, remote_vgw_keystone_group)
hws_vgw_opts = [
cfg.StrOpt('user_name',
default='root',
help='user name for local az hws v2v gateway host'),
cfg.StrOpt('password',
default='Huawei@CLOUD8!',
help='password for local az hws v2v gateway host'),
cfg.StrOpt('host_ip',
default='172.21.0.23',
help='ip for local az hws v2v gateway host'),
cfg.StrOpt('ssh_retry_times',
default='3',
help='ssh retry times'),
cfg.StrOpt('hws_instance_id',
# default='72dca101-e822-4923-a3a1-ffac838ff5d5',
default='a83325ee-4917-4896-9eac-227f5934115a',
help='hws vgw instance id'),
cfg.StrOpt('hws_vgw_ip',
# default='117.78.35.163',
default='117.78.36.181',
               help='hws vgw instance ip'),
]
hws_vgw_group = 'hws_vgw'
CONF.register_opts(hws_vgw_opts, hws_vgw_group)
LOG = logging.getLogger(__name__)
SATA = 'SATA'
SSD = 'SSD'
SAS = 'SAS'
SUPPORT_VOLUME_TYPE = [SATA, SSD, SAS]
HWS_SERVER_STATUS = {
'active': 'ACTIVE',
'shutoff': 'SHUTOFF'
}
HWS_REAL_DEVNAME = {
'/dev/sda': '/dev/xvda',
'/dev/sdb': '/dev/xvde',
'/dev/sdc': '/dev/xvdf',
'/dev/sdd': '/dev/xvdg',
'/dev/sde': '/dev/xvdh',
'/dev/sdf': '/dev/xvdi',
'/dev/sdg': '/dev/xvdj',
'/dev/sdh': '/dev/xvdk',
'/dev/sdi': '/dev/xvdl',
'/dev/sdj': '/dev/xvdm',
'/dev/sdk': '/dev/xvdn'
}
class HWSDriver(driver.VolumeDriver):
VERSION = "1.0"
def __init__(self, *args, **kwargs):
super(HWSDriver, self).__init__( *args, **kwargs)
gong_yao = CONF.hws.gong_yao
si_yao = CONF.hws.si_yao
region = CONF.hws.service_region
protocol = CONF.hws.service_protocol
port = CONF.hws.service_port
self.hws_client = HWSClient(gong_yao, si_yao, region, protocol, port)
self.db_manager = DatabaseManager()
self.project_id = CONF.hws.project_id
self.availability_zone = CONF.hws.resource_region
self.volume_type_default = CONF.hws.volume_type
self.hws_vgw_user = CONF.hws_vgw.user_name
self.hws_vgw_password = CONF.hws_vgw.password
self.hws_wgw_ip = CONF.hws_vgw.host_ip
self.hws_vgw_ip = CONF.hws_vgw.hws_vgw_ip
def create_volume(self, volume):
"""Create a volume.
"""
LOG.info('VOLUME: %s' % dir(volume))
LOG.info('IMAGE ID: %s' % volume.get('image_id'))
if not volume.get('image_id'):
volume_name = self._get_display_name(volume)
project_id = self.project_id
size = volume.size
volume_type = self.volume_type_default
job_info = self.hws_client.evs.create_volume(project_id, self.availability_zone,
size, volume_type, name=volume_name)
self._deal_with_job(job_info, project_id, self._add_volume_mapping_to_db, None, volume)
else:
return {'provider_location': 'HWS CLOUD'}
def _get_display_name(self, volume):
original_display_name = volume.display_name
if len(original_display_name) < 20:
display_name = original_display_name
else:
display_name = self._get_random_name(8)
return display_name
def _get_random_name(self, length):
return ''.join(random.sample(string.ascii_letters + string.digits, length))
def _get_instance_volume_list(self, instance_id):
"""
        :param instance_id: string, hws server id
        :return volume_list_rsp: dict, volume list response for the server
"""
volume_list_rsp = self.hws_client.ecs.get_volume_list(self.project_id, instance_id)
if volume_list_rsp['status'] != 200:
error_info = 'hws_v2v: get hws v2v gateway host volume list error, Exception: %s' \
% json.dumps(volume_list_rsp)
LOG.error(error_info)
raise Exception(error_info)
return volume_list_rsp
def _get_volume_detail(self, volume_id):
"""
        :param volume_id: string, hws volume id
        :return volume_detail_rsp: dict, volume detail response
"""
volume_detail_rsp = self.hws_client.evs.get_volume_detail(self.project_id, volume_id)
if volume_detail_rsp['status'] != 200:
error_info = 'hws_v2v: get hws volume detail error, Exception: %s' \
% json.dumps(volume_detail_rsp)
LOG.error(error_info)
raise Exception(error_info)
return volume_detail_rsp
def _attach_volume(self, instance_id, volume_id, device_name):
"""
        :param instance_id: string, hws server id
        :param volume_id: string, hws volume id
        :param device_name: device name, e.g. '/dev/sdb'
:return:
"""
job_attach_volume = self.hws_client.ecs.attach_volume(self.project_id,
instance_id,
volume_id,
device_name)
self._deal_with_job(job_attach_volume, self.project_id)
def _deal_java_error(self, java_response):
"""
{
'status': 'error',
'body': {
'message': '<MESSAGE>',
'exception': '<EXCEPTION>'
}
}
:param java_response: dict
:return:
"""
        if 'error' == java_response['status']:
            error_message = java_response['body']['message']
            exception_info = java_response['body']['exception']
            LOG.error('Java error message: %s, exception: %s' % (error_message, exception_info))
            raise Exception('Java error message: %s, exception: %s' % (error_message, exception_info))
if 200 == java_response['status']:
return
elif 202 == java_response['status']:
return
else:
error_info = json.dumps(java_response)
LOG.error(error_info)
raise Exception(error_info)
def _power_on(self, instance_id):
start_result = self.hws_client.ecs.start_server(self.project_id, instance_id)
self._deal_java_error(start_result)
def _power_off(self, instance_id):
stop_result = self.hws_client.ecs.stop_server(self.project_id, instance_id)
self._deal_java_error(stop_result)
    def _get_server_status(self, instance_id):
        try:
            server = self.hws_client.ecs.get_detail(self.project_id, instance_id)
            if server and server['status'] == 200:
                status = server['body']['server']['status']
            else:
                raise Exception('get server detail error, response: %s'
                                % json.dumps(server))
        except Exception:
            msg = traceback.format_exc()
            raise Exception(msg)
        return status
def _stop_server(self, instance_id):
status = self._get_server_status(instance_id)
if HWS_SERVER_STATUS['active'] == status:
self._power_off(instance_id)
time.sleep(20)
retry_times = 10
# query server status until server status is SHUTOFF
while retry_times > 0:
time.sleep(5)
status = self._get_server_status(instance_id)
LOG.error('status: %s' % status)
if HWS_SERVER_STATUS['shutoff'] == status:
break
retry_times -= 1
if HWS_SERVER_STATUS['shutoff'] != status:
msg = "hws_v2v: stop server failed, hws_instance_id: %s, status: %s " %\
(instance_id, status)
raise Exception(msg)
def _detach_volume(self, instance_id, volume_id):
"""
        Detach the disk attached to the instance.
        :param instance_id: string, hws server id
        :param volume_id: string, hws volume id
        :return:
"""
job_detach_volume = self.hws_client.ecs.detach_volume(self.project_id,
instance_id,
volume_id)
self._deal_with_job(job_detach_volume, self.project_id)
def _get_instance_next_devname(self, instance_id):
volume_list_rsp = self._get_instance_volume_list(instance_id)
volume_list = volume_list_rsp['body']['volumeAttachments']
used_device_letter = set()
all_letters = set(string.ascii_lowercase)
for volume in volume_list:
used_device_letter.add(volume.get('device')[-1])
        unused_device_letter = sorted(all_letters - used_device_letter)
        LOG.debug('used device letters: %s' % used_device_letter)
        LOG.debug('all device letters: %s' % all_letters)
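        # Hypothetical example (for illustration only): if the gateway already has
        # '/dev/sda' and '/dev/sdb' attached, 'c' is the first unused letter, so the
        # next device name returned below would be '/dev/sdc'.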
next_dev_name = volume.get('device')[:-1] + unused_device_letter[0]
return next_dev_name
def _get_management_url(self, kc, image_name, **kwargs):
endpoint_info = kc.service_catalog.get_endpoints(**kwargs)
endpoint_list = endpoint_info.get(kwargs.get('service_type'), None)
region_name = image_name.split('_')[-1]
if endpoint_list:
for endpoint in endpoint_list:
if region_name == endpoint.get('region'):
return endpoint.get('publicURL')
def _copy_volume_to_file(self, image_meta, dev_name):
image_id = image_meta.get('id')
dest_file_path = os.path.join('/tmp', image_id)
real_devname = HWS_REAL_DEVNAME[dev_name]
        ssh_client = None
        try:
            ssh_client = sshclient.SSH(user=self.hws_vgw_user,
                                       host=self.hws_vgw_ip,
                                       password=self.hws_vgw_password)
# convert volume to image
cmd = 'qemu-img convert -c -O qcow2 %s %s' % \
(real_devname, dest_file_path)
LOG.error('begin time of %s is %s' %
(cmd, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()
)))
ssh_client.run(cmd)
LOG.debug("Finished running cmd : %s" % cmd)
LOG.error('end time of %s is %s' %
(cmd, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
except Exception as e:
with excutils.save_and_reraise_exception():
                LOG.error('Failed to copy volume to image by vgw. %s'
                          % traceback.format_exc())
finally:
if ssh_client:
                # close the ssh connection
ssh_client.close()
@sshclient.RetryDecorator(max_retry_count=CONF.hws_vgw.ssh_retry_times,
exceptions=(sshclient.SSHError, sshclient.SSHTimeout))
def _copy_file_to_remote_vgw(self, image_meta):
image_id = image_meta.get('id')
image_name = image_meta.get('name')
dest_file_path = os.path.join('/tmp', image_id)
kwargs = {
'auth_url': CONF.keystone_authtoken.keystone_auth_url,
'tenant_name': CONF.keystone_authtoken.tenant_name,
'user_name': CONF.keystone_authtoken.user_name,
'password': CONF.keystone_authtoken.password,
'insecure': True
}
keystone_client = kc.Client(**kwargs)
# get remote v2v gateway
vgw_url = self._get_management_url(keystone_client, image_name, service_type='v2v')
        ssh_client = None
        try:
            ssh_client = sshclient.SSH(user=self.hws_vgw_user,
                                       host=self.hws_vgw_ip,
                                       password=self.hws_vgw_password)
LOG.debug('The remote vgw url is %(vgw_url)s',
{'vgw_url': vgw_url})
# eg: curl -X POST --http1.0 -T
# /tmp/467bd6e1-5a6e-4daa-b8bc-356b718834f2
# http://172.27.12.245:8090/467bd6e1-5a6e-4daa-b8bc-356b718834f2
cmd = 'curl -X POST --http1.0 -T %s ' % dest_file_path
cmd += vgw_url
if cmd.endswith('/'):
cmd += image_id
else:
cmd += '/' + image_id
LOG.error('begin time of %s is %s' %
(cmd, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()
)))
ssh_client.run(cmd)
LOG.error('end time of %s is %s' %
(cmd, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()
)))
LOG.debug("Finished running cmd : %s" % cmd)
ssh_client.run('rm -f %s' % dest_file_path)
except Exception as e:
with excutils.save_and_reraise_exception():
                LOG.error('Failed to copy volume to image by vgw. %s'
                          % traceback.format_exc())
finally:
if ssh_client:
ssh_client.close()
def copy_volume_to_image(self, context, volume, image_service, image_meta):
container_format = image_meta.get('container_format')
#if container_format == 'vgw_url':
if container_format == 'bare':
try:
# 1.get the hws volume id
cascaded_volume_id = volume['id']
hws_volume_id = self.db_manager.get_cascaded_volume_id(cascaded_volume_id)
if not hws_volume_id:
msg = 'get hws volume id error, cascaded id: %s' % cascaded_volume_id
LOG.error(msg)
raise Exception('get hws volume id error')
# 2. get the hws_volume status
volume_detail_rsp = self._get_volume_detail(hws_volume_id)
status = volume_detail_rsp['body']['volume']['status']
# attachments = volume_detail_rsp['body']['volume']['attachments']
# attach_num = len(attachments)
# origin_instance_id = None
# attach_back = False
# 3. detach volume from origin instance
# if status == 'in-use':
# if attach_num != 1:
# msg = 'hws_v2v: get attachments info error, num: %s' % attach_num
# LOG.error(msg)
# raise Exception(msg)
# origin_instance_id = attachments[0]['server_id']
# # volume can only be detached when sever stop
# self._stop_server(origin_instance_id)
# self._detach_volume(origin_instance_id, hws_volume_id)
# attach_back = True
# volume_detail_rsp = self._get_volume_detail(hws_volume_id)
# status = volume_detail_rsp['body']['status']
# 4. attach volume to hws v2v gateway host
if status != 'available':
msg = 'attach volume to local v2v gateway host error, status : %s, cascaded_volume_id: %s, ' \
'hws_volume_id %s' % (status, cascaded_volume_id, hws_volume_id)
LOG.error(msg)
raise Exception('attach volume to local v2v gateway failed')
hws_vgw_instance_id = CONF.hws_vgw.hws_instance_id
# if not hws_vgw_instance_id:
# LOG.error(
# 'hws_v2v: get cascaded v2v gateway instance id error: %s' % CONF.hws_vgw.cascaded_instance_id)
# raise Exception('hws_v2v: get cascaded v2v gateway instance error.')
dev_name = self._get_instance_next_devname(hws_vgw_instance_id)
self._attach_volume(hws_vgw_instance_id, hws_volume_id, dev_name)
# 5. copy volume to file
self._copy_volume_to_file(image_meta, dev_name)
# 6. copy file to remote v2v gateway
# self._copy_file_to_remote_vgw(image_meta)
                # 7. create an empty file and upload it to glance
with image_utils.temporary_file() as tmp:
image_utils.upload_volume(context,
image_service,
image_meta,
tmp)
fileutils.delete_if_exists(tmp)
# 8. detach volume from hws v2v gateway
self._stop_server(hws_vgw_instance_id)
self._detach_volume(hws_vgw_instance_id, hws_volume_id)
self._power_on(hws_vgw_instance_id)
finally:
attach_back = True
# if attach_back is True:
# origin_dev_name = attachments[0]['device']
# self._attach_volume(origin_instance_id, hws_volume_id, origin_dev_name)
# self._power_on(origin_instance_id)
@sshclient.RetryDecorator(max_retry_count=CONF.hws_vgw.ssh_retry_times,
exceptions=(sshclient.SSHError, sshclient.SSHTimeout))
def _copy_file_to_volume(self, image_id, dev_name):
        ssh_client = None
        try:
real_devname = HWS_REAL_DEVNAME[dev_name]
dest_file_path = os.path.join('/tmp', image_id)
ssh_client = sshclient.SSH(user=self.hws_vgw_user,
host=self.hws_vgw_ip,
password=self.hws_vgw_password)
# copy data to volume
cmd = 'qemu-img convert %s %s' % \
(dest_file_path, real_devname)
ssh_client.run(cmd)
LOG.debug("Finished running cmd : %s" % cmd)
# cmd = 'rm -rf %s' % dest_file_path
# ssh_client.run(cmd)
except Exception as e:
LOG.error('Failed to copy data to volume from vgw. '
'traceback: %s', traceback.format_exc())
raise e
finally:
if ssh_client:
ssh_client.close()
def copy_image_to_volume(self, context, volume, image_service, image_id):
image_meta = image_service.show(context, image_id)
container_format = image_meta.get('container_format')
# if container_format == 'vgw_url':
if container_format == 'bare':
# 1.get the hws_volume_id
cascaded_volume_id = volume['id']
self.create_volume(volume)
            hws_volume_id = self.db_manager.get_cascaded_volume_id(cascaded_volume_id)
            if not hws_volume_id:
                LOG.error('get hws volume id error, cascaded id: %s' % cascaded_volume_id)
                raise Exception('get hws volume id error.')
# 2. get the hws_volume status
time.sleep(30)
retry_times = 10
while retry_times > 0:
volume_detail_rsp = self._get_volume_detail(hws_volume_id)
status = volume_detail_rsp['body']['volume']['status']
if status == 'available':
break
else:
time.sleep(5)
retry_times -= 1
if status != 'available':
LOG.error('create hws volume failed, status: %s, cascaded_volume_id: %s, hws_volume_id: %s'
% (status, cascaded_volume_id, hws_volume_id))
raise Exception('create hws volume failed.')
# 2. attach volume to hws v2v gateway host
hws_vgw_instance_id = CONF.hws_vgw.hws_instance_id
# if not hws_vgw_instance_id:
# LOG.error('hws_v2v: get cascaded v2v gateway instance id error.' % CONF.hws_vgw.cascaded_instance_id)
# raise Exception('get cascaded v2v gateway instance id error.')
dev_name = self._get_instance_next_devname(hws_vgw_instance_id)
self._attach_volume(hws_vgw_instance_id, hws_volume_id, dev_name)
# 3. copy image's file to volume
self._copy_file_to_volume(image_id, dev_name)
# 4. detach volume from hws v2v gateway
self._stop_server(hws_vgw_instance_id)
self._detach_volume(hws_vgw_instance_id, hws_volume_id)
self._power_on(hws_vgw_instance_id)
        # Do not create the volume when the cinder create-volume API is called.
        # Only create it when attaching, detaching, or creating a server from the volume.
elif not image_id:
volume_name = self._get_display_name(volume)
project_id = self.project_id
size = volume.size
volume_type = self.volume_type_default
image_hws_id = self._get_cascaded_image_id(image_id)
job_info = self.hws_client.evs.create_volume(project_id, self.availability_zone,
size, volume_type, name=volume_name, imageRef=image_hws_id)
self._deal_with_job(job_info, project_id, self._add_volume_mapping_to_db, None, volume)
def _get_volume_type(self, volume_type):
if volume_type not in SUPPORT_VOLUME_TYPE:
            LOG.info('VOLUME TYPE: %s is not supported in HWS cloud, supported types are: %s. Using SATA as default' %
                     (volume_type, SUPPORT_VOLUME_TYPE))
volume_type = SATA
return volume_type
def _get_cascaded_image_id(self, cascading_image_id):
cascaded_image_id = self.db_manager.get_cascaded_image_id(cascading_image_id)
if not cascaded_image_id:
LOG.error('No image mapping in HWS Cloud.')
raise Exception('No image mapping in HWS Cloud.')
return cascaded_image_id
def _add_volume_mapping_to_db(self, job_detail_of_create_volume, volume):
"""
:param job_detail_of_create_volume:
:return:
"""
hws_volume_id = job_detail_of_create_volume['body']['entities']['volume_id']
volume_id = volume.id
self.db_manager.add_volume_mapping(volume_id, hws_volume_id)
LOG.info('Success to add volume mapping: {%s: %s}' % (volume_id, hws_volume_id))
def _deal_with_job(self, job_info, project_id,
function_deal_with_success=None,
function_deal_with_fail=None,
object=None):
if job_info['status'] == 200:
job_id = job_info['body']['job_id']
while True:
time.sleep(5)
job_detail_info = self.hws_client.evs.get_job_detail(project_id, job_id)
if job_detail_info:
if job_detail_info['status'] == 200:
job_status = job_detail_info['body']['status']
if job_status == 'RUNNING':
LOG.debug('job<%s> is still RUNNING.' % job_id)
continue
elif job_status == 'FAIL':
if function_deal_with_fail:
function_deal_with_fail(job_detail_info, object)
error_info = 'job<%s> FAIL, ERROR INFO: %s' % (job_id, json.dumps(job_detail_info))
raise Exception(error_info)
elif job_status == 'SUCCESS':
if function_deal_with_success:
function_deal_with_success(job_detail_info, object)
success_info = 'job<%s> SUCCESS.' % job_id
LOG.info(success_info)
break
elif job_detail_info['status'] == 'error':
error_message = job_detail_info['body']['message']
exception = job_detail_info['body']['exception']
LOG.error('Java error message: %s, exception: %s' % (error_message, exception))
continue
else:
info = json.dumps(job_detail_info)
                        LOG.info('Job detail response has an issue: %s, will retry.' % info)
continue
else:
retry_info = 'job detail info is empty, will retry to get. JOB DETAIL: %s' % job_detail_info
LOG.info(retry_info)
continue
else:
error_info = json.dumps(job_info)
LOG.error('Job init FAIL, error info: %s' % error_info)
raise Exception(error_info)
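    # Illustrative sketch (not part of the original driver): the success/fail callbacks
    # receive the final job detail dict plus the pass-through object, e.g.
    #   job_info = self.hws_client.evs.create_volume(project_id, az, size, volume_type)
    #   self._deal_with_job(job_info, project_id,
    #                       function_deal_with_success=self._add_volume_mapping_to_db,
    #                       function_deal_with_fail=self._deal_with_create_volume_fail,
    #                       object=volume)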
def _deal_with_create_volume_fail(self, job_detail_info, volume):
"""
        Deal with a failed create-volume job.
        If the HWS volume was actually created despite the job failure, save the id mapping
        in the DB, then raise an exception.
        If no HWS volume id was created, raise the exception directly.
{
"body": {
"status": "FAIL",
"entities": {
"volume_id": "1be7a768-59b6-4ef6-b4c0-a4f8039fa626"
},
"job_id": "8aace0c751b0a3bd01523529e4f70d35",
"job_type": "createVolume",
"begin_time": "2016-01-12T09:28:04.086Z",
"end_time": "2016-01-12T09:28:32.252Z",
"error_code": "EVS.2024",
"fail_reason": "EbsCreateVolumeTask-fail:volume is error!"
},
"status": 200
}
:param job_detail_info:
:param volume:
:return:
"""
job_id = job_detail_info.get('body').get('job_id')
error_info = 'job<%s> FAIL, ERROR INFO: %s' % (job_id, json.dumps(job_detail_info))
if job_detail_info.get('body').get('entities'):
hws_volume_id = job_detail_info.get('body').get('entities').get('volume_id')
if hws_volume_id:
LOG.info('HWS volume is created, id is: %s' % hws_volume_id)
volume_id = volume.id
self.db_manager.add_volume_mapping(volume_id, hws_volume_id)
LOG.debug('Success to add volume mapping: {%s: %s}' % (volume_id, hws_volume_id))
raise Exception(error_info)
raise Exception(error_info)
def delete_volume(self, volume):
cascading_volume_id = volume.id
project_id = self.project_id
cascaded_volume_id = self.db_manager.get_cascaded_volume_id(cascading_volume_id)
LOG.info('VOLUME_ID: %s' % cascaded_volume_id)
if cascaded_volume_id:
volume_get = self.hws_client.evs.get_volume_detail(project_id, cascaded_volume_id)
if volume_get['status'] == 200:
job_info = self.hws_client.evs.delete_volume(project_id, cascaded_volume_id)
self._deal_with_job(job_info,project_id, self._delete_volume_mapping, None, volume)
elif volume_get['status'] == 404 and volume_get.get('body').get('itemNotFound'):
                LOG.info('cascaded volume does not exist, so return delete success directly')
return
else:
error_info = 'Delete volume fail, Exception: %s' % json.dumps(volume_get)
LOG.error(error_info)
raise Exception(error_info)
else:
            LOG.info('cascaded volume does not exist, so return delete success directly')
return
def _delete_volume_mapping(self, job_detail_info, volume):
cascading_volume_id = volume.id
self.db_manager.delete_volume_mapping(cascading_volume_id)
LOG.info('Delete volume mapping for cascading volume id: %s' % cascading_volume_id)
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
# pdb.set_trace()
if not self._stats:
backend_name = self.configuration.safe_get('volume_backend_name')
            LOG.debug('backend_name is %s' % backend_name)
if not backend_name:
backend_name = 'HC_HWS'
data = {'volume_backend_name': backend_name,
'vendor_name': 'Huawei',
'driver_version': self.VERSION,
'storage_protocol': 'LSI Logic SCSI',
'reserved_percentage': 0,
'total_capacity_gb': 1000,
'free_capacity_gb': 1000}
self._stats = data
return self._stats
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
LOG.debug('vCloud Driver: initialize_connection')
driver_volume_type = 'hwclouds_volume'
data = {}
data['backend'] = 'hwclouds'
data['volume_id'] = volume['id']
data['display_name'] = volume['display_name']
return {'driver_volume_type': driver_volume_type,
'data': data}
def check_for_setup_error(self):
"""Check configuration file."""
pass
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the specified volume."""
pass
def create_export(self, context, volume):
"""Export the volume."""
pass
def create_snapshot(self, snapshot):
pass
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot."""
pass
def delete_snapshot(self, snapshot):
"""Delete a snapshot."""
pass
def do_setup(self, context):
"""Instantiate common class and log in storage system."""
pass
def ensure_export(self, context, volume):
"""Synchronously recreate an export for a volume."""
pass
def extend_volume(self, volume, new_size):
"""Extend a volume."""
pass
def remove_export(self, context, volume):
"""Remove an export for a volume."""
pass
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector"""
LOG.debug('vCloud Driver: terminate_connection')
pass
def validate_connector(self, connector):
"""Fail if connector doesn't contain all the data needed by driver."""
LOG.debug('vCloud Driver: validate_connector')
pass
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
from prompt_toolkit.completion import Completer, Completion
import azclishell.configuration
from azclishell.argfinder import ArgsFinder
from azclishell.command_tree import in_tree
from azclishell.layout import get_scope
from azclishell.util import parse_quotes
from azure.cli.core.parser import AzCliCommandParser
from azure.cli.core.util import CLIError
SELECT_SYMBOL = azclishell.configuration.SELECT_SYMBOL
def dynamic_param_logic(text):
""" validates parameter values for dynamic completion """
is_param = False
started_param = False
prefix = ""
param = ""
txtspt = text.split()
if txtspt:
param = txtspt[-1]
if param.startswith("-"):
is_param = True
elif len(txtspt) > 2 and txtspt[-2]\
and txtspt[-2].startswith('-'):
is_param = True
param = txtspt[-2]
started_param = True
prefix = txtspt[-1]
return is_param, started_param, prefix, param
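# Illustrative sketch (hypothetical command lines, not part of the original module):
#   >>> dynamic_param_logic("vm create --name")
#   (True, False, '', '--name')      # a parameter was typed, no value started yet
#   >>> dynamic_param_logic("vm create --name my")
#   (True, True, 'my', '--name')     # completing the value of --name, prefix 'my'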
def reformat_cmd(text):
""" reformat the text to be stripped of noise """
# remove az if there
text = text.replace('az', '')
# disregard defaulting symbols
if text and SELECT_SYMBOL['scope'] == text[0:2]:
text = text.replace(SELECT_SYMBOL['scope'], "")
if get_scope():
text = get_scope() + ' ' + text
return text
def gen_dyn_completion(comp, started_param, prefix, text):
""" how to validate and generate completion for dynamic params """
if len(comp.split()) > 1:
completion = '\"' + comp + '\"'
else:
completion = comp
if started_param:
if comp.lower().startswith(prefix.lower()) and comp not in text.split():
yield Completion(completion, -len(prefix))
else:
yield Completion(completion, -len(prefix))
def sort_completions(gen):
""" sorts the completions """
def _get_weight(val):
""" weights the completions with required things first the lexicographically"""
priority = ''
if val.display_meta and val.display_meta.startswith('[REQUIRED]'):
priority = ' ' # a space has the lowest ordinance
return priority + val.text
return sorted(list(gen), key=_get_weight)
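# Illustrative sketch (hypothetical completions, not part of the original module):
#   >>> comps = [Completion('--location'), Completion('--name', display_meta='[REQUIRED] name')]
#   >>> [c.text for c in sort_completions(iter(comps))]
#   ['--name', '--location']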
# pylint: disable=too-many-instance-attributes
class AzCompleter(Completer):
""" Completes Azure CLI commands """
def __init__(self, commands, global_params=True):
# dictionary of command to descriptions
self.command_description = commands.descrip
# from a command to a list of parameters
self.command_parameters = commands.command_param
# a list of all the possible parameters
self.completable_param = commands.completable_param
# the command tree
self.command_tree = commands.command_tree
# a dictionary of parameter (which is command + " " + parameter name)
# to a description of what it does
self.param_description = commands.param_descript
# a dictionary of command to examples of how to use it
self.command_examples = commands.command_example
# a dictionary of which parameters mean the same thing
self.same_param_doubles = commands.same_param_doubles or {}
self._is_command = True
self.branch = self.command_tree
self.curr_command = ""
self.global_param = commands.global_param if global_params else []
self.output_choices = commands.output_choices if global_params else []
self.output_options = commands.output_options if global_params else []
self.global_param_descriptions = commands.global_param_descriptions if global_params else []
self.global_parser = AzCliCommandParser(add_help=False)
self.global_parser.add_argument_group('global', 'Global Arguments')
self.parser = AzCliCommandParser(parents=[self.global_parser])
from azclishell._dump_commands import CMD_TABLE
self.cmdtab = CMD_TABLE
self.parser.load_command_table(CMD_TABLE)
self.argsfinder = ArgsFinder(self.parser)
def validate_completion(self, param, words, text_before_cursor, double=True):
""" validates that a param should be completed """
return param.lower().startswith(words.lower()) and param.lower() != words.lower() and\
param not in text_before_cursor.split() and not \
text_before_cursor[-1].isspace() and\
(not (double and param in self.same_param_doubles) or
self.same_param_doubles[param] not in text_before_cursor.split())
def get_completions(self, document, complete_event):
text = document.text_before_cursor
self.branch = self.command_tree
self.curr_command = ''
self._is_command = True
text = reformat_cmd(text)
if text.split():
for comp in sort_completions(self.gen_cmd_and_param_completions(text)):
yield comp
for cmd in sort_completions(self.gen_cmd_completions(text)):
yield cmd
for val in sort_completions(self.gen_dynamic_completions(text)):
yield val
for param in sort_completions(self.gen_global_param_completions(text)):
yield param
def gen_enum_completions(self, arg_name, text, started_param, prefix):
""" generates dynamic enumeration completions """
try: # if enum completion
for choice in self.cmdtab[
self.curr_command].arguments[arg_name].choices:
if started_param:
if choice.lower().startswith(prefix.lower())\
and choice not in text.split():
yield Completion(choice, -len(prefix))
else:
yield Completion(choice, -len(prefix))
except TypeError: # there is no choices option
pass
def get_arg_name(self, is_param, param):
""" gets the argument name used in the command table for a parameter """
if self.curr_command in self.cmdtab and is_param:
for arg in self.cmdtab[self.curr_command].arguments:
for name in self.cmdtab[self.curr_command].arguments[arg].options_list:
if name == param:
return arg
# pylint: disable=too-many-branches
def gen_dynamic_completions(self, text):
""" generates the dynamic values, like the names of resource groups """
try: # pylint: disable=too-many-nested-blocks
is_param, started_param, prefix, param = dynamic_param_logic(text)
# command table specific name
arg_name = self.get_arg_name(is_param, param)
if arg_name and ((text.split()[-1].startswith('-') and text[-1].isspace()) or
text.split()[-2].startswith('-')):
for comp in self.gen_enum_completions(arg_name, text, started_param, prefix):
yield comp
parse_args = self.argsfinder.get_parsed_args(
parse_quotes(text, quotes=False))
# there are 3 formats for completers the cli uses
# this try catches which format it is
if self.cmdtab[self.curr_command].arguments[arg_name].completer:
try:
for comp in self.cmdtab[self.curr_command].arguments[arg_name].completer(
parsed_args=parse_args):
for comp in gen_dyn_completion(
comp, started_param, prefix, text):
yield comp
except TypeError:
try:
for comp in self.cmdtab[self.curr_command].\
arguments[arg_name].completer(prefix):
for comp in gen_dyn_completion(
comp, started_param, prefix, text):
yield comp
except TypeError:
try:
for comp in self.cmdtab[self.curr_command].\
arguments[arg_name].completer():
for comp in gen_dyn_completion(
comp, started_param, prefix, text):
yield comp
except TypeError:
pass # other completion method used
except CLIError: # if the user isn't logged in
pass
def gen_cmd_completions(self, text):
""" whether is a space or no text typed, send the current branch """
# if nothing, so first level commands
if not text.split() and self._is_command:
if self.branch.children is not None:
for com in self.branch.children:
yield Completion(com.data)
# if space show current level commands
elif len(text.split()) > 0 and text[-1].isspace() and self._is_command:
if self.branch is not self.command_tree:
for com in self.branch.children:
yield Completion(com.data)
def yield_param_completion(self, param, last_word):
""" yields a parameter """
return Completion(param, -len(last_word), display_meta=self.get_param_description(
self.curr_command + " " + str(param)).replace('\n', ''))
def gen_cmd_and_param_completions(self, text):
""" generates command and parameter completions """
temp_command = str('')
txtspt = text.split()
for word in txtspt:
if word.startswith("-"):
self._is_command = False
# building what the command is
elif self._is_command:
temp_command += ' ' + str(word) if temp_command else str(word)
mid_val = text.find(word) + len(word)
# moving down command tree
if self.branch.has_child(word) and len(text) > mid_val and text[mid_val].isspace():
self.branch = self.branch.get_child(word, self.branch.children)
if len(text) > 0 and text[-1].isspace():
if in_tree(self.command_tree, temp_command):
self.curr_command = temp_command
else:
self._is_command = False
else:
self.curr_command = temp_command
last_word = txtspt[-1]
# this is for single char parameters
if last_word.startswith("-") and not last_word.startswith("--"):
self._is_command = False
if self.has_parameters(self.curr_command):
for param in self.command_parameters[self.curr_command]:
if self.validate_completion(param, last_word, text) and\
not param.startswith("--"):
yield self.yield_param_completion(param, last_word)
elif last_word.startswith("--"): # for regular parameters
self._is_command = False
if self.has_parameters(self.curr_command): # Everything should, map to empty list
for param in self.command_parameters[self.curr_command]:
if self.validate_completion(param, last_word, text):
yield self.yield_param_completion(param, last_word)
if self.branch.children and self._is_command: # all underneath commands
for kid in self.branch.children:
if self.validate_completion(kid.data, txtspt[-1], text, False):
yield Completion(
str(kid.data), -len(txtspt[-1]))
elif self._is_command:
for param in self.command_parameters[self.curr_command.strip()]:
if param.startswith('--'):
yield self.yield_param_completion(param, '')
def gen_global_param_completions(self, text):
""" Global parameter stuff hard-coded in """
txtspt = text.split()
if txtspt and len(txtspt) > 0:
for param in self.global_param:
# for single dash global parameters
if txtspt[-1].startswith('-') \
and not txtspt[-1].startswith('--') and \
param.startswith('-') and not param.startswith('--') and\
self.validate_completion(param, txtspt[-1], text, double=False):
yield Completion(
param, -len(txtspt[-1]),
display_meta=self.global_param_descriptions[param])
# for double dash global parameters
elif txtspt[-1].startswith('--') and \
self.validate_completion(param, txtspt[-1], text, double=False):
yield Completion(
param, -len(txtspt[-1]),
display_meta=self.global_param_descriptions[param])
# if there is an output, gets the options without user typing
if txtspt[-1] in self.output_options:
for opt in self.output_choices:
yield Completion(opt)
# if there is an output option, if they have started typing
if len(txtspt) > 1 and\
txtspt[-2] in self.output_options:
for opt in self.output_choices:
if self.validate_completion(opt, txtspt[-1], text, double=False):
yield Completion(opt, -len(txtspt[-1]))
def is_completable(self, symbol):
""" whether the word can be completed as a command or parameter """
return self.has_parameters(symbol) or symbol in self.param_description.keys()
def get_param_description(self, param):
""" gets a description of an empty string """
if param in self.param_description:
return self.param_description[param]
else:
return ""
def has_parameters(self, command):
""" returns whether given command is valid """
return command in self.command_parameters.keys()
def has_description(self, param):
""" if a parameter has a description """
return param in self.param_description.keys() and \
not self.param_description[param].isspace()
|
|
import pytest
pytest.importorskip('numpy')
import itertools
from operator import getitem
from dask.compatibility import skip
import dask.array as da
from dask.array.slicing import (slice_array, _slice_1d, take, new_blockdim,
sanitize_index)
from dask.array.utils import assert_eq
import numpy as np
from toolz import merge
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
def test_slice_1d():
expected = {0: slice(10, 25, 1), 1: slice(None, None, None), 2: slice(0, 1, 1)}
result = _slice_1d(100, [25] * 4, slice(10, 51, None))
assert expected == result
# x[100:12:-3]
expected = {0: slice(-2, -8, -3),
1: slice(-1, -21, -3),
2: slice(-3, -21, -3),
3: slice(-2, -21, -3),
4: slice(-1, -21, -3)}
result = _slice_1d(100, [20] * 5, slice(100, 12, -3))
assert expected == result
# x[102::-3]
expected = {0: slice(-2, -21, -3),
1: slice(-1, -21, -3),
2: slice(-3, -21, -3),
3: slice(-2, -21, -3),
4: slice(-1, -21, -3)}
result = _slice_1d(100, [20] * 5, slice(102, None, -3))
assert expected == result
# x[::-4]
expected = {0: slice(-1, -21, -4),
1: slice(-1, -21, -4),
2: slice(-1, -21, -4),
3: slice(-1, -21, -4),
4: slice(-1, -21, -4)}
result = _slice_1d(100, [20] * 5, slice(None, None, -4))
assert expected == result
# x[::-7]
expected = {0: slice(-5, -21, -7),
1: slice(-4, -21, -7),
2: slice(-3, -21, -7),
3: slice(-2, -21, -7),
4: slice(-1, -21, -7)}
result = _slice_1d(100, [20] * 5, slice(None, None, -7))
assert expected == result
# x=range(115)
# x[::-7]
expected = {0: slice(-7, -24, -7),
1: slice(-2, -24, -7),
2: slice(-4, -24, -7),
3: slice(-6, -24, -7),
4: slice(-1, -24, -7)}
result = _slice_1d(115, [23] * 5, slice(None, None, -7))
assert expected == result
# x[79::-3]
expected = {0: slice(-1, -21, -3),
1: slice(-3, -21, -3),
2: slice(-2, -21, -3),
3: slice(-1, -21, -3)}
result = _slice_1d(100, [20] * 5, slice(79, None, -3))
assert expected == result
# x[-1:-8:-1]
expected = {4: slice(-1, -8, -1)}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(-1, 92, -1))
assert expected == result
# x[20:0:-1]
expected = {0: slice(-1, -20, -1),
1: slice(-20, -21, -1)}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(20, 0, -1))
assert expected == result
# x[:0]
expected = {}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(0))
assert result
# x=range(99)
expected = {0: slice(-3, -21, -3),
1: slice(-2, -21, -3),
2: slice(-1, -21, -3),
3: slice(-2, -20, -3),
4: slice(-1, -21, -3)}
# This array has non-uniformly sized blocks
result = _slice_1d(99, [20, 20, 20, 19, 20], slice(100, None, -3))
assert expected == result
# x=range(104)
# x[::-3]
expected = {0: slice(-1, -21, -3),
1: slice(-3, -24, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-1, -22, -3)}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, None, -3))
assert expected == result
# x=range(104)
# x[:27:-3]
expected = {1: slice(-3, -16, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-1, -22, -3)}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, 27, -3))
assert expected == result
# x=range(104)
# x[100:27:-3]
expected = {1: slice(-3, -16, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-4, -22, -3)}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(100, 27, -3))
assert expected == result
def test_slice_singleton_value_on_boundary():
assert _slice_1d(15, [5, 5, 5], 10) == {2: 0}
assert _slice_1d(30, (5, 5, 5, 5, 5, 5), 10) == {2: 0}
def test_slice_array_1d():
#x[24::2]
expected = {('y', 0): (getitem, ('x', 0), (slice(24, 25, 2),)),
('y', 1): (getitem, ('x', 1), (slice(1, 25, 2),)),
('y', 2): (getitem, ('x', 2), (slice(0, 25, 2),)),
('y', 3): (getitem, ('x', 3), (slice(1, 25, 2),))}
result, chunks = slice_array('y', 'x', [[25] * 4], [slice(24, None, 2)])
assert expected == result
#x[26::2]
expected = {('y', 0): (getitem, ('x', 1), (slice(1, 25, 2),)),
('y', 1): (getitem, ('x', 2), (slice(0, 25, 2),)),
('y', 2): (getitem, ('x', 3), (slice(1, 25, 2),))}
result, chunks = slice_array('y', 'x', [[25] * 4], [slice(26, None, 2)])
assert expected == result
#x[24::2]
expected = {('y', 0): (getitem, ('x', 0), (slice(24, 25, 2),)),
('y', 1): (getitem, ('x', 1), (slice(1, 25, 2),)),
('y', 2): (getitem, ('x', 2), (slice(0, 25, 2),)),
('y', 3): (getitem, ('x', 3), (slice(1, 25, 2),))}
result, chunks = slice_array('y', 'x', [(25, ) * 4], (slice(24, None, 2), ))
assert expected == result
#x[26::2]
expected = {('y', 0): (getitem, ('x', 1), (slice(1, 25, 2),)),
('y', 1): (getitem, ('x', 2), (slice(0, 25, 2),)),
('y', 2): (getitem, ('x', 3), (slice(1, 25, 2),))}
result, chunks = slice_array('y', 'x', [(25, ) * 4], (slice(26, None, 2), ))
assert expected == result
def test_slice_array_2d():
#2d slices: x[13::2,10::1]
expected = {('y', 0, 0): (getitem, ('x', 0, 0),
(slice(13, 20, 2), slice(10, 20, 1))),
('y', 0, 1): (getitem, ('x', 0, 1),
(slice(13, 20, 2), slice(None, None, None))),
('y', 0, 2): (getitem, ('x', 0, 2),
(slice(13, 20, 2), slice(None, None, None)))}
result, chunks = slice_array('y', 'x', [[20], [20, 20, 5]],
[slice(13, None, 2), slice(10, None, 1)])
assert expected == result
#2d slices with one dimension: x[5,10::1]
expected = {('y', 0): (getitem, ('x', 0, 0),
(5, slice(10, 20, 1))),
('y', 1): (getitem, ('x', 0, 1),
(5, slice(None, None, None))),
('y', 2): (getitem, ('x', 0, 2),
(5, slice(None, None, None)))}
result, chunks = slice_array('y', 'x', ([20], [20, 20, 5]),
[5, slice(10, None, 1)])
assert expected == result
def test_slice_optimizations():
#bar[:]
expected = {('foo', 0): ('bar', 0)}
result, chunks = slice_array('foo', 'bar', [[100]], (slice(None, None, None),))
assert expected == result
#bar[:,:,:]
expected = {('foo', 0): ('bar', 0),
('foo', 1): ('bar', 1),
('foo', 2): ('bar', 2)}
result, chunks = slice_array('foo', 'bar', [(100, 1000, 10000)],
(slice(None, None, None),
slice(None, None, None),
slice(None, None, None)))
assert expected == result
def test_slicing_with_singleton_indices():
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]), (slice(0, 5), 8))
expected = {('y', 0): (getitem, ('x', 0, 1), (slice(None, None, None), 3))}
assert expected == result
def test_slicing_with_newaxis():
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]),
(slice(0, 3), None, slice(None, None, None)))
expected = {
('y', 0, 0, 0): (getitem, ('x', 0, 0),
(slice(0, 3, 1), None, slice(None, None, None))),
('y', 0, 0, 1): (getitem, ('x', 0, 1),
(slice(0, 3, 1), None, slice(None, None, None)))}
assert expected == result
assert chunks == ((3,), (1,), (5, 5))
def test_take():
chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], axis=0)
expected = {('y', 0): (getitem, (np.concatenate,
[(getitem, ('x', 0), ([1, 3, 5],)),
(getitem, ('x', 2), ([7],))], 0),
([2, 0, 3, 1], ))}
assert dsk == expected
assert chunks == ((4,),)
chunks, dsk = take('y', 'x', [(20, 20, 20, 20), (20, 20)], [5, 1, 47, 3], axis=0)
expected = {('y', 0, j): (getitem, (np.concatenate,
[(getitem, ('x', 0, j),
([1, 3, 5], slice(None, None, None))),
(getitem, ('x', 2, j),
([7], slice(None, None, None)))], 0),
([2, 0, 3, 1], slice(None, None, None)))
for j in range(2)}
assert dsk == expected
assert chunks == ((4,), (20, 20))
chunks, dsk = take('y', 'x', [(20, 20, 20, 20), (20, 20)], [5, 1, 37, 3], axis=1)
expected = {('y', i, 0): (getitem, (np.concatenate,
[(getitem, ('x', i, 0),
(slice(None, None, None), [1, 3, 5])),
(getitem, ('x', i, 1),
(slice(None, None, None), [17]))], 1),
(slice(None, None, None), [2, 0, 3, 1]))
for i in range(4)}
assert dsk == expected
assert chunks == ((20, 20, 20, 20), (4,))
def test_take_sorted():
chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
expected = {('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
('y', 1): (getitem, ('x', 2), ([7],))}
assert dsk == expected
assert chunks == ((3, 1),)
chunks, dsk = take('y', 'x', [(20, 20, 20, 20), (20, 20)], [1, 3, 5, 37], axis=1)
expected = merge(dict((('y', i, 0), (getitem, ('x', i, 0),
(slice(None, None, None), [1, 3, 5])))
for i in range(4)),
dict((('y', i, 1), (getitem, ('x', i, 1),
(slice(None, None, None), [17])))
for i in range(4)))
assert dsk == expected
assert chunks == ((20, 20, 20, 20), (3, 1))
def test_slice_lists():
y, chunks = slice_array('y', 'x', ((3, 3, 3, 1), (3, 3, 3, 1)),
([2, 1, 9], slice(None, None, None)))
exp = {('y', 0, i): (getitem, (np.concatenate,
[(getitem, ('x', 0, i),
([1, 2], slice(None, None, None))),
(getitem, ('x', 3, i),
([0], slice(None, None, None)))], 0),
([1, 0, 2], slice(None, None, None)))
for i in range(4)}
assert y == exp
assert chunks == ((3,), (3, 3, 3, 1))
def test_slicing_chunks():
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]),
(1, [2, 0, 3]))
assert chunks == ((3,), )
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]),
(slice(0, 7), [2, 0, 3]))
assert chunks == ((5, 2), (3, ))
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]),
(slice(0, 7), 1))
assert chunks == ((5, 2), )
def test_slicing_with_numpy_arrays():
a, bd1 = slice_array('y', 'x', ((3, 3, 3, 1), (3, 3, 3, 1)),
([1, 2, 9], slice(None, None, None)))
b, bd2 = slice_array('y', 'x', ((3, 3, 3, 1), (3, 3, 3, 1)),
(np.array([1, 2, 9]), slice(None, None, None)))
assert bd1 == bd2
assert a == b
i = [False, True, True, False, False,
False, False, False, False, True, False]
c, bd3 = slice_array('y', 'x', ((3, 3, 3, 1), (3, 3, 3, 1)),
(i, slice(None, None, None)))
assert bd1 == bd3
assert a == c
def test_slicing_and_chunks():
o = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))
t = o[4:-4, 2:-2]
assert t.chunks == ((8, 8), (6, 6))
def test_slice_stop_0():
# from gh-125
a = da.ones(10, chunks=(10,))[:0].compute()
b = np.ones(10)[:0]
assert_eq(a, b)
def test_slice_list_then_None():
x = da.zeros(shape=(5, 5), chunks=(3, 3))
y = x[[2, 1]][None]
assert_eq(y, np.zeros((1, 2, 5)))
class ReturnItem(object):
def __getitem__(self, key):
return key
@skip
def test_slicing_exhaustively():
x = np.random.rand(6, 7, 8)
a = da.from_array(x, chunks=(3, 3, 3))
I = ReturnItem()
# independent indexing along different axes
indexers = [0, -2, I[:], I[:5], [0, 1], [0, 1, 2], [4, 2], I[::-1], None, I[:0], []]
for i in indexers:
assert_eq(x[i], a[i]), i
for j in indexers:
assert_eq(x[i][:, j], a[i][:, j]), (i, j)
assert_eq(x[:, i][j], a[:, i][j]), (i, j)
for k in indexers:
assert_eq(x[..., i][:, j][k], a[..., i][:, j][k]), (i, j, k)
# repeated indexing along the first axis
first_indexers = [I[:], I[:5], np.arange(5), [3, 1, 4, 5, 0], np.arange(6) < 6]
second_indexers = [0, -1, 3, I[:], I[:3], I[2:-1], [2, 4], [], I[:0]]
for i in first_indexers:
for j in second_indexers:
assert_eq(x[i][j], a[i][j]), (i, j)
def test_slicing_with_negative_step_flops_keys():
x = da.arange(10, chunks=5)
y = x[:1:-1]
assert (x.name, 1) in y.dask[(y.name, 0)]
assert (x.name, 0) in y.dask[(y.name, 1)]
assert_eq(y, np.arange(10)[:1:-1])
assert y.chunks == ((5, 3),)
assert y.dask[(y.name, 0)] == (getitem, (x.name, 1),
(slice(-1, -6, -1),))
assert y.dask[(y.name, 1)] == (getitem, (x.name, 0),
(slice(-1, -4, -1),))
def test_empty_slice():
x = da.ones((5, 5), chunks=(2, 2), dtype='i4')
y = x[:0]
assert_eq(y, np.ones((5, 5), dtype='i4')[:0])
def test_multiple_list_slicing():
x = np.random.rand(6, 7, 8)
a = da.from_array(x, chunks=(3, 3, 3))
assert_eq(x[:, [0, 1, 2]][[0, 1]], a[:, [0, 1, 2]][[0, 1]])
def test_empty_list():
x = np.ones((5, 5, 5), dtype='i4')
dx = da.from_array(x, chunks=2)
assert_eq(dx[[], :3, :2], x[[], :3, :2])
assert_eq(dx[:3, [], :2], x[:3, [], :2])
assert_eq(dx[:3, :2, []], x[:3, :2, []])
def test_uneven_chunks():
assert da.ones(20, chunks=5)[::2].chunks == ((3, 2, 3, 2),)
def test_new_blockdim():
assert new_blockdim(20, [5, 5, 5, 5], slice(0, None, 2)) == [3, 2, 3, 2]
def test_slicing_consistent_names():
x = np.arange(100).reshape((10, 10))
a = da.from_array(x, chunks=(5, 5))
assert same_keys(a[0], a[0])
assert same_keys(a[:, [1, 2, 3]], a[:, [1, 2, 3]])
assert same_keys(a[:, 5:2:-1], a[:, 5:2:-1])
def test_sanitize_index():
pd = pytest.importorskip('pandas')
with pytest.raises(TypeError):
sanitize_index('Hello!')
assert sanitize_index(pd.Series([1, 2, 3])) == [1, 2, 3]
assert sanitize_index((1, 2, 3)) == [1, 2, 3]
def test_uneven_blockdims():
blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30), (100,))
index = (slice(240, 270), slice(None))
dsk_out, bd_out = slice_array('in', 'out', blockdims, index)
sol = {('in', 0, 0): (getitem, ('out', 7, 0), (slice(28, 31, 1), slice(None))),
('in', 1, 0): (getitem, ('out', 8, 0), (slice(0, 27, 1), slice(None)))}
assert dsk_out == sol
assert bd_out == ((3, 27), (100,))
blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30),) * 2
index = (slice(240, 270), slice(180, 230))
dsk_out, bd_out = slice_array('in', 'out', blockdims, index)
sol = {('in', 0, 0): (getitem, ('out', 7, 5), (slice(28, 31, 1), slice(29, 30, 1))),
('in', 0, 1): (getitem, ('out', 7, 6), (slice(28, 31, 1), slice(None))),
('in', 0, 2): (getitem, ('out', 7, 7), (slice(28, 31, 1), slice(0, 18, 1))),
('in', 1, 0): (getitem, ('out', 8, 5), (slice(0, 27, 1), slice(29, 30, 1))),
('in', 1, 1): (getitem, ('out', 8, 6), (slice(0, 27, 1), slice(None))),
('in', 1, 2): (getitem, ('out', 8, 7), (slice(0, 27, 1), slice(0, 18, 1)))}
assert dsk_out == sol
assert bd_out == ((3, 27), (1, 31, 18))
def test_oob_check():
x = da.ones(5, chunks=(2,))
with pytest.raises(IndexError):
x[6]
with pytest.raises(IndexError):
x[[6]]
with pytest.raises(IndexError):
x[0, 0]
def test_index_with_dask_array_errors():
x = da.ones((5, 5), chunks=2)
with pytest.raises(NotImplementedError):
x[0, x > 10]
@pytest.mark.xfail
def test_cull():
x = da.ones(1000, chunks=(10,))
for slc in [1, slice(0, 30), slice(0, None, 100)]:
y = x[slc]
assert len(y.dask) < len(x.dask)
@pytest.mark.parametrize('shape', [(2,), (2, 3), (2, 3, 5)])
@pytest.mark.parametrize('slice', [(Ellipsis,),
(None, Ellipsis),
(Ellipsis, None),
(None, Ellipsis, None)])
def test_slicing_with_Nones(shape, slice):
x = np.random.random(shape)
d = da.from_array(x, chunks=shape)
assert_eq(x[slice], d[slice])
indexers = [Ellipsis, slice(2), 0, 1, -2, -1, slice(-2, None), None]
"""
@pytest.mark.parametrize('a', indexers)
@pytest.mark.parametrize('b', indexers)
@pytest.mark.parametrize('c', indexers)
@pytest.mark.parametrize('d', indexers)
def test_slicing_none_int_ellipses(a, b, c, d):
if (a, b, c, d).count(Ellipsis) > 1:
return
shape = (2,3,5,7,11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
xx = x[a, b, c, d]
yy = y[a, b, c, d]
assert_eq(xx, yy)
"""
@pytest.mark.slow
def test_slicing_none_int_ellipses():
shape = (2,3,5,7,11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
for ind in itertools.product(indexers, indexers, indexers, indexers):
if ind.count(Ellipsis) > 1:
continue
assert_eq(x[ind], y[ind])
def test_None_overlap_int():
a, b, c, d = (0, slice(None, 2, None), None, Ellipsis)
shape = (2,3,5,7,11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
xx = x[a, b, c, d]
yy = y[a, b, c, d]
assert_eq(xx, yy)
def test_negative_n_slicing():
assert_eq(da.ones(2, chunks=2)[-2], np.ones(2)[-2])
def test_negative_list_slicing():
x = np.arange(5)
dx = da.from_array(x, chunks=2)
assert_eq(dx[[0, -5]], x[[0, -5]])
assert_eq(dx[[4, -1]], x[[4, -1]])
|
|
import sqlite3
from math import sqrt
import sys
class DGFAnalyzer:
DebugLevel = 0
def __init__(self,Filename):
self.Filename=Filename
self.FunctionNames={}
self.Address2Function={}
        self.Connection=sqlite3.connect(self.Filename)
        self.Connection.text_factory = str
        self.Cursor=self.Connection.cursor()
def CacheFunctionInfo(self):
        TmpCursor=self.Connection.cursor()
TmpCursor.execute("SELECT * FROM FileInfo")
for row in TmpCursor:
FileID=int(row[0])
self.FunctionNames[FileID]={}
self.Address2Function[FileID]={}
self.Cursor.execute("SELECT FunctionAddress,StartAddress,Name FROM OneLocationInfo WHERE FileID="+str(FileID))
for row in self.Cursor:
function_address=int(row[0])
block_address=int(row[1])
name=row[2]
if function_address!=0:
self.Address2Function[FileID][block_address]=function_address
if function_address==block_address:
self.FunctionNames[FileID][function_address]=name
def GetNames(self,FileID):
if FileID==1:
FileIDColumnName="TheSourceFileID"
AddressColumnName="TheSourceAddress"
else:
FileIDColumnName="TheTargetFileID"
AddressColumnName="TheTargetAddress"
return (FileIDColumnName,AddressColumnName)
def RetrieveFunctionBasicBlockMap(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
FunctionHash={}
self.Cursor.execute( "SELECT FunctionAddress,StartAddress FROM OneLocationInfo WHERE FileID='" + str(FileID) + "' AND FunctionAddress!=0 ORDER BY FunctionAddress" );
for row in self.Cursor:
FunctionAddress = row[0]
StartAddress = row[1]
if not FunctionHash.has_key(FunctionAddress):
FunctionHash[FunctionAddress] = []
FunctionHash[FunctionAddress].append( StartAddress )
if self.DebugLevel > 2:
print len(FunctionHash.keys())
print len(FunctionHash)
FunctionMemberCounts=[]
for FunctionAddress in FunctionHash.keys():
FunctionMemberCounts.append( len(FunctionHash[FunctionAddress]) )
FunctionMemberCounts.sort()
return FunctionMemberCounts
def RetrievePatternAddresses(self,FileID,Pattern):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
FunctionHash={}
self.Cursor.execute( "SELECT FunctionAddress,StartAddress FROM OneLocationInfo WHERE FileID='" + str(FileID) + "' AND DisasmLines like '" + Pattern + "' ORDER BY FunctionAddress" );
Results=[]
for row in self.Cursor:
Results.append( (row[0],row[1] ) )
return Results
def RetrieveDisasmLines(self,FileID,Address):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
FunctionHash={}
self.Cursor.execute( "SELECT DisasmLines FROM OneLocationInfo WHERE FileID='" + str(FileID) + "' AND StartAddress='" + str(Address) + "'" );
Results=[]
for row in self.Cursor:
return row[0]
return ""
def GetDisasmLinesLength(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
FunctionHash={}
self.Cursor.execute( "SELECT DisasmLines FROM OneLocationInfo WHERE FileID='" + str(FileID) + "'");
TotalLength=0
for row in self.Cursor:
TotalLength += len(row[0])
return TotalLength
def RetrieveFingerprint(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
FunctionHash={}
self.Cursor.execute( "SELECT Fingerprint FROM OneLocationInfo WHERE FileID='" + str(FileID) + "'");
TotalLength=0
for row in self.Cursor:
TotalLength += len(row[0])
return TotalLength
def GetMaximumFingerPrintLength(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
FunctionHash={}
self.Cursor.execute( "SELECT StartAddress, Fingerprint FROM OneLocationInfo WHERE FileID='" + str(FileID) + "'");
        MaximumLength=0
        MaximumLengthAddress=0
        for row in self.Cursor:
            start_address = row[0]
            finger_print = row[1]
            if MaximumLength < len( finger_print ):
                MaximumLength = len( finger_print )
                MaximumLengthAddress = start_address
        return (MaximumLengthAddress, MaximumLength)
def GetIdentifiedBlocksCount(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
self.Cursor.execute("SELECT COUNT(*) FROM OneLocationInfo WHERE FunctionAddress!=0 AND Fingerprint!='' AND FileID="+str(FileID)+" AND StartAddress IN (SELECT "+AddressColumnName+" FROM MatchMap)");
for row in self.Cursor:
return row[0]
def GetIdentifiedFunctionsCount(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
self.Cursor.execute("SELECT COUNT(*) FROM OneLocationInfo WHERE FunctionAddress!=0 AND Fingerprint!='' AND BlockType=1 AND FileID="+str(FileID)+" AND StartAddress IN (SELECT "+AddressColumnName+" FROM MatchMap)");
for row in self.Cursor:
return row[0]
def GetMapInfoCount(self,FileID):
try:
self.Cursor.execute("SELECT COUNT(*) FROM MapInfo WHERE FileID="+str(FileID))
for row in self.Cursor:
return row[0]
except:
return 0
def GetMatchMapCount(self):
try:
self.Cursor.execute("SELECT COUNT(*) FROM MatchMap");
for row in self.Cursor:
return row[0]
except:
return 0
def GetMatchedFunctionList(self, Options={"matched":1} , Offset = None, Limit = None, RetrieveCount = False ):
if RetrieveCount:
Columns = "COUNT(*)"
else:
Columns = "TheSourceFileID, TheTargetFileID, TheSourceAddress, EndAddress, TheTargetAddress, BlockType, MatchRate, TheSourceFunctionName, Type, TheTargetFunctionName, MatchCountForTheSource, NoneMatchCountForTheSource, MatchCountWithModificationForTheSource, MatchCountForTheTarget, NoneMatchCountForTheTarget, MatchCountWithModificationForTheTarget"
Query = "SELECT " + Columns + " FROM FunctionMatchInfo"
IncludeUnidentifiedBlock = True
Conditions = []
include_matched = False
if ( Options.has_key( "matched" ) and Options[ "matched" ] == 1 ):
include_matched = True
include_modified = False
if ( Options.has_key( "modified" ) and Options[ "modified" ] == 1 ):
include_modified = True
if include_matched and include_modified:
pass
elif include_matched:
Conditions.append( 'MatchRate == 100 ' )
elif include_modified:
Conditions.append( 'MatchRate != 100 ' )
if not ( Options.has_key( "unidentified" ) and Options[ "unidentified" ] == 1 ):
Conditions.append( 'TheSourceAddress != 0 ' )
Conditions.append( 'TheTargetAddress != 0 ' )
ConditionStr = ''
for Condition in Conditions:
if ConditionStr == '':
ConditionStr += Condition
else:
ConditionStr += ' AND ' + Condition
if ConditionStr != '':
Query += ' WHERE ' + ConditionStr
if Limit:
Query += ' LIMIT ' + str( Limit )
if Offset:
Query += ' OFFSET ' + str( Offset )
if self.DebugLevel > 2:
print Query
Results=[]
try:
self.Cursor.execute( Query );
if RetrieveCount:
for ( Count ) in self.Cursor:
return Count
else:
for ( TheSourceFileID, TheTargetFileID, TheSourceAddress, EndAddress, TheTargetAddress, BlockType, MatchRate, TheSourceFunctionName, Type, TheTargetFunctionName, MatchCountForTheSource, NoneMatchCountForTheSource, MatchCountWithModificationForTheSource, MatchCountForTheTarget, NoneMatchCountForTheTarget, MatchCountWithModificationForTheTarget ) in self.Cursor:
result = {}
result["TheSourceFunctionName"] = TheSourceFunctionName
result["TheSourceAddress"] = TheSourceAddress
result["TheTargetFunctionName"] = TheTargetFunctionName
result["TheTargetAddress"] = TheTargetAddress
result["MatchRate"] = MatchRate
result["MatchCountForTheSource"] = MatchCountForTheSource
result["MatchCountWithModificationForTheSource"] = MatchCountWithModificationForTheSource
result["NoneMatchCountForTheSource"] = NoneMatchCountForTheSource
result["MatchCountForTheTarget"] = MatchCountForTheTarget
result["MatchCountWithModificationForTheTarget"] = MatchCountWithModificationForTheTarget
result["NoneMatchCountForTheTarget"] = NoneMatchCountForTheTarget
Results.append( result )
except:
pass
return Results
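    # Illustrative sketch (hypothetical database file name, not part of the original module):
    #   analyzer = DGFAnalyzer("result.dgf")
    #   for row in analyzer.GetMatchedFunctionList({"matched": 1, "modified": 1}, Limit=10):
    #       print row["TheSourceFunctionName"], row["TheTargetFunctionName"], row["MatchRate"]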
def GetFunctionMemberMatchList(self,Options=("matched")):
#modified,unidentified
pass
def GetFunctionDisasmLinesHash( self, FileID, FunctionAddress ):
#BOOL OneIDAClientManager::RetrieveOneLocationInfo( DWORD FunctionAddress )
self.Cursor.execute( "SELECT StartAddress, DisasmLines, Name FROM OneLocationInfo WHERE FileID = '%u' AND FunctionAddress = '%d'" % ( FileID, FunctionAddress ) )
DisasmLinesHash={}
for row in self.Cursor:
DisasmLinesHash[row[0]] = row[1]
return DisasmLinesHash
def DumpDisasmLineHash( self, DisasmLineHash ):
addresses = DisasmLineHash.keys()
addresses.sort()
for address in addresses:
print hex(address)
print DisasmLineHash[address]
print ""
def GetMatchMapForFunction( self, FileID, FunctionAddress ):
self.Cursor.execute( "SELECT TheSourceAddress, TheTargetAddress FROM MatchMap WHERE TheSourceAddress IN (SELECT StartAddress Name FROM OneLocationInfo WHERE FileID = '%u' AND FunctionAddress = '%d')" % ( FileID, FunctionAddress ) )
MatchHash = {}
for row in self.Cursor:
MatchHash[row[0]] = row[1]
return MatchHash
def GetMatchedFunctionMemberList( self, FunctionAddresses ):
DisasmLineHash=[]
        DisasmLineHash.append( self.GetFunctionDisasmLinesHash( 1, FunctionAddresses[0] ) )
        DisasmLineHash.append( self.GetFunctionDisasmLinesHash( 2, FunctionAddresses[1] ) )
#self.DumpDisasmLineHash( DisasmLineHash[0] )
#self.DumpDisasmLineHash( DisasmLineHash[1] )
MatchMap = self.GetMatchMapForFunction( 1, FunctionAddresses[0] )
#for Src in MatchMap.keys():
# print hex(Src), hex(MatchMap[Src])
return ( DisasmLineHash, MatchMap )
#BOOL OneIDAClientManager::Retrieve(DBWrapper *InputDB, int FileID, BOOL bRetrieveDataForAnalysis, DWORD FunctionAddress )
#self.Cursor.execute( "SELECT Type, SrcBlock, Dst From MapInfo WHERE FileID = %u AND SrcBlock IN (SELECT StartAddress FROM OneLocationInfo WHERE FileID = '%d' AND FunctionAddress='%d') AND Dst IN (SELECT StartAddress FROM OneLocationInfo WHERE FileID = '%d' AND FunctionAddress='%d') ORDER BY ID ASC" % ( FileID, FileID, FunctionAddress, FileID, FunctionAddress ) )
#for row in self.Cursor:
# print row
def GetUnidentifiedFunctionsCount(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
self.Cursor.execute("SELECT COUNT(*) FROM OneLocationInfo WHERE FunctionAddress!=0 AND Fingerprint!='' AND BlockType=1 AND FileID="+str(FileID)+" AND StartAddress NOT IN (SELECT "+AddressColumnName+" FROM MatchMap)");
for row in self.Cursor:
return row[0]
def GetFullMatchedBlocksCount(self):
try:
self.Cursor.execute("SELECT COUNT(*) FROM MatchMap WHERE MatchRate='100'");
for row in self.Cursor:
return row[0]
except:
return 0
def GetUnidentifiedFunctions(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
self.Cursor.execute("SELECT Name FROM OneLocationInfo WHERE FunctionAddress!=0 AND Fingerprint!='' AND BlockType=1 AND FileID="+str(FileID)+" AND StartAddress NOT IN (SELECT "+AddressColumnName+" FROM MatchMap)");
return self.Cursor
def GetUnidentifiedBlocksCount(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
try:
self.Cursor.execute("SELECT COUNT(*) FROM OneLocationInfo WHERE FunctionAddress!=0 AND Fingerprint!='' AND FileID="+str(FileID)+" AND StartAddress NOT IN (SELECT "+AddressColumnName+" FROM MatchMap)");
for row in self.Cursor:
return row[0]
except:
return 0
def GetOneLocationInfoCount(self,FileID):
self.Cursor.execute("SELECT COUNT(*) FROM OneLocationInfo WHERE FunctionAddress!=0 AND Fingerprint!='' AND FileID="+str(FileID))
for row in self.Cursor:
return row[0]
def GetCount(self,FileID):
(FileIDColumnName,AddressColumnName)=self.GetNames(FileID)
query="SELECT FunctionAddress,StartAddress FROM OneLocationInfo WHERE FunctionAddress!=0 AND Fingerprint!='' AND FileID="+str(FileID)+" AND StartAddress NOT IN (SELECT "+AddressColumnName+" FROM MatchMap) ORDER BY FunctionAddress"
if self.DebugLevel > 2:
print query
self.Cursor.execute(query);
for row in self.Cursor:
function_address=int(row[0])
block_address=int(row[1])
function_name="("+hex(function_address)+")"
if self.FunctionNames[FileID].has_key(function_address):
function_name=self.FunctionNames[FileID][function_address]
if self.DebugLevel > 2:
print function_name+"."+hex(block_address)
def GetTypeStr(self,Type):
Types=("Name","Fingerprint","Two Level fingerprint","Tree","Fingerprint Inside Function","Function")
if len(Types)<=Type:
return "Unknown"
return Types[Type]
"""
c.execute("SELECT COUNT(DISTINCT(TheSourceAddress)) FROM MatchMap WHERE MatchRate!=100")
for row in c:
print 'Modified Blocks',row,sqrt(row[0])
Matches={}
IdentifiedFunctions={}
c.execute("SELECT TheSourceFileID,TheTargetFileID,TheSourceAddress,TheTargetAddress,Type,MatchRate,UnpatchedParentAddress,PatchedParentAddress FROM MatchMap")
for row in c:
TheSourceFileID=int(row[0])
TheTargetFileID=int(row[1])
TheSourceAddress=int(row[2])
TheTargetAddress=int(row[3])
TheSourceFunctionAddress=0
if self.Address2Function[TheSourceFileID].has_key(TheSourceAddress):
TheSourceFunctionAddress=self.Address2Function[TheSourceFileID][TheSourceAddress]
TheTargetFunctionAddress=0
if self.Address2Function[TheTargetFileID].has_key(TheTargetAddress):
TheTargetFunctionAddress=self.Address2Function[TheTargetFileID][TheTargetAddress]
if not IdentifiedFunctions.has_key(TheSourceFileID):
IdentifiedFunctions[TheSourceFileID]={}
if not IdentifiedFunctions[TheSourceFileID].has_key(TheSourceFunctionAddress):
IdentifiedFunctions[TheSourceFileID][TheSourceFunctionAddress]=[]
if not IdentifiedFunctions.has_key(TheTargetFileID):
IdentifiedFunctions[TheTargetFileID]={}
if not IdentifiedFunctions[TheTargetFileID].has_key(TheTargetFunctionAddress):
IdentifiedFunctions[TheTargetFileID][TheTargetFunctionAddress]=[]
if not Matches.has_key(TheSourceFunctionAddress):
Matches[TheSourceFunctionAddress]=[]
Matches[TheSourceFunctionAddress].append((TheSourceAddress,TheTargetFunctionAddress,TheTargetAddress,row[4],row[5],row[6],row[7]))
TheSourceFileID=1
TheTargetFileID=2
DoPrintAll=False
for TheSourceFunctionAddress in Matches.keys():
TheSourceFunctionName=''
TheSourceFunctionName=hex(TheSourceFunctionAddress)
if self.FunctionNames[TheSourceFileID].has_key(TheSourceFunctionAddress):
TheSourceFunctionName=self.FunctionNames[TheSourceFileID][TheSourceFunctionAddress]
TheLastTargetFunctionAddress=0
for (TheSourceAddress,TheTargetFunctionAddress,TheTargetAddress,Type,MatchRate,TheParentSourceAddress,TheParentTargetAddress) in Matches[TheSourceFunctionAddress]:
if TheLastTargetFunctionAddress!=TheTargetFunctionAddress:
TheTargetFunctionName=hex(TheTargetFunctionAddress)
if self.FunctionNames[TheTargetFileID].has_key(TheTargetFunctionAddress):
TheTargetFunctionName=self.FunctionNames[TheTargetFileID][TheTargetFunctionAddress]
print TheSourceFunctionName,TheTargetFunctionName
TheLastTargetFunctionAddress=TheTargetFunctionAddress
if MatchRate!=100 or DoPrintAll:
print '\t',hex(TheSourceAddress),hex(TheTargetAddress),GetTypeStr(Type),MatchRate,hex(TheParentSourceAddress),hex(TheParentTargetAddress)
IdentifiedFunctions[TheTargetFileID][TheTargetFunctionAddress]=[]
for FileID in self.FunctionNames.keys():
for FunctionAddress in self.FunctionNames[FileID].keys():
if IdentifiedFunctions.has_key(FileID) and not IdentifiedFunctions[FileID].has_key(FunctionAddress):
print 'Unidentified Function',FileID,self.FunctionNames[FileID][FunctionAddress],hex(FunctionAddress)
"""
|
|
# -*- coding: UTF-8 -*-
"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The PropertyList (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in strings, you can use readPlistFromString()
and writePlistToString().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries, Data or datetime.datetime objects. String values (including
dictionary keys) may be unicode strings -- they will be written out as
UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python string.
Generate Plist example:
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = Data("<binary gunk>"),
someMoreData = Data("<lots of binary gunk>" * 10),
aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# unicode keys are possible, but a little awkward to use:
pl[u'\xc5benraa'] = "That was a unicode key."
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print pl["aKey"]
"""
__all__ = [
"readPlist", "writePlist", "readPlistFromString", "writePlistToString",
"readPlistFromResource", "writePlistToResource",
"Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
import datetime
from cStringIO import StringIO
import re
def readPlist(pathOrFile):
"""Read a .plist file. 'pathOrFile' may either be a file name or a
(readable) file object. Return the unpacked root object (which
usually is a dictionary).
"""
didOpen = 0
if isinstance(pathOrFile, (str, unicode)):
pathOrFile = open(pathOrFile)
didOpen = 1
p = PlistParser()
rootObject = p.parse(pathOrFile)
if didOpen:
pathOrFile.close()
return rootObject
def writePlist(rootObject, pathOrFile):
"""Write 'rootObject' to a .plist file. 'pathOrFile' may either be a
file name or a (writable) file object.
"""
didOpen = 0
if isinstance(pathOrFile, (str, unicode)):
pathOrFile = open(pathOrFile, "w")
didOpen = 1
writer = PlistWriter(pathOrFile)
writer.writeln("<plist version=\"1.0\">")
writer.writeValue(rootObject)
writer.writeln("</plist>")
if didOpen:
pathOrFile.close()
def readPlistFromString(data):
"""Read a plist data from a string. Return the root object.
"""
return readPlist(StringIO(data))
def writePlistToString(rootObject):
"""Return 'rootObject' as a plist-formatted string.
"""
f = StringIO()
writePlist(rootObject, f)
return f.getvalue()
def readPlistFromResource(path, restype='plst', resid=0):
"""Read plst resource from the resource fork of path.
"""
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdPerm
from Carbon import Res
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
Res.UseResFile(resNum)
plistData = Res.Get1Resource(restype, resid).data
Res.CloseResFile(resNum)
return readPlistFromString(plistData)
def writePlistToResource(rootObject, path, restype='plst', resid=0):
"""Write 'rootObject' as a plst resource to the resource fork of path.
"""
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdWrPerm
from Carbon import Res
plistData = writePlistToString(rootObject)
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdWrPerm)
Res.UseResFile(resNum)
try:
Res.Get1Resource(restype, resid).RemoveResource()
except Res.Error:
pass
res = Res.Resource(plistData)
res.AddResource(restype, resid, '')
res.WriteResource()
Res.CloseResFile(resNum)
class DumbXMLWriter:
def __init__(self, file, indentLevel=0, indent="\t"):
self.file = file
self.stack = []
self.indentLevel = indentLevel
self.indent = indent
def beginElement(self, element):
self.stack.append(element)
self.writeln("<%s>" % element)
self.indentLevel += 1
def endElement(self, element):
assert self.indentLevel > 0
assert self.stack.pop() == element
self.indentLevel -= 1
self.writeln("</%s>" % element)
def simpleElement(self, element, value=None):
if value is not None:
value = _escapeAndEncode(value)
self.writeln("<%s>%s</%s>" % (element, value, element))
else:
self.writeln("<%s/>" % element)
def writeln(self, line):
if line:
self.file.write(self.indentLevel * self.indent + line + "\n")
else:
self.file.write("\n")
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z")
def _dateFromString(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime.datetime(*lst)
def _dateToString(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
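# Illustrative round-trip (not part of the original module):
#   >>> _dateFromString("2016-01-12T09:28:04Z")
#   datetime.datetime(2016, 1, 12, 9, 28, 4)
#   >>> _dateToString(datetime.datetime(2016, 1, 12, 9, 28, 4))
#   '2016-01-12T09:28:04Z'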
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text.encode("utf-8") # encode as UTF-8
class PlistWriter(DumbXMLWriter):
XMLHEADER = '<?xml version="1.0" encoding="UTF-8"?>\n' # Standard header for XML/XSL documents
PLISTHEADER = XMLHEADER + """\
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
def __init__(self, file, indentLevel=0, indent="\t", writeHeader=1):
if writeHeader:
file.write(self.PLISTHEADER)
DumbXMLWriter.__init__(self, file, indentLevel, indent)
def writeValue(self, value):
if isinstance(value, (str, unicode)):
self.simpleElement("string", value)
elif isinstance(value, bool):
# must switch for bool before int, as bool is a
# subclass of int...
if value:
self.simpleElement("true")
else:
self.simpleElement("false")
elif isinstance(value, int):
self.simpleElement("integer", str(value))
elif isinstance(value, float):
self.simpleElement("real", repr(value))
elif isinstance(value, dict):
self.writeDict(value)
elif isinstance(value, Data):
self.writeData(value)
elif isinstance(value, datetime.datetime):
self.simpleElement("date", _dateToString(value))
elif isinstance(value, (tuple, list)):
self.writeArray(value)
else:
raise TypeError("unsuported type: %s" % type(value))
def writeData(self, data):
self.beginElement("data")
self.indentLevel -= 1
maxlinelength = 76 - len(self.indent.replace("\t", " " * 8) *
self.indentLevel)
for line in data.asBase64(maxlinelength).split("\n"):
if line:
self.writeln(line)
self.indentLevel += 1
self.endElement("data")
def writeDict(self, d):
self.beginElement("dict")
items = d.items()
items.sort()
for key, value in items:
if not isinstance(key, (str, unicode)):
raise TypeError("keys must be strings")
self.simpleElement("key", key)
self.writeValue(value)
self.endElement("dict")
def writeArray(self, array):
self.beginElement("array")
for value in array:
self.writeValue(value)
self.endElement("array")
class _InternalDict(dict):
# This class is needed while Dict is scheduled for deprecation:
# we only need to warn when a *user* instantiates Dict or when
# the "attribute notation for dict keys" is used.
def __getattr__(self, attr):
try:
value = self[attr]
except KeyError:
raise AttributeError, attr
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning)
return value
def __setattr__(self, attr, value):
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning)
self[attr] = value
def __delattr__(self, attr):
try:
del self[attr]
except KeyError:
raise AttributeError, attr
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning)
class Dict(_InternalDict):
def __init__(self, **kwargs):
from warnings import warn
warn("The plistlib.Dict class is deprecated, use builtin dict instead",
PendingDeprecationWarning)
super(Dict, self).__init__(**kwargs)
class Plist(_InternalDict):
"""This class has been deprecated. Use readPlist() and writePlist()
functions instead, together with regular dict objects.
"""
def __init__(self, **kwargs):
from warnings import warn
warn("The Plist class is deprecated, use the readPlist() and "
"writePlist() functions instead", PendingDeprecationWarning)
super(Plist, self).__init__(**kwargs)
def fromFile(cls, pathOrFile):
"""Deprecated. Use the readPlist() function instead."""
rootObject = readPlist(pathOrFile)
plist = cls()
plist.update(rootObject)
return plist
fromFile = classmethod(fromFile)
def write(self, pathOrFile):
"""Deprecated. Use the writePlist() function instead."""
writePlist(self, pathOrFile)
def _encodeBase64(s, maxlinelength=76):
# copied from base64.encodestring(), with added maxlinelength argument
maxbinsize = (maxlinelength//4)*3
pieces = []
for i in range(0, len(s), maxbinsize):
chunk = s[i : i + maxbinsize]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
class Data:
"""Wrapper for binary data."""
def __init__(self, data):
self.data = data
def fromBase64(cls, data):
# base64.decodestring just calls binascii.a2b_base64;
# it seems overkill to use both base64 and binascii.
return cls(binascii.a2b_base64(data))
fromBase64 = classmethod(fromBase64)
def asBase64(self, maxlinelength=76):
return _encodeBase64(self.data, maxlinelength)
def __cmp__(self, other):
if isinstance(other, self.__class__):
return cmp(self.data, other.data)
elif isinstance(other, str):
return cmp(self.data, other)
else:
return cmp(id(self), id(other))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
def __init__(self):
self.stack = []
self.currentKey = None
self.root = None
def parse(self, fileobj):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self.handleBeginElement
parser.EndElementHandler = self.handleEndElement
parser.CharacterDataHandler = self.handleData
parser.ParseFile(fileobj)
return self.root
def handleBeginElement(self, element, attrs):
self.data = []
handler = getattr(self, "begin_" + element, None)
if handler is not None:
handler(attrs)
def handleEndElement(self, element):
handler = getattr(self, "end_" + element, None)
if handler is not None:
handler()
def handleData(self, data):
self.data.append(data)
def addObject(self, value):
if self.currentKey is not None:
self.stack[-1][self.currentKey] = value
self.currentKey = None
elif not self.stack:
# this is the root object
self.root = value
else:
self.stack[-1].append(value)
def getData(self):
data = "".join(self.data)
try:
data = data.encode("ascii")
except UnicodeError:
pass
self.data = []
return data
# element handlers
def begin_dict(self, attrs):
d = _InternalDict()
self.addObject(d)
self.stack.append(d)
def end_dict(self):
self.stack.pop()
def end_key(self):
self.currentKey = self.getData()
def begin_array(self, attrs):
a = []
self.addObject(a)
self.stack.append(a)
def end_array(self):
self.stack.pop()
def end_true(self):
self.addObject(True)
def end_false(self):
self.addObject(False)
def end_integer(self):
self.addObject(int(self.getData()))
def end_real(self):
self.addObject(float(self.getData()))
def end_string(self):
self.addObject(self.getData())
def end_data(self):
self.addObject(Data.fromBase64(self.getData()))
def end_date(self):
self.addObject(_dateFromString(self.getData()))
|
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from starthinker.util.bigquery import query_to_view
from starthinker.util.bigquery import table_create
from starthinker.util.data import get_rows
from starthinker.util.data import put_rows
from starthinker.util.google_api import API_DV360
from starthinker.util.discovery_to_bigquery import Discovery_To_BigQuery
from starthinker.util.regexp import lookup_id
from starthinker.util.sheets import sheets_clear
from starthinker.task.dv_editor.patch import patch_log
from starthinker.task.dv_editor.patch import patch_masks
from starthinker.task.dv_editor.patch import patch_preview
def dv_line_item_clear(config, task):
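  # Rebuild the empty DV_LineItems table in BigQuery from the API schema and
  # clear the "Line Items" sheet tab so it can be reloaded.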
table_create(
config,
task["auth_bigquery"],
config.project,
task["dataset"],
"DV_LineItems",
Discovery_To_BigQuery(
"displayvideo",
"v1"
).method_schema(
"advertisers.lineItems.list"
)
)
sheets_clear(
config,
task["auth_sheets"],
task["sheet"],
"Line Items",
"A2:AI"
)
def dv_line_item_load(config, task):
  # load line items for every advertiser listed in the user-defined sheet,
  # filtered to the selected campaigns
def dv_line_item_load_multiple():
campaigns = set([lookup_id(row[0]) for row in get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "DV Campaigns",
"header":False,
"range": "A2:A"
}}
)])
rows = get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "DV Advertisers",
"header":False,
"range": "A2:A"
}}
)
# String for filtering which entityStatus enums we want to see in the sheet
for row in rows:
for record in API_DV360(
config,
task["auth_dv"],
iterate=True
).advertisers().lineItems().list(
advertiserId=lookup_id(row[0]),
filter='entityStatus="ENTITY_STATUS_PAUSED" OR entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_DRAFT"'
).execute():
if not campaigns or record['campaignId'] in campaigns:
yield record
# write line_items to database
put_rows(
config,
task["auth_bigquery"],
{
"bigquery": {
"dataset": task["dataset"],
"table": "DV_LineItems",
"schema": Discovery_To_BigQuery(
"displayvideo",
"v1"
).method_schema(
"advertisers.lineItems.list"
),
"format": "JSON"
}},
dv_line_item_load_multiple()
)
# write line_items to sheet
#put_rows(
# config,
# task["auth_sheets"],
# { "sheets": {
# "sheet": task["sheet"],
# "tab": "DV Line Items",
# "header":False,
# "range": "A2"
# }},
# get_rows(
# config,
# task["auth_bigquery"],
# { "bigquery": {
# "dataset": task["dataset"],
# "query": """SELECT
# CONCAT(P.displayName, ' - ', P.partnerId),
# CONCAT(A.displayName, ' - ', A.advertiserId),
# CONCAT(C.displayName, ' - ', C.campaignId),
# CONCAT(I.displayName, ' - ', I.insertionOrderId),
# CONCAT(L.displayName, ' - ', L.lineItemId),
# 'PATCH',
# L.entityStatus,
# L.entityStatus,
# ARRAY_TO_STRING(L.warningMessages, '\\n'),
# L.lineItemType,
# L.lineItemType,
#
# L.flight.flightDateType,
# L.flight.flightDateType,
# CONCAT(L.flight.dateRange.startDate.year, '-', L.flight.dateRange.startDate.month, '-', L.flight.dateRange.startDate.day),
# CONCAT(L.flight.dateRange.startDate.year, '-', L.flight.dateRange.startDate.month, '-', L.flight.dateRange.startDate.day),
# CONCAT(L.flight.dateRange.endDate.year, '-', L.flight.dateRange.endDate.month, '-', L.flight.dateRange.endDate.day),
# CONCAT(L.flight.dateRange.endDate.year, '-', L.flight.dateRange.endDate.month, '-', L.flight.dateRange.endDate.day),
# L.flight.triggerId,
# L.flight.triggerId,
#
# L.budget.budgetAllocationType,
# L.budget.budgetAllocationType,
# L.budget.budgetUnit,
# L.budget.budgetUnit,
# L.budget.maxAmount / 1000000,
# L.budget.maxAmount / 1000000,
#
# L.partnerRevenueModel.markupType,
# L.partnerRevenueModel.markupType,
# CAST(L.partnerRevenueModel.markupAmount AS FLOAT64) / IF(L.partnerRevenueModel.markupType='PARTNER_REVENUE_MODEL_MARKUP_TYPE_CPM', 1000000, 1000),
# CAST(L.partnerRevenueModel.markupAmount AS FLOAT64) / IF(L.partnerRevenueModel.markupType='PARTNER_REVENUE_MODEL_MARKUP_TYPE_CPM', 1000000, 1000),
#
# CAST(L.conversionCounting.postViewCountPercentageMillis AS Float64) / 1000,
# CAST(L.conversionCounting.postViewCountPercentageMillis AS Float64) / 1000,
#
# L.targetingExpansion.targetingExpansionLevel,
# L.targetingExpansion.targetingExpansionLevel,
# L.targetingExpansion.excludeFirstPartyAudience,
# L.targetingExpansion.excludeFirstPartyAudience,
#
# FROM `{dataset}.DV_LineItems` AS L
# LEFT JOIN `{dataset}.DV_Advertisers` AS A
# ON L.advertiserId=A.advertiserId
# LEFT JOIN `{dataset}.DV_Campaigns` AS C
# ON L.campaignId=C.campaignId
# LEFT JOIN `{dataset}.DV_InsertionOrders` AS I
# ON L.insertionOrderId=I.insertionOrderId
# LEFT JOIN `{dataset}.DV_Partners` AS P
# ON A.partnerId=P.partnerId
# ORDER BY I.displayName, L.displayName
# """.format(**task),
# "legacy": False
# }}
# )
# )
def dv_line_item_audit(config, task):
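  # Copy the edited "Line Items" sheet tab into BigQuery, then build the
  # INSERT, AUDIT and PATCH views used by the write functions below.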
put_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table": "SHEET_LineItems",
"schema": [
{ "name": "Partner", "type": "STRING" },
{ "name": "Advertiser", "type": "STRING" },
{ "name": "Campaign", "type": "STRING" },
{ "name": "Insertion_Order", "type": "STRING" },
{ "name": "Line_Item", "type": "STRING" },
{ "name": "Action", "type": "STRING" },
{ "name": "Status", "type": "STRING" },
{ "name": "Status_Edit", "type": "STRING" },
{ "name": "Warning", "type": "STRING" },
{ "name": "Line_Item_Type", "type": "STRING" },
{ "name": "Line_Item_Type_Edit", "type": "STRING" },
{ "name": "Flight_Data_Type", "type": "STRING" },
{ "name": "Flight_Data_Type_Edit", "type": "STRING" },
{ "name": "Flight_Start_Date", "type": "STRING" },
{ "name": "Flight_Start_Date_Edit", "type": "STRING" },
{ "name": "Flight_End_Date", "type": "STRING" },
{ "name": "Flight_End_Date_Edit", "type": "STRING" },
{ "name": "Flight_Trigger", "type": "STRING" },
{ "name": "Flight_Trigger_Edit", "type": "STRING" },
{ "name": "Budget_Allocation_Type", "type": "STRING" },
{ "name": "Budget_Allocation_Type_Edit", "type": "STRING" },
{ "name": "Budget_Unit", "type": "STRING" },
{ "name": "Budget_Unit_Edit", "type": "STRING" },
{ "name": "Budget_Max", "type": "FLOAT" },
{ "name": "Budget_Max_Edit", "type": "FLOAT" },
{ "name": "Partner_Revenue_Model_Type", "type": "STRING" },
{ "name": "Partner_Revenue_Model_Type_Edit", "type": "STRING" },
{ "name": "Partner_Revenue_Model_Markup_Percent", "type": "FLOAT" },
{ "name": "Partner_Revenue_Model_Markup_Percent_Edit", "type": "FLOAT" },
{ "name": "Conversion_Percent", "type": "FLOAT" },
{ "name": "Conversion_Percent_Edit", "type": "FLOAT" },
{ "name": "Targeting_Expansion_Level", "type": "STRING" },
{ "name": "Targeting_Expansion_Level_Edit", "type": "STRING" },
{ "name": "Exclude_1P", "type": "STRING" },
{ "name": "Exclude_1P_Edit", "type": "STRING" },
],
"format": "CSV"
}},
get_rows(
config,
task["auth_sheets"],
{ "sheets": {
"sheet": task["sheet"],
"tab": "Line Items",
"header":False,
"range": "A2:AI"
}}
)
)
# Create Insert View
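  # Joins the sheet tabs into lineItems.create request bodies; only rows marked
  # INSERT whose displayName is not already present in DV_LineItems are kept.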
query_to_view(
config,
task["auth_bigquery"],
config.project,
task["dataset"],
"INSERT_LineItems",
"""SELECT
REGEXP_EXTRACT(S_LI.Advertiser, r' - (\d+)$') AS advertiserId,
REGEXP_EXTRACT(S_LI.Campaign, r' - (\d+)$') AS campaignId,
REGEXP_EXTRACT(S_LI.Insertion_Order, r' - (\d+)$') AS insertionOrderId,
S_LI.Line_Item AS displayName,
S_LI.Line_Item_Type_Edit AS lineItemType,
S_LI.Status_Edit AS entityStatus,
STRUCT(
S_PC.Cost_Type_Edit As costType,
S_PC.Fee_Type_Edit As feeType,
S_PC.Invoice_Type_Edit AS invoiceType,
S_PC.Fee_Amount_Edit AS feeAmount,
S_PC.Fee_Percent_Edit * 1000 AS feePercentageMillis
) AS partnerCosts,
STRUCT(
S_LI.Flight_Data_Type_Edit AS flightDateType,
STRUCT (
STRUCT (
EXTRACT(YEAR FROM CAST(S_LI.Flight_Start_Date_Edit AS Date)) AS year,
EXTRACT(MONTH FROM CAST(S_LI.Flight_Start_Date_Edit AS DATE)) AS month,
EXTRACT(DAY FROM CAST(S_LI.Flight_Start_Date_Edit AS DATE)) AS day
) AS startDate,
STRUCT (
EXTRACT(YEAR FROM CAST(S_LI.Flight_End_Date_Edit AS Date)) AS year,
EXTRACT(MONTH FROM CAST(S_LI.Flight_End_Date_Edit AS DATE)) AS month,
EXTRACT(DAY FROM CAST(S_LI.Flight_End_Date_Edit AS DATE)) AS day
) AS endDate
) AS dateRange,
S_LI.Flight_Trigger_Edit AS triggerId
) AS flight,
STRUCT(
S_LI.Budget_Allocation_Type_Edit AS budgetAllocationType,
S_LI.Budget_Unit_Edit AS budgetUnit,
S_LI.Budget_Max_Edit * 1000000 AS maxAmount
) AS budget,
STRUCT(
S_P.Period_Edit As pacingPeriod,
S_P.Type_Edit As pacingType,
S_P.Daily_Budget_Edit AS dailyMaxMicros,
S_P.Daily_Impressions_Edit AS dailyMaxImpressions
) AS pacing,
STRUCT(
S_FC.Unlimited_Edit AS unlimited,
S_FC.Time_Unit_Edit AS timeUnit,
S_FC.Time_Count_Edit AS timeUnitCount,
S_FC.Max_impressions_Edit AS maxImpressions
) AS frequencyCap,
STRUCT(
S_LI.Partner_Revenue_Model_Type_Edit AS markupType,
S_LI.Partner_Revenue_Model_Markup_Percent_Edit * IF(S_LI.Partner_Revenue_Model_Type_Edit='PARTNER_REVENUE_MODEL_MARKUP_TYPE_CPM', 1000000, 1000) AS markupAmount
) AS partnerRevenueModel,
STRUCT(
      S_LI.Conversion_Percent_Edit * 1000 AS postViewCountPercentageMillis,
[] AS floodlightActivityConfigs
) AS conversionCounting,
STRUCT(
IF(S_BS.Fixed_Bid_Edit IS NOT NULL,
STRUCT(
S_BS.Fixed_Bid_Edit * 1000000 AS bidAmountMicros
),
NULL
) AS fixedBid,
IF(S_BS.Auto_Bid_Goal_Edit IS NOT NULL,
STRUCT(
S_BS.Auto_Bid_Goal_Edit AS performanceGoalType,
S_BS.Auto_Bid_Amount_Edit * 1000000 AS maxAverageCpmBidAmountMicros,
S_BS.Auto_Bid_Algorithm_Edit AS customBiddingAlgorithmId
),
NULL
) AS maximizeSpendAutoBid,
IF(S_BS.Performance_Goal_Type_Edit IS NOT NULL,
STRUCT(
S_BS.Performance_Goal_Type_Edit AS performanceGoalType,
S_BS.Performance_Goal_Amount_Edit * 1000000 AS performanceGoalAmountMicros,
S_BS.Performance_Goal_Average_CPM_Bid_Edit * 1000000 AS maxAverageCpmBidAmountMicros,
S_BS.Performance_Goal_Algorithm_Edit AS customBiddingAlgorithmId
),
NULL
) AS performanceGoalAutoBid
)
AS bidStrategy,
STRUCT(
S_ID.Integration_Code_Edit AS integrationCode,
S_ID.Details_Edit AS details
) AS integrationDetails,
STRUCT(
S_LI.Targeting_Expansion_Level_Edit AS targetingExpansionLevel,
S_LI.Exclude_1P_Edit AS excludeFirstPartyAudience
) AS targetingExpansion
FROM `{dataset}.SHEET_LineItems` AS S_LI
LEFT JOIN `{dataset}.SHEET_PartnerCosts` AS S_PC ON S_LI.Line_Item=S_PC.Line_Item
LEFT JOIN `{dataset}.SHEET_Pacing` AS S_P ON S_LI.Line_Item=S_P.Line_Item
LEFT JOIN `{dataset}.SHEET_FrequencyCaps` AS S_FC ON S_LI.Line_Item=S_FC.Line_Item
LEFT JOIN `{dataset}.SHEET_IntegrationDetails` AS S_ID ON S_LI.Line_Item=S_ID.Line_Item
LEFT JOIN `{dataset}.SHEET_BidStrategy` AS S_BS ON S_LI.Line_Item=S_BS.Line_Item
LEFT JOIN `{dataset}.DV_LineItems` AS DV_LI ON S_LI.Line_Item=DV_LI.displayName
WHERE S_LI.Action="INSERT"
AND DV_LI IS NULL
""".format(**task),
legacy=False
)
# Create Audit View
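  # Flags rows with missing budget fields as ERROR and duplicate Line Item
  # names as WARNING so they can be excluded or reported downstream.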
query_to_view(
config,
task["auth_bigquery"],
config.project,
task["dataset"],
"AUDIT_LineItems",
"""WITH
/* Check if sheet values are set */
INPUT_ERRORS AS (
SELECT
*
FROM (
SELECT
'Line Item' AS Operation,
CASE
WHEN Budget_Allocation_Type_Edit IS NULL THEN 'Missing Budget Allocation Type.'
WHEN Budget_Unit_Edit IS NULL THEN 'Missing Budget Unit.'
ELSE
NULL
END AS Error,
'ERROR' AS Severity,
COALESCE(Line_Item, 'BLANK') AS Id
FROM
`{dataset}.SHEET_LineItems`
)
WHERE
Error IS NOT NULL
),
/* Check duplicate inserts */
DUPLICATE_ERRORS AS (
SELECT
'Line Item' AS Operation,
'Duplicate Line Item name, insert will be ignored.' AS Error,
'WARNING' AS Severity,
COALESCE(S_LI.Line_Item, 'BLANK') AS Id
FROM `{dataset}.SHEET_LineItems` As S_LI
LEFT JOIN `{dataset}.DV_LineItems` AS DV_LI ON S_LI.Line_Item=DV_LI.displayName
WHERE S_LI.Action="INSERT"
AND DV_LI IS NOT NULL
)
SELECT * FROM INPUT_ERRORS
UNION ALL
SELECT * FROM DUPLICATE_ERRORS
""".format(**task),
legacy=False
)
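  # Create Patch View: only rows free of ERROR-severity audit findings are patched.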
query_to_view(
config,
task["auth_bigquery"],
config.project,
task["dataset"],
"PATCH_LineItems",
"""SELECT *
FROM `{dataset}.SHEET_LineItems`
WHERE Line_Item NOT IN (SELECT Id FROM `{dataset}.AUDIT_LineItems` WHERE Severity='ERROR')
""".format(**task),
legacy=False
)
def dv_line_item_patch(config, task, commit=False):
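  # Diff each row's original columns against the *_Edit columns and build
  # DV360 lineItems patch bodies for the fields that changed.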
def date_edited(value):
y, m, d = value.split("-")
return {"year": y, "month": m, "day": d}
patches = []
rows = get_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table":"PATCH_LineItems",
}},
as_object=True
)
for row in rows:
if row['Action'] == "DELETE":
patches.append({
"operation": "Line Items",
"action": "DELETE",
"partner": row['Partner'],
"advertiser": row['Advertiser'],
"campaign": row['Campaign'],
"line_item": row['Line_Item'],
"parameters": {
"advertiserId": lookup_id(row['Advertiser']),
"lineItemId": lookup_id(row['Line_Item'])
}
})
elif row['Action'] == "PATCH":
line_item = {}
if row['Line_Item_Type'] != row['Line_Item_Type_Edit']:
line_item["lineItemType"] = row['Line_Item_Type_Edit']
if row['Status'] != row['Status_Edit']:
line_item['entityStatus'] = row['Status_Edit']
if row['Flight_Data_Type'] != row['Flight_Data_Type_Edit']:
line_item.setdefault("flight", {})
line_item["flight"]["flightDateType"] = row['Flight_Data_Type_Edit']
if row['Flight_Start_Date'] != row['Flight_Start_Date_Edit']:
line_item.setdefault("flight", {}).setdefault("dateRange", {})
line_item["flight"]["dateRange"]["startDate"] = date_edited(row['Flight_Start_Date_Edit'])
if row['Flight_End_Date'] != row['Flight_End_Date_Edit']:
line_item.setdefault("flight", {}).setdefault("dateRange", {})
line_item["flight"]["dateRange"]["endDate"] = date_edited(row['Flight_End_Date_Edit'])
if row['Flight_Trigger'] != row['Flight_Trigger_Edit']:
line_item.setdefault("flight", {})
line_item["flight"]["triggerId"] = row['Flight_Trigger_Edit']
if row['Budget_Allocation_Type'] != row['Budget_Allocation_Type_Edit']:
line_item.setdefault("budget", {})
line_item["budget"]["budgetAllocationType"] = row['Budget_Allocation_Type_Edit']
if row['Budget_Unit'] != row['Budget_Unit_Edit']:
line_item.setdefault("budget", {})
line_item["budget"]["budgetUnit"] = row['Budget_Unit_Edit']
if row['Budget_Max'] != row['Budget_Max_Edit']:
line_item.setdefault("budget", {})
line_item["budget"]["maxAmount"] = int(
          float(row['Budget_Max_Edit']) * 1000000  # maxAmount is in micros, matching the INSERT view
)
if row['Partner_Revenue_Model_Type'] != row['Partner_Revenue_Model_Type_Edit']:
line_item.setdefault("partnerRevenueModel", {})
line_item["partnerRevenueModel"]["markupType"] = row['Partner_Revenue_Model_Type_Edit']
if row['Partner_Revenue_Model_Markup_Percent'] != row['Partner_Revenue_Model_Markup_Percent_Edit']:
line_item.setdefault("partnerRevenueModel", {})
line_item["partnerRevenueModel"]["markupAmount"] = int(
float(row['Partner_Revenue_Model_Markup_Percent_Edit']) * (
            1000000 if row['Partner_Revenue_Model_Type_Edit'] == 'PARTNER_REVENUE_MODEL_MARKUP_TYPE_CPM' else 1000
)
)
if row['Conversion_Percent'] != row['Conversion_Percent_Edit']:
line_item.setdefault("conversionCounting", {})
line_item["conversionCounting"]["postViewCountPercentageMillis"] = int(
float(row['Conversion_Percent_Edit']) * 1000
)
if row['Targeting_Expansion_Level'] != row['Targeting_Expansion_Level_Edit']:
line_item.setdefault("targetingExpansion", {})
line_item["targetingExpansion"]["targetingExpansionLevel"] = row['Targeting_Expansion_Level_Edit']
if row['Exclude_1P'] != row['Exclude_1P_Edit']:
line_item.setdefault("targetingExpansion", {})
line_item["targetingExpansion"]["excludeFirstPartyAudience"] = row['Exclude_1P_Edit']
if line_item:
patches.append({
"operation": "Line Items",
"action": "PATCH",
"partner": row['Partner'],
"advertiser": row['Advertiser'],
"campaign": row['Campaign'],
"line_item": row['Line_Item'],
"parameters": {
"advertiserId": lookup_id(row['Advertiser']),
"lineItemId": lookup_id(row['Line_Item']),
"body": line_item
}
})
patch_masks(patches)
patch_preview(config, patches)
if commit:
    dv_line_item_commit(config, patches)
def dv_line_item_insert(config, task, commit=False):
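  # Turn each row of the INSERT_LineItems view into a lineItems.create request body.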
inserts = []
rows = get_rows(
config,
task["auth_bigquery"],
{ "bigquery": {
"dataset": task["dataset"],
"table":"INSERT_LineItems",
}},
as_object=True
)
for row in rows:
inserts.append({
"operation": "Line Items",
"action": "INSERT",
"partner": None,
"advertiser": row['advertiserId'],
"campaign": row['campaignId'],
"line_item": row['displayName'],
"parameters": {
"advertiserId": row['advertiserId'],
"body":row
}
})
patch_preview(config, inserts)
if commit:
    dv_line_item_commit(config, inserts)
def dv_line_item_commit(config, patches):
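  # Send each queued DELETE / PATCH / INSERT / TARGETING operation to the
  # DV360 API, recording the response or error via patch_log.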
for patch in patches:
if not patch.get("line_item"):
continue
print("API LINE ITEM:", patch["action"], patch["line_item"])
try:
if patch["action"] == "DELETE":
response = API_DV360(
config,
task["auth_dv"]
).advertisers().lineItems().delete(
**patch["parameters"]
).execute()
patch["success"] = response
elif patch["action"] == "PATCH":
response = API_DV360(
config,
task["auth_dv"]
).advertisers().lineItems().patch(
**patch["parameters"]
).execute()
patch["success"] = response["lineItemId"]
elif patch["action"] == "INSERT":
response = API_DV360(
config,
task["auth_dv"]
).advertisers().lineItems().create(
**patch["parameters"]
).execute()
patch["success"] = response["lineItemId"]
elif patch["action"] == "TARGETING":
response = API_DV360(
config,
task["auth_dv"]
).advertisers().lineItems().bulkEditAdvertiserAssignedTargetingOptions(
**patch["parameters"]
).execute()
patch["success"] = len(response["createdAssignedTargetingOptions"])
except Exception as e:
patch["error"] = str(e)
finally:
patch_log(config, patch)
patch_log(config)
|
|
"""
Test api_objects.py
"""
import mock
import unittest
from fmcapi import api_objects
class TestApiObjects(unittest.TestCase):
def test_ip_host_required_for_put(self):
self.assertEqual(api_objects.Hosts.REQUIRED_FOR_PUT, ["id", "name", "value"])
@mock.patch("fmcapi.api_objects.APIClassTemplate.parse_kwargs")
@mock.patch("fmcapi.api_objects.APIClassTemplate.valid_for_delete")
def test_api_class_template_delete_on_bad_response(self, mock_valid, *_):
"""
        If send_to_api returns None (because the API call failed), do not process the response; just return None.
"""
mock_valid.return_value = True
mock_fmc = mock.Mock()
mock_fmc.send_to_api.return_value = None
api = api_objects.APIClassTemplate(fmc=mock_fmc)
api.id = "id"
self.assertIsNone(api.delete())
@mock.patch("fmcapi.api_objects.ACPRule.parse_kwargs")
@mock.patch("fmcapi.fmc.FMC")
def test_URL_SUFFIX_1(self, mock_fmc, *_):
"""
Test URL_SUFFIX property
- No URL params
"""
a = api_objects.AccessRules(fmc=mock_fmc, acp_name="something")
self.assertEqual("", a.URL_SUFFIX)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.ACPRule.acp")
@mock.patch("fmcapi.fmc.FMC")
def test_URL_SUFFIX_2(self, mock_fmc, *_):
"""
Test URL_SUFFIX property
- Category param
"""
a = api_objects.AccessRules(
fmc=mock_fmc, acp_name="something", category="something"
)
self.assertTrue(a.URL.endswith("?category=something"))
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.ACPRule.acp")
@mock.patch("fmcapi.fmc.FMC")
def test_URL_SUFFIX_3(self, mock_fmc, *_):
"""
Test URL_SUFFIX property
- insertBefore param
"""
a = api_objects.AccessRules(
fmc=mock_fmc, acp_name="something", insertBefore="something"
)
self.assertTrue(a.URL.endswith("?insertBefore=something"))
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.ACPRule.acp")
@mock.patch("fmcapi.fmc.FMC")
def test_URL_SUFFIX_4(self, mock_fmc, *_):
"""
Test URL_SUFFIX property
- insertAfter param
"""
a = api_objects.AccessRules(
fmc=mock_fmc, acp_name="something", insertAfter="something"
)
self.assertTrue(a.URL.endswith("?insertAfter=something"))
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.ACPRule.acp")
@mock.patch("fmcapi.fmc.FMC")
def test_URL_SUFFIX_5(self, mock_fmc, *_):
"""
Test URL_SUFFIX property
- category param
- insertBefore param
"""
a = api_objects.AccessRules(
fmc=mock_fmc,
acp_name="something",
category="something",
insertBefore="something",
)
self.assertTrue(a.URL.endswith("?category=something&insertBefore=something"))
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.ACPRule.acp")
@mock.patch("fmcapi.fmc.FMC")
def test_URL_SUFFIX_6(self, mock_fmc, *_):
"""
Test URL_SUFFIX property
- Category param
- insertAfter param
"""
a = api_objects.AccessRules(
fmc=mock_fmc,
acp_name="something",
category="something",
insertAfter="something",
)
self.assertTrue(a.URL.endswith("?category=something&insertAfter=something"))
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.ACPRule.acp")
@mock.patch("logging.warning")
@mock.patch("fmcapi.fmc.FMC")
def test_URL_SUFFIX_7(self, mock_fmc, mock_log, *_):
"""
Test URL_SUFFIX property
- Category param
- insertBefore param
- insertAfter param
"""
a = api_objects.AccessRules(
fmc=mock_fmc,
acp_name="something",
category="something",
insertBefore="something",
insertAfter="something",
)
self.assertTrue(
a.URL.endswith(
"?category=something&insertBefore=something&insertAfter=something"
)
)
mock_log.assert_called_once()
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_source_network_add_for_objects_and_no_objects_initially(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
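        # Mock wiring: the IPAddresses mock's chained .get().get() lookup returns
        # three existing objects, while the FQDNS and NetworkGroup mocks resolve
        # to empty lists.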
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", name="someExistingObjectName2")
self.assertEqual(len(rule_obj.sourceNetworks["objects"]), 1)
self.assertEqual(
rule_obj.sourceNetworks,
{
"objects": [
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
}
]
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
    def test_ACPRule_source_network_add_for_objects_and_no_objects_initially_list(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", name="someExistingObjectName2")
self.assertEqual(len(rule_obj.sourceNetworks["objects"]), 1)
self.assertEqual(
rule_obj.sourceNetworks["objects"],
[
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
}
],
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_source_network_add_for_objects_and_one_objects_present_initially(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {
"objects": [
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
}
]
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", name="someExistingObjectName2")
self.assertEqual(len(rule_obj.sourceNetworks["objects"]), 2)
self.assertEqual(
rule_obj.sourceNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(
rule_obj.sourceNetworks["objects"][1],
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_source_network_add_for_objects_and_multiple_objects_present_initially(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {
"objects": [
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
]
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", name="someExistingObjectName2")
self.assertEqual(len(rule_obj.sourceNetworks["objects"]), 3)
self.assertEqual(
rule_obj.sourceNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(
rule_obj.sourceNetworks["objects"][1],
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
)
self.assertEqual(
rule_obj.sourceNetworks["objects"][2],
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_source_network_add_for_objects_and_duplicate_objects_present(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {
"objects": [
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
]
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", name="someExistingObjectName3")
self.assertEqual(len(rule_obj.sourceNetworks["objects"]), 2)
self.assertEqual(
rule_obj.sourceNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(
rule_obj.sourceNetworks["objects"][1],
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_add_for_literals_and_no_literal_present_initially(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", literal="10.0.0.1")
self.assertEqual(len(rule_obj.sourceNetworks["literals"]), 1)
self.assertEqual(rule_obj.sourceNetworks["literals"]["10.0.0.1"], "host")
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_add_for_literals_and_one_literal_present_initially(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", literal="10.0.0.2")
self.assertEqual(len(rule_obj.sourceNetworks["literals"]), 2)
self.assertEqual(rule_obj.sourceNetworks["literals"]["10.0.0.1"], "host")
self.assertEqual(rule_obj.sourceNetworks["literals"]["10.0.0.2"], "host")
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_add_for_literals_and_multiple_literal_present_initially(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {
"literals": {"10.0.0.1": "host", "10.0.0.2": "host", "10.0.0.3": "host"}
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", literal="10.0.0.4")
self.assertEqual(len(rule_obj.sourceNetworks["literals"]), 4)
self.assertEqual(rule_obj.sourceNetworks["literals"]["10.0.0.1"], "host")
self.assertEqual(rule_obj.sourceNetworks["literals"]["10.0.0.2"], "host")
self.assertEqual(rule_obj.sourceNetworks["literals"]["10.0.0.3"], "host")
self.assertEqual(rule_obj.sourceNetworks["literals"]["10.0.0.4"], "host")
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_add_for_literals_and_duplicate_literal_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", literal="10.0.0.1")
self.assertEqual(len(rule_obj.sourceNetworks["literals"]), 1)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_add_for_literals_and_objects_present_initially(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {
"objects": [
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
],
"literals": {},
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", literal="10.0.0.1")
self.assertEqual(len(rule_obj.sourceNetworks["literals"]), 1)
self.assertEqual(rule_obj.sourceNetworks["literals"]["10.0.0.1"], "host")
self.assertEqual(
rule_obj.sourceNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(
rule_obj.sourceNetworks["objects"][1],
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_source_network_add_for_objects_and_literals_present_initially(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.source_network(action="add", name="someExistingObjectName3")
self.assertEqual(len(rule_obj.sourceNetworks["objects"]), 1)
self.assertEqual(
rule_obj.sourceNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(len(rule_obj.sourceNetworks["literals"]), 1)
self.assertEqual(rule_obj.sourceNetworks["literals"]["10.0.0.1"], "host")
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_with_both_name_and_literals_given(self, _):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
with self.assertRaises(ValueError):
rule_obj.source_network(
action="add", name="someObjectName", literal="10.0.0.1"
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_remove_for_literals_with_multiple_literals_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {
"literals": {"10.0.0.1": "host", "10.0.0.2": "host", "10.0.0.3": "host"}
}
rule_obj.source_network(action="remove", literal="10.0.0.1")
self.assertEqual(len(rule_obj.sourceNetworks["literals"]), 2)
self.assertIsNotNone(rule_obj.sourceNetworks["literals"]["10.0.0.2"])
self.assertIsNotNone(rule_obj.sourceNetworks["literals"]["10.0.0.3"])
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_remove_for_literals_with_only_one_literal_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.source_network(action="remove", literal="10.0.0.1")
self.assertEqual(len(rule_obj.sourceNetworks["literals"]), 0)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_remove_for_objects_with_only_one_object_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {
"objects": [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
}
]
}
rule_obj.source_network(action="remove", name="someExistingObjectName1")
self.assertNotIn("sourceNetworks", self.__dict__)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_remove_for_objects_with_multiple_objects_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {
"objects": [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
}
rule_obj.source_network(action="remove", name="someExistingObjectName3")
self.assertEqual(len(rule_obj.sourceNetworks["objects"]), 2)
self.assertEqual(
rule_obj.sourceNetworks["objects"][0],
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
)
self.assertEqual(
rule_obj.sourceNetworks["objects"][1],
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_clear_for_objects(self, _):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {
"objects": [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
}
rule_obj.source_network(action="clear")
self.assertNotIn("sourceNetworks", self.__dict__)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_source_network_clear_for_literals(self, _):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.sourceNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.source_network(action="clear")
self.assertNotIn("sourceNetworks", self.__dict__)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_destination_network_add_for_objects_and_no_objects_initially(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", name="someExistingObjectName2")
self.assertEqual(len(rule_obj.destinationNetworks["objects"]), 1)
self.assertEqual(
rule_obj.destinationNetworks,
{
"objects": [
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
}
]
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
    def test_ACPRule_destination_network_add_for_objects_and_no_objects_initially_list(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", name="someExistingObjectName2")
self.assertEqual(len(rule_obj.destinationNetworks["objects"]), 1)
self.assertEqual(
rule_obj.destinationNetworks["objects"],
[
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
}
],
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_destination_network_add_for_objects_and_one_objects_present_initially(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {
"objects": [
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
}
]
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", name="someExistingObjectName2")
self.assertEqual(len(rule_obj.destinationNetworks["objects"]), 2)
self.assertEqual(
rule_obj.destinationNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(
rule_obj.destinationNetworks["objects"][1],
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_destination_network_add_for_objects_and_multiple_objects_present_initially(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {
"objects": [
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
]
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", name="someExistingObjectName2")
self.assertEqual(len(rule_obj.destinationNetworks["objects"]), 3)
self.assertEqual(
rule_obj.destinationNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(
rule_obj.destinationNetworks["objects"][1],
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
)
self.assertEqual(
rule_obj.destinationNetworks["objects"][2],
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_destination_network_add_for_objects_and_duplicate_objects_present(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {
"objects": [
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
]
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", name="someExistingObjectName3")
self.assertEqual(len(rule_obj.destinationNetworks["objects"]), 2)
self.assertEqual(
rule_obj.destinationNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(
rule_obj.destinationNetworks["objects"][1],
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_add_for_literals_and_no_literal_present_initially(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", literal="10.0.0.1")
self.assertEqual(len(rule_obj.destinationNetworks["literals"]), 1)
self.assertEqual(rule_obj.destinationNetworks["literals"]["10.0.0.1"], "host")
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_add_for_literals_and_one_literal_present_initially(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", literal="10.0.0.2")
self.assertEqual(len(rule_obj.destinationNetworks["literals"]), 2)
self.assertEqual(rule_obj.destinationNetworks["literals"]["10.0.0.1"], "host")
self.assertEqual(rule_obj.destinationNetworks["literals"]["10.0.0.2"], "host")
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_add_for_literals_and_multiple_literal_present_initially(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {
"literals": {"10.0.0.1": "host", "10.0.0.2": "host", "10.0.0.3": "host"}
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", literal="10.0.0.4")
self.assertEqual(len(rule_obj.destinationNetworks["literals"]), 4)
self.assertEqual(rule_obj.destinationNetworks["literals"]["10.0.0.1"], "host")
self.assertEqual(rule_obj.destinationNetworks["literals"]["10.0.0.2"], "host")
self.assertEqual(rule_obj.destinationNetworks["literals"]["10.0.0.3"], "host")
self.assertEqual(rule_obj.destinationNetworks["literals"]["10.0.0.4"], "host")
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_add_for_literals_and_duplicate_literal_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", literal="10.0.0.1")
self.assertEqual(len(rule_obj.destinationNetworks["literals"]), 1)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_add_for_literals_and_objects_present_initially(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {
"objects": [
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
],
"literals": {},
}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", literal="10.0.0.1")
self.assertEqual(len(rule_obj.destinationNetworks["literals"]), 1)
self.assertEqual(rule_obj.destinationNetworks["literals"]["10.0.0.1"], "host")
self.assertEqual(
rule_obj.destinationNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(
rule_obj.destinationNetworks["objects"][1],
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
@mock.patch("fmcapi.api_objects.NetworkGroup")
@mock.patch("fmcapi.api_objects.FQDNS")
@mock.patch("fmcapi.api_objects.IPAddresses")
def test_ACPRule_destination_network_add_for_objects_and_literals_present_initially(
self, mock_ipaddress, mock_fqdns, mock_nwgroup, _
):
value2 = mock.Mock()
value = mock.Mock()
value.get.return_value = value2
dummyvalue3 = mock.Mock()
dummyvalue4 = mock.Mock()
dummyvalue4.get.return_value = []
dummyvalue3.get.return_value = dummyvalue4
value2.get.return_value = [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
mock_ipaddress.return_value = value
mock_nwgroup.return_value = dummyvalue3
mock_fqdns.return_value = dummyvalue3
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.URL = "/accesspolicies/<accesspolicyid>/accessrules/<accessruleid>"
rule_obj.destination_network(action="add", name="someExistingObjectName3")
self.assertEqual(len(rule_obj.destinationNetworks["objects"]), 1)
self.assertEqual(
rule_obj.destinationNetworks["objects"][0],
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
)
self.assertEqual(len(rule_obj.destinationNetworks["literals"]), 1)
self.assertEqual(rule_obj.destinationNetworks["literals"]["10.0.0.1"], "host")
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_with_both_name_and_literals_given(self, _):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
with self.assertRaises(ValueError):
rule_obj.destination_network(
action="add", name="someObjectName", literal="10.0.0.1"
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_remove_for_literals_with_multiple_literals_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {
"literals": {"10.0.0.1": "host", "10.0.0.2": "host", "10.0.0.3": "host"}
}
rule_obj.destination_network(action="remove", literal="10.0.0.1")
self.assertEqual(len(rule_obj.destinationNetworks["literals"]), 2)
self.assertIsNotNone(rule_obj.destinationNetworks["literals"]["10.0.0.2"])
self.assertIsNotNone(rule_obj.destinationNetworks["literals"]["10.0.0.3"])
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_remove_for_literals_with_only_one_literal_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.destination_network(action="remove", literal="10.0.0.1")
self.assertEqual(len(rule_obj.destinationNetworks["literals"]), 0)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_remove_for_objects_with_only_one_object_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {
"objects": [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
}
]
}
rule_obj.destination_network(action="remove", name="someExistingObjectName1")
self.assertNotIn("destinationNetworks", self.__dict__)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_remove_for_objects_with_multiple_objects_present(
self, _
):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {
"objects": [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
}
rule_obj.destination_network(action="remove", name="someExistingObjectName3")
self.assertEqual(len(rule_obj.destinationNetworks["objects"]), 2)
self.assertEqual(
rule_obj.destinationNetworks["objects"][0],
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
)
self.assertEqual(
rule_obj.destinationNetworks["objects"][1],
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_clear_for_objects(self, _):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {
"objects": [
{
"name": "someExistingObjectName1",
"id": "someExistingObjectId1",
"type": "someExistingObjectType1",
},
{
"name": "someExistingObjectName2",
"id": "someExistingObjectId2",
"type": "someExistingObjectType2",
},
{
"name": "someExistingObjectName3",
"id": "someExistingObjectId3",
"type": "someExistingObjectType3",
},
]
}
rule_obj.destination_network(action="clear")
        self.assertNotIn("destinationNetworks", rule_obj.__dict__)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_destination_network_clear_for_literals(self, _):
rule_obj = api_objects.AccessRules(fmc=mock.Mock())
rule_obj.destinationNetworks = {"literals": {"10.0.0.1": "host"}}
rule_obj.destination_network(action="clear")
        self.assertNotIn("destinationNetworks", rule_obj.__dict__)
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_parse_kwargs_with_source_networks(self, _):
rule_obj = api_objects.AccessRules(
fmc=mock.Mock(),
sourceNetworks={
"objects": [{"name": "someExistingObjectName1"}],
"literals": [{"type": "host", "value": "10.0.0.1"}],
},
)
self.assertEqual(
[{"name": "someExistingObjectName1"}], rule_obj.sourceNetworks["objects"]
)
self.assertEqual({"10.0.0.1": "host"}, rule_obj.sourceNetworks["literals"])
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_parse_kwargs_with_destination_networks(self, _):
rule_obj = api_objects.AccessRules(
fmc=mock.Mock(),
destinationNetworks={
"objects": [{"name": "someExistingObjectName1"}],
"literals": [{"type": "host", "value": "10.0.0.1"}],
},
)
self.assertEqual(
[{"name": "someExistingObjectName1"}],
rule_obj.destinationNetworks["objects"],
)
self.assertEqual({"10.0.0.1": "host"}, rule_obj.destinationNetworks["literals"])
@mock.patch("fmcapi.api_objects.ACPRule.variable_set")
def test_ACPRule_format_data_with_source_networks(self, _):
rule_obj = api_objects.AccessRules(
fmc=mock.Mock(),
sourceNetworks={
"objects": [{"name": "someExistingObjectName1"}],
"literals": [{"type": "host", "value": "10.0.0.1"}],
},
)
data = rule_obj.format_data()
self.assertEqual(
[{"name": "someExistingObjectName1"}], data["sourceNetworks"]["objects"]
)
self.assertEqual(
[{"type": "host", "value": "10.0.0.1"}], data["sourceNetworks"]["literals"]
)
|
|
# MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
import tensorflow_models as tf_models
import tensorflow_models.relaxed_onehot_categorical_fixed_noise as dist_fixed
import tensorflow_models.gumbel as gumbel
def create_placeholders(settings):
K = settings['count_categories']
latent_batchshape = (settings['batch_size'], settings['latent_dimension'], K)
x = tf.placeholder(tf.float32, shape=tf_models.batchshape(settings), name='samples')
z = tf.placeholder(tf.float32, shape=latent_batchshape, name='codes')
return x, z
#def create_prior(settings):
# dist_prior = tf.contrib.distributions.Bernoulli(probs=0.5, dtype=tf.float32)
# return tf.identity(dist_prior.sample(sample_shape=tf_models.latentshape(settings)) * 2. - 1., name='p_z/sample')
def create_prior(settings):
temperature_prior = 0.5
K = settings['count_categories']
latent_batchshape = (settings['batch_size'], settings['latent_dimension'], K)
dist_prior = tf.contrib.distributions.ExpRelaxedOneHotCategorical(temperature=temperature_prior, logits=tf.constant(0., shape=latent_batchshape))
logits_sample = tf.cast(dist_prior.sample(), dtype=tf.float32)
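	# The ExpRelaxedOneHotCategorical sample lives in log-probability space; exponentiating
	# maps it back onto the simplex ([0, 1]) and the affine * 2 - 1 below rescales each
	# coordinate into [-1, 1] before it is exposed as the prior sample.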
z_sample = tf.exp(logits_sample) *2. - 1.
return tf.identity(z_sample, name='p_z/sample')
def create_encoder(settings, reuse=True):
temperature = 2./3.
encoder_network = settings['architecture']['encoder']['fn']
x_placeholder = tf_models.samples_placeholder()
assert(not x_placeholder is None)
with tf.variable_scope('encoder', reuse=reuse):
logits_z = encoder_network(settings, x_placeholder, is_training=False)
dist_z_given_x = tf.contrib.distributions.ExpRelaxedOneHotCategorical(temperature=temperature, logits=logits_z)
logits_sample = tf.cast(dist_z_given_x.sample(), dtype=tf.float32)
z_sample = tf.exp(logits_sample) * 2. - 1.
encoder = tf.identity(z_sample, name='q_z_given_x/sample')
return encoder
def create_decoder(settings, reuse=True):
decoder_network = settings['architecture']['decoder']['fn']
z_placeholder = tf_models.codes_placeholder()
assert(not z_placeholder is None)
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, tf.reshape(z_placeholder, (settings['batch_size'], -1)), is_training=False)
#dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=logits_x, dtype=tf.float32)
#decoder = tf.identity(dist_x_given_z.sample(), name='p_x_given_z/sample')
#return decoder
return tf.identity(tf.nn.sigmoid(logits_x), name='p_x_given_z/sample')
def create_probs(settings, inputs, is_training, reuse=False):
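	# Sketch of this function's contract (inferred from the code below): it returns the three
	# per-sample log-probability terms -- log p(x|z), log p(z) and log q(z|x) -- from which a
	# caller can assemble an ELBO such as lg_p_x_given_z + lg_p_z - lg_q_z_given_x.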
temperature = 2./3.
encoder_network = settings['architecture']['encoder']['fn']
decoder_network = settings['architecture']['decoder']['fn']
K = settings['count_categories']
latent_batchshape = (settings['batch_size'], settings['latent_dimension'], K)
#dist_prior = tf_models.standard_normal(tf_models.latentshape(settings))
#dist_prior = tf.contrib.distributions.Bernoulli(probs=0.5, dtype=tf.float32)
temperature_prior = 0.5
prior_prob = settings['prior_prob']
logits_prior_prob = math.log(prior_prob / (1. - prior_prob))
dist_prior = tf.contrib.distributions.ExpRelaxedOneHotCategorical(temperature=temperature_prior, logits=tf.constant(0., shape=latent_batchshape))
	# Use recognition network to determine the logits of the relaxed one-hot categorical distribution in latent space
with tf.variable_scope('encoder', reuse=reuse):
logits_z = tf.reshape(encoder_network(settings, inputs, is_training=is_training), latent_batchshape)
dist_z_given_x = tf.contrib.distributions.ExpRelaxedOneHotCategorical(temperature=temperature, logits=logits_z)
# Draw one sample z from Gumbel-softmax
logits_sample = tf.cast(dist_z_given_x.sample(), dtype=tf.float32)
# DEBUG
#print('logits_sample.shape', logits_sample.shape)
#raise Exception()
# NOTE: Is this what is meant by "this running average was subtracted from the activity of the layer before it was updated"?
#z_sample = tf.sigmoid(logits_sample) * 2. - 1. #- z_sample_avg
z_sample = tf.exp(logits_sample) * 2. - 1.
#z_sample = logits_sample
# Use generator to determine mean of Bernoulli distribution of reconstructed input
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, tf.reshape(z_sample, (settings['batch_size'], -1)), is_training=is_training)
dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32)
	# NOTE: x | z is defined over each pixel separately, whereas the prior on z is multivariate
	# Hence the need for the tf.reduce_sum op on the former, to get down to a single number for each sample
lg_p_x_given_z = tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(inputs)), 1, name='p_x_given_z/log_prob')
lg_p_z = tf.identity(tf.reduce_sum(dist_prior.log_prob(logits_sample), 1), name='p_z/log_prob')
lg_q_z_given_x = tf.identity(tf.reduce_sum(dist_z_given_x.log_prob(logits_sample), 1), name='q_z_given_x/log_prob')
#print(dist_prior.log_prob(logits_sample).shape)
#print(dist_z_given_x.log_prob(logits_sample).shape)
#raise Exception()
return lg_p_x_given_z, lg_p_z, lg_q_z_given_x
"""def lg_likelihood(x, z, settings, reuse=True, is_training=False):
decoder_network = settings['architecture']['decoder']['fn']
K = settings['count_categories']
latent_batchshape = (settings['batch_size'], settings['latent_dimension'], K)
z = tf.reshape(z, latent_batchshape)
real_z = z - tf.expand_dims(tf.reduce_logsumexp(z, axis=2), axis=-1)
real_z = tf.exp(z)*2. - 1.
#print(z.shape, real_z.shape, tf.reduce_logsumexp(z, axis=2).shape, tf.expand_dims(tf.reduce_logsumexp(z, axis=2), axis=-1).shape)
#raise Exception()
with tf.variable_scope('model'):
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, tf.reshape(real_z, (settings['batch_size'], -1)), is_training=is_training)
dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32)
#print('lg_likelihood.shape', tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(x)), 1).shape)
return tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(x)), 1)"""
def lg_likelihood(x, z, settings, reuse=True, is_training=False):
decoder_network = settings['architecture']['decoder']['fn']
latent_batchshape = (settings['batch_size'], settings['latent_dimension'], 1)
real_z1 = tf.reshape(tf.sigmoid(z)*2. - 1., latent_batchshape)
real_z2 = tf.reshape((1.-tf.sigmoid(z))*2. - 1., latent_batchshape)
real_z = tf.reshape(tf.concat([real_z1, real_z2], axis=2), (settings['batch_size'],-1))
with tf.variable_scope('model'):
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, real_z, is_training=is_training)
dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32)
#print('lg_likelihood.shape', tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(x)), 1).shape)
return tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(x)), 1)
"""def lg_prior(z, settings, reuse=True, is_training=False):
temperature_prior = 0.5
K = settings['count_categories']
latent_batchshape = (settings['batch_size'], settings['latent_dimension'], K)
#dist_prior = tf.contrib.distributions.ExpRelaxedOneHotCategorical(temperature=temperature_prior, logits=tf.constant(0., shape=latent_batchshape))
dist_prior = gumbel._Gumbel(loc=0., scale=1./temperature_prior)
#return tf.reduce_sum(dist_prior.log_prob(tf.reshape(z, latent_batchshape)), [1,2])
#print('lg_prior.shape', tf.reduce_sum(tf_models.flatten(dist_prior.log_prob(z)), 1).shape)
return tf.reduce_sum(tf_models.flatten(dist_prior.log_prob(z)), 1)
def sample_prior(settings):
temperature_prior = 0.5
K = settings['count_categories']
latent_batchshape = (settings['batch_size'], settings['latent_dimension'], K)
#dist_prior = tf.contrib.distributions.ExpRelaxedOneHotCategorical(temperature=temperature_prior, logits=tf.constant(0., shape=latent_batchshape))
dist_prior = gumbel._Gumbel(loc=0., scale=1./temperature_prior)
#print('sample_prior.shape', tf.reshape(dist_prior.sample(latent_batchshape), (settings['batch_size'], -1)).shape)
return tf.identity(tf.cast(tf.reshape(dist_prior.sample(latent_batchshape), (settings['batch_size'], -1)), dtype=tf.float32), name='p_z/sample')"""
def lg_prior(z, settings, reuse=True, is_training=False):
temperature = 0.5
prior_prob = settings['prior_prob']
logits_prior_prob = math.log(prior_prob / (1. - prior_prob))
dist_prior = tf.contrib.distributions.Logistic(loc=logits_prior_prob/temperature, scale=1./temperature)
#print('lg_prior.shape', tf.reduce_sum(tf_models.flatten(dist_prior.log_prob(z)), 1).shape)
return tf.reduce_sum(tf_models.flatten(dist_prior.log_prob(z)), 1)
def sample_prior(settings):
temperature = 0.5
prior_prob = settings['prior_prob']
logits_prior_prob = math.log(prior_prob / (1. - prior_prob))
dist_prior = tf.contrib.distributions.Logistic(loc=logits_prior_prob/temperature, scale=1./temperature)
#print('sample_prior.shape', tf.cast(dist_prior.sample(sample_shape=tf_models.latentshape(settings)), dtype=tf.float32).shape)
return tf.identity(tf.cast(dist_prior.sample(sample_shape=tf_models.latentshape(settings)), dtype=tf.float32), name='p_z/sample')
|
|
#!/usr/bin/env python
# vim:ts=4:sw=4:et:
from __future__ import absolute_import, division, print_function
import binascii
import collections
import inspect
import os
import sys
import tempfile
import uuid
from pywatchman import (
SocketConnectError,
SocketTimeout,
Transport,
WatchmanError,
bser,
client,
compat,
load,
pybser,
)
# no unicode literals
try:
import unittest2 as unittest
except ImportError:
import unittest
if os.path.basename(bser.__file__) == "pybser.py":
raise Exception(
"bser module resolved to pybser! Something is broken in your build. __file__={!r}, sys.path={!r}".format(
bser.__file__, sys.path
)
)
PILE_OF_POO = u"\U0001F4A9"
NON_UTF8_STRING = b"\xff\xff\xff"
class TestSocketTimeout(unittest.TestCase):
def test_exception_handling(self):
try:
raise SocketTimeout("should not raise")
except WatchmanError:
pass
class TestTransportErrorHandling(unittest.TestCase):
def test_transport_error(self):
buf = '{"foo":"bar"}'
failAfterBytesRead = 5
class FakeFailingTransport(Transport):
def __init__(self, sockpath, timeout):
self.readBuf = buf
self.readBufPos = 0
self.writeBuf = []
self.closed = False
def close(self):
self.closed = True
def readBytes(self, size):
readEnd = self.readBufPos + size
if readEnd > failAfterBytesRead:
raise IOError(23, "fnord")
elif readEnd > len(self.readBuf):
return ""
read = self.readBuf[self.readBufPos : self.readBufPos + size]
self.readBufPos += size
return read
def write(self, buf):
self.writeBuf.extend(buf)
c = client(
sockpath="",
transport=FakeFailingTransport,
sendEncoding="json",
recvEncoding="json",
)
try:
c.query("foobarbaz")
self.assertTrue(False, "expected a WatchmanError")
except WatchmanError as e:
self.assertIn(
"I/O error communicating with watchman daemon: "
+ "errno=23 errmsg=fnord, while executing "
+ "('foobarbaz',)",
str(e),
)
except Exception as e:
self.assertTrue(False, "expected a WatchmanError, but got " + str(e))
class TestLocalTransport(unittest.TestCase):
def test_missing_socket_file_raises_connect_error(self):
socket_path = self.make_deleted_socket_path()
c = client(sockpath=socket_path, transport="local")
with self.assertRaises(SocketConnectError):
with c:
pass
def make_deleted_socket_path(self):
if os.name == "nt":
path = self.make_deleted_windows_socket_path()
else:
path = self.make_deleted_unix_socket_path()
self.assertFalse(os.path.exists(path))
return path
def make_deleted_windows_socket_path(self):
return "\\\\.\\pipe\\pywatchman-test-{}".format(uuid.uuid1().hex)
def make_deleted_unix_socket_path(self):
temp_dir = tempfile.mkdtemp()
return os.path.join(temp_dir, "socket")
def expand_bser_mods(test_class):
"""
A decorator function used to create a class for bser and pybser
variants of the test.
"""
# We do some rather hacky things here to define new test class types
# in our caller's scope. This is needed so that the unittest TestLoader
# will find the subclasses we define.
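    # For example, applying this decorator to TestBSERDump (below) defines
    # TestBSERDumpBser and TestBSERDumpPyBser in the calling module.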
caller_scope = inspect.currentframe().f_back.f_locals
flavors = [(bser, "Bser"), (pybser, "PyBser")]
for (mod, suffix) in flavors:
def make_class(mod, suffix):
subclass_name = test_class.__name__ + suffix
# Define a new class that derives from the input class
class MatrixTest(test_class):
def init_bser_mod(self):
self.bser_mod = mod
# Set the name and module information on our new subclass
MatrixTest.__name__ = subclass_name
MatrixTest.__qualname__ = subclass_name
MatrixTest.__module__ = test_class.__module__
caller_scope[subclass_name] = MatrixTest
make_class(mod, suffix)
class FakeFile(object):
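    """Minimal file-like object exposing readinto(), used to exercise
    bser_mod.load() against an in-memory buffer."""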
def __init__(self, data):
self._data = data
self._ptr = 0
def readinto(self, buf):
l = len(buf)
if len(self._data) - self._ptr < l:
return None
buf[:] = self._data[self._ptr : self._ptr + l]
self._ptr += l
return l
@expand_bser_mods
class TestBSERDump(unittest.TestCase):
def setUp(self):
self.init_bser_mod()
def raw(self, structured_input, bser_output):
enc = self.bser_mod.dumps(structured_input)
self.assertEqual(enc, bser_output)
def roundtrip(self, val, mutable=True, value_encoding=None, value_errors=None):
enc = self.bser_mod.dumps(val)
# print("# %s --> %s" % (repr(val),
# binascii.hexlify(enc).decode('ascii')))
dec = self.bser_mod.loads(
enc, mutable, value_encoding=value_encoding, value_errors=value_errors
)
self.assertEqual(val, dec)
fp = FakeFile(enc)
dec = self.bser_mod.load(
fp, mutable, value_encoding=value_encoding, value_errors=value_errors
)
self.assertEqual(val, dec)
def munged(self, val, munged, value_encoding=None, value_errors=None):
enc = self.bser_mod.dumps(val)
# print("# %s --> %s" % (repr(val),
# binascii.hexlify(enc).decode('ascii')))
dec = self.bser_mod.loads(
enc, value_encoding=value_encoding, value_errors=value_errors
)
self.assertEqual(munged, dec)
def test_raw(self):
self.raw(
{"name": "Tom"},
b"\x00\x01\x05\x10\x00\x00\x00\x01\x03\x01\x02\x03\x04name\x02"
b"\x03\x03Tom",
)
self.raw(
{"names": ["Tom", "Jerry"]},
b"\x00\x01\x05\x1c\x00\x00\x00\x01\x03\x01\x02\x03\x05names\x00"
b"\x03\x02\x02\x03\x03Tom\x02\x03\x05Jerry",
)
self.raw(
["Tom", "Jerry"],
b"\x00\x01\x05\x11\x00\x00\x00\x00\x03\x02\x02\x03\x03Tom\x02"
b"\x03\x05Jerry",
)
self.raw(
[1, 123, 12345, 1234567, 12345678912345678],
b"\x00\x01\x05\x18\x00\x00\x00\x00\x03\x05\x03\x01\x03{\x0490"
b"\x05\x87\xd6\x12\x00\x06N\xd6\x14^T\xdc+\x00",
)
def test_int(self):
self.roundtrip(1)
self.roundtrip(0x100)
self.roundtrip(0x10000)
self.roundtrip(0x10000000)
self.roundtrip(0x1000000000)
def test_negative_int(self):
self.roundtrip(-0x80)
self.roundtrip(-0x8000)
self.roundtrip(-0x80000000)
self.roundtrip(-0x8000000000000000)
def test_float(self):
self.roundtrip(1.5)
def test_bool(self):
self.roundtrip(True)
self.roundtrip(False)
def test_none(self):
self.roundtrip(None)
def test_string(self):
self.roundtrip(b"hello")
# For Python 3, here we can only check that a Unicode string goes in,
# not that a Unicode string comes out.
self.munged(u"Hello", b"Hello")
self.roundtrip(u"Hello", value_encoding="utf8")
self.roundtrip(u"Hello", value_encoding="ascii")
self.roundtrip(u"Hello" + PILE_OF_POO, value_encoding="utf8")
# can't use the with form here because Python 2.6
self.assertRaises(
UnicodeDecodeError,
self.roundtrip,
u"Hello" + PILE_OF_POO,
value_encoding="ascii",
)
self.munged(
u"Hello" + PILE_OF_POO,
u"Hello",
value_encoding="ascii",
value_errors="ignore",
)
self.roundtrip(b"hello" + NON_UTF8_STRING)
self.assertRaises(
UnicodeDecodeError,
self.roundtrip,
b"hello" + NON_UTF8_STRING,
value_encoding="utf8",
)
self.munged(
b"hello" + NON_UTF8_STRING,
u"hello",
value_encoding="utf8",
value_errors="ignore",
)
# TODO: test non-UTF8 strings with surrogateescape in Python 3
ustr = u"\xe4\xf6\xfc"
self.munged(ustr, ustr.encode("utf-8"))
def test_list(self):
self.roundtrip([1, 2, 3])
self.roundtrip([1, b"helo", 2.5, False, None, True, 3])
def test_tuple(self):
self.munged((1, 2, 3), [1, 2, 3])
self.roundtrip((1, 2, 3), mutable=False)
def test_dict(self):
self.roundtrip({"hello": b"there"})
self.roundtrip({"hello": u"there"}, value_encoding="utf8")
self.roundtrip({"hello": u"there"}, value_encoding="ascii")
self.roundtrip({"hello": u"there" + PILE_OF_POO}, value_encoding="utf8")
# can't use the with form here because Python 2.6
self.assertRaises(
UnicodeDecodeError,
self.roundtrip,
{"hello": u"there" + PILE_OF_POO},
value_encoding="ascii",
)
self.munged(
{"Hello": u"there" + PILE_OF_POO},
{"Hello": u"there"},
value_encoding="ascii",
value_errors="ignore",
)
self.roundtrip({"Hello": b"there" + NON_UTF8_STRING})
self.assertRaises(
UnicodeDecodeError,
self.roundtrip,
{"hello": b"there" + NON_UTF8_STRING},
value_encoding="utf8",
)
self.munged(
{"Hello": b"there" + NON_UTF8_STRING},
{"Hello": u"there"},
value_encoding="utf8",
value_errors="ignore",
)
obj = self.bser_mod.loads(self.bser_mod.dumps({"hello": b"there"}), False)
self.assertEqual(1, len(obj))
self.assertEqual(b"there", obj.hello)
self.assertEqual(b"there", obj[u"hello"])
if not compat.PYTHON3:
self.assertEqual(b"there", obj[b"hello"])
self.assertEqual(b"there", obj[0])
# make sure this doesn't crash
self.assertRaises(Exception, lambda: obj[45.25])
hello, = obj # sequence/list assignment
self.assertEqual(b"there", hello)
def assertItemAttributes(self, dictish, attrish):
self.assertEqual(len(dictish), len(attrish))
# Use items for compatibility across Python 2 and 3.
for k, v in dictish.items():
self.assertEqual(v, getattr(attrish, k))
def test_template(self):
        # since we can't generate the template bser output, here's a
        # blob from the C test suite in watchman
templ = (
b"\x00\x01\x03\x28"
+ b"\x0b\x00\x03\x02\x02\x03\x04\x6e\x61\x6d\x65\x02"
+ b"\x03\x03\x61\x67\x65\x03\x03\x02\x03\x04\x66\x72"
+ b"\x65\x64\x03\x14\x02\x03\x04\x70\x65\x74\x65\x03"
+ b"\x1e\x0c\x03\x19"
)
dec = self.bser_mod.loads(templ)
exp = [
{"name": b"fred", "age": 20},
{"name": b"pete", "age": 30},
{"name": None, "age": 25},
]
self.assertEqual(exp, dec)
res = self.bser_mod.loads(templ, False)
for i in range(0, len(exp)):
self.assertItemAttributes(exp[i], res[i])
def test_pdu_info(self):
enc = self.bser_mod.dumps(1)
DEFAULT_BSER_VERSION = 1
DEFAULT_BSER_CAPABILITIES = 0
self.assertEqual(
(DEFAULT_BSER_VERSION, DEFAULT_BSER_CAPABILITIES, len(enc)),
self.bser_mod.pdu_info(enc),
)
# try a bigger one; prove that we get the correct length
# even though we receive just a portion of the complete
# data
enc = self.bser_mod.dumps([1, 2, 3, "hello there, much larger"])
self.assertEqual(
(DEFAULT_BSER_VERSION, DEFAULT_BSER_CAPABILITIES, len(enc)),
self.bser_mod.pdu_info(enc[0:7]),
)
def test_pdu_len(self):
enc = self.bser_mod.dumps(1)
self.assertEqual(len(enc), self.bser_mod.pdu_len(enc))
# try a bigger one; prove that we get the correct length
# even though we receive just a portion of the complete
# data
enc = self.bser_mod.dumps([1, 2, 3, "hello there, much larger"])
self.assertEqual(len(enc), self.bser_mod.pdu_len(enc[0:7]))
def test_garbage(self):
# can't use the with form here because Python 2.6
self.assertRaises(ValueError, self.bser_mod.loads, b"\x00\x01\n")
self.assertRaises(ValueError, self.bser_mod.loads, b"\x00\x01\x04\x01\x00\x02")
self.assertRaises(ValueError, self.bser_mod.loads, b"\x00\x01\x07")
self.assertRaises(ValueError, self.bser_mod.loads, b"\x00\x01\x03\x01\xff")
self.assertRaises(ValueError, self.bser_mod.pdu_info, b"\x00\x02")
if __name__ == "__main__":
suite = load_tests(unittest.TestLoader())
unittest.TextTestRunner().run(suite)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations:
"""InboundNatRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.InboundNatRuleListResult"]:
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.InboundNatRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.InboundNatRule":
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InboundNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.InboundNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> "_models.InboundNatRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> AsyncLROPoller["_models.InboundNatRule"]:
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
rule operation.
:type inbound_nat_rule_parameters: ~azure.mgmt.network.v2018_10_01.models.InboundNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either InboundNatRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.InboundNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
|
|
from django import forms
from django.contrib.auth import authenticate
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
from .. import scope
from ..constants import RESPONSE_TYPE_CHOICES, SCOPES
from ..forms import OAuthForm, OAuthValidationError
from ..scope import SCOPE_NAMES
from ..utils import now
from .models import Client, Grant, RefreshToken
class ClientForm(forms.ModelForm):
"""
Form to create new consumers.
"""
class Meta:
model = Client
fields = ('name', 'url', 'redirect_uri', 'client_type')
def save(self, user=None, **kwargs):
self.instance.user = user
return super(ClientForm, self).save(**kwargs)
class ClientAuthForm(forms.Form):
"""
Client authentication form. Required to make sure that we're dealing with a
real client. Form is used in :attr:`provider.oauth2.backends` to validate
the client.
"""
client_id = forms.CharField()
client_secret = forms.CharField()
def clean(self):
data = self.cleaned_data
try:
client = Client.objects.get(client_id=data.get('client_id'),
client_secret=data.get('client_secret'))
except Client.DoesNotExist:
raise forms.ValidationError(_("Client could not be validated with "
"key pair."))
data['client'] = client
return data
class ScopeChoiceField(forms.ChoiceField):
"""
    Custom form field that separates values on space as defined in
:rfc:`3.3`.
"""
widget = forms.SelectMultiple
def to_python(self, value):
if not value:
return []
# New in Django 1.6: value may come in as a string.
# Instead of raising an `OAuthValidationError`, try to parse and
# ultimately return an empty list if nothing remains -- this will
# eventually raise an `OAuthValidationError` in `validate` where
            # it should be anyway.
if not isinstance(value, (list, tuple)):
value = value.split(' ')
# Split values into list
return u' '.join([smart_text(val) for val in value]).split(u' ')
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise OAuthValidationError({'error': 'invalid_request'})
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise OAuthValidationError({
'error': 'invalid_request',
'error_description': _("'%s' is not a valid scope.") % \
val})
class ScopeMixin(object):
"""
Form mixin to clean scope fields.
"""
def clean_scope(self):
"""
The scope is assembled by combining all the set flags into a single
integer value which we can later check again for set bits.
If *no* scope is set, we return the default scope which is the first
defined scope in :attr:`provider.constants.SCOPES`.
"""
default = SCOPES[0][0]
flags = self.cleaned_data.get('scope', [])
return scope.to_int(default=default, *flags)
class AuthorizationRequestForm(ScopeMixin, OAuthForm):
"""
This form is used to validate the request data that the authorization
endpoint receives from clients.
Included data is specified in :rfc:`4.1.1`.
"""
# Setting all required fields to false to explicitly check by hand
# and use custom error messages that can be reused in the OAuth2
# protocol
response_type = forms.CharField(required=False)
"""
``"code"`` or ``"token"`` depending on the grant type.
"""
redirect_uri = forms.URLField(required=False)
"""
Where the client would like to redirect the user
back to. This has to match whatever value was saved while creating
the client.
"""
state = forms.CharField(required=False)
"""
Opaque - just pass back to the client for validation.
"""
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
"""
The scope that the authorization should include.
"""
def clean_response_type(self):
"""
:rfc:`3.1.1` Lists of values are space delimited.
"""
response_type = self.cleaned_data.get('response_type')
if not response_type:
raise OAuthValidationError({'error': 'invalid_request',
'error_description': "No 'response_type' supplied."})
types = response_type.split(" ")
for type in types:
if type not in RESPONSE_TYPE_CHOICES:
raise OAuthValidationError({
'error': 'unsupported_response_type',
'error_description': u"'%s' is not a supported response "
"type." % type})
return response_type
def clean_redirect_uri(self):
"""
:rfc:`3.1.2` The redirect value has to match what was saved on the
authorization server.
"""
redirect_uri = self.cleaned_data.get('redirect_uri')
if redirect_uri:
            if redirect_uri != self.client.redirect_uri:
raise OAuthValidationError({
'error': 'invalid_request',
'error_description': _("The requested redirect didn't "
"match the client settings.")})
return redirect_uri
class AuthorizationForm(ScopeMixin, OAuthForm):
"""
A form used to ask the resource owner for authorization of a given client.
"""
authorize = forms.BooleanField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
def save(self, **kwargs):
authorize = self.cleaned_data.get('authorize')
if not authorize:
return None
grant = Grant()
grant.scope = self.cleaned_data.get('scope')
return grant
class RefreshTokenGrantForm(ScopeMixin, OAuthForm):
"""
Checks and returns a refresh token.
"""
refresh_token = forms.CharField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
def clean_refresh_token(self):
token = self.cleaned_data.get('refresh_token')
if not token:
raise OAuthValidationError({'error': 'invalid_request'})
try:
token = RefreshToken.objects.get(token=token,
expired=False, client=self.client)
except RefreshToken.DoesNotExist:
raise OAuthValidationError({'error': 'invalid_grant'})
return token
def clean(self):
"""
Make sure that the scope is less or equal to the previous scope!
"""
data = self.cleaned_data
want_scope = data.get('scope') or 0
refresh_token = data.get('refresh_token')
access_token = getattr(refresh_token, 'access_token', None) if \
refresh_token else \
None
has_scope = access_token.scope if access_token else 0
# Only check if we've actually got a scope in the data
# (read: All fields have been cleaned)
        if want_scope != 0 and not scope.check(want_scope, has_scope):
raise OAuthValidationError({'error': 'invalid_scope'})
return data
class AuthorizationCodeGrantForm(ScopeMixin, OAuthForm):
"""
Check and return an authorization grant.
"""
code = forms.CharField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
def clean_code(self):
code = self.cleaned_data.get('code')
if not code:
raise OAuthValidationError({'error': 'invalid_request'})
try:
self.cleaned_data['grant'] = Grant.objects.get(
code=code, client=self.client, expires__gt=now())
except Grant.DoesNotExist:
raise OAuthValidationError({'error': 'invalid_grant'})
return code
def clean(self):
"""
Make sure that the scope is less or equal to the scope allowed on the
grant!
"""
data = self.cleaned_data
want_scope = data.get('scope') or 0
grant = data.get('grant')
has_scope = grant.scope if grant else 0
# Only check if we've actually got a scope in the data
# (read: All fields have been cleaned)
        if want_scope != 0 and not scope.check(want_scope, has_scope):
raise OAuthValidationError({'error': 'invalid_scope'})
return data
class PasswordGrantForm(ScopeMixin, OAuthForm):
"""
Validate the password of a user on a password grant request.
"""
username = forms.CharField(required=False)
password = forms.CharField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
def clean_username(self):
username = self.cleaned_data.get('username')
if not username:
raise OAuthValidationError({'error': 'invalid_request'})
return username
def clean_password(self):
password = self.cleaned_data.get('password')
if not password:
raise OAuthValidationError({'error': 'invalid_request'})
return password
def clean(self):
data = self.cleaned_data
user = authenticate(username=data.get('username'),
password=data.get('password'))
if user is None:
raise OAuthValidationError({'error': 'invalid_grant'})
data['user'] = user
return data
class PublicPasswordGrantForm(PasswordGrantForm):
client_id = forms.CharField(required=True)
grant_type = forms.CharField(required=True)
def clean_grant_type(self):
grant_type = self.cleaned_data.get('grant_type')
if grant_type != 'password':
raise OAuthValidationError({'error': 'invalid_grant'})
return grant_type
def clean(self):
data = super(PublicPasswordGrantForm, self).clean()
try:
client = Client.objects.get(client_id=data.get('client_id'))
except Client.DoesNotExist:
raise OAuthValidationError({'error': 'invalid_client'})
if client.client_type != 1: # public
raise OAuthValidationError({'error': 'invalid_client'})
data['client'] = client
return data
|
|
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.object_storage import base
from tempest import clients
from tempest import config
from tempest import test
CONF = config.CONF
class ObjectACLsNegativeTest(base.BaseObjectTest):
@classmethod
def setup_credentials(cls):
super(ObjectACLsNegativeTest, cls).setup_credentials()
cls.os_operator = clients.Manager(
cls.isolated_creds.get_creds_by_roles(
roles=[CONF.object_storage.operator_role], force_new=True))
@classmethod
def resource_setup(cls):
super(ObjectACLsNegativeTest, cls).resource_setup()
cls.test_auth_data = cls.os_operator.auth_provider.auth_data
def setUp(self):
super(ObjectACLsNegativeTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
self.container_client.create_container(self.container_name)
def tearDown(self):
self.delete_containers([self.container_name])
super(ObjectACLsNegativeTest, self).tearDown()
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('af587587-0c24-4e15-9822-8352ce711013')
def test_write_object_without_using_creds(self):
# trying to create object with empty headers
# X-Auth-Token is not provided
object_name = data_utils.rand_name(name='Object')
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=None
)
self.assertRaises(lib_exc.Unauthorized,
self.object_client.create_object,
self.container_name, object_name, 'data', headers={})
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('af85af0b-a025-4e72-a90e-121babf55720')
def test_delete_object_without_using_creds(self):
# create object
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(self.container_name,
object_name, 'data')
# trying to delete object with empty headers
# X-Auth-Token is not provided
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=None
)
self.assertRaises(lib_exc.Unauthorized,
self.object_client.delete_object,
self.container_name, object_name)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('63d84e37-55a6-42e2-9e5f-276e60e26a00')
def test_write_object_with_non_authorized_user(self):
# attempt to upload another file using non-authorized user
# User provided token is forbidden. ACL are not set
object_name = data_utils.rand_name(name='Object')
# trying to create object with non-authorized user
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.create_object,
self.container_name, object_name, 'data', headers={})
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('abf63359-be52-4feb-87dd-447689fc77fd')
def test_read_object_with_non_authorized_user(self):
# attempt to read object using non-authorized user
# User provided token is forbidden. ACL are not set
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(
self.container_name, object_name, 'data')
self.assertHeaders(resp, 'Object', 'PUT')
# trying to get object with non authorized user token
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.get_object,
self.container_name, object_name)
@test.attr(type=['negative', 'gate'])
@test.idempotent_id('7343ac3d-cfed-4198-9bb0-00149741a492')
def test_delete_object_with_non_authorized_user(self):
# attempt to delete object using non-authorized user
# User provided token is forbidden. ACL are not set
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(
self.container_name, object_name, 'data')
self.assertHeaders(resp, 'Object', 'PUT')
# trying to delete object with non-authorized user token
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.delete_object,
self.container_name, object_name)
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('9ed01334-01e9-41ea-87ea-e6f465582823')
def test_read_object_without_rights(self):
# attempt to read object using non-authorized user
# update X-Container-Read metadata ACL
cont_headers = {'X-Container-Read': 'badtenant:baduser'}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(self.container_name,
object_name, 'data')
self.assertHeaders(resp, 'Object', 'PUT')
# Trying to read the object without rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.get_object,
self.container_name, object_name)
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('a3a585a7-d8cf-4b65-a1a0-edc2b1204f85')
def test_write_object_without_rights(self):
# attempt to write object using non-authorized user
# update X-Container-Write metadata ACL
cont_headers = {'X-Container-Write': 'badtenant:baduser'}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# Trying to write the object without rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
object_name = data_utils.rand_name(name='Object')
self.assertRaises(lib_exc.Forbidden,
self.object_client.create_object,
self.container_name,
object_name, 'data', headers={})
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('8ba512ad-aa6e-444e-b882-2906a0ea2052')
def test_write_object_without_write_rights(self):
# attempt to write object using non-authorized user
# update X-Container-Read and X-Container-Write metadata ACL
tenant_name = self.os_operator.credentials.tenant_name
username = self.os_operator.credentials.username
cont_headers = {'X-Container-Read':
tenant_name + ':' + username,
'X-Container-Write': ''}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# Trying to write the object without write rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
object_name = data_utils.rand_name(name='Object')
self.assertRaises(lib_exc.Forbidden,
self.object_client.create_object,
self.container_name,
object_name, 'data', headers={})
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('b4e366f8-f185-47ab-b789-df4416f9ecdb')
def test_delete_object_without_write_rights(self):
# attempt to delete object using non-authorized user
# update X-Container-Read and X-Container-Write metadata ACL
tenant_name = self.os_operator.credentials.tenant_name
username = self.os_operator.credentials.username
cont_headers = {'X-Container-Read':
tenant_name + ':' + username,
'X-Container-Write': ''}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(self.container_name,
object_name, 'data')
self.assertHeaders(resp, 'Object', 'PUT')
# Trying to delete the object without write rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.delete_object,
self.container_name,
object_name)
|
|
"""Support for Rflink devices."""
import asyncio
from collections import defaultdict
import logging
import async_timeout
from rflink.protocol import create_rflink_connection
from serial import SerialException
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_COMMAND,
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
STATE_ON,
)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.restore_state import RestoreEntity
_LOGGER = logging.getLogger(__name__)
ATTR_EVENT = "event"
ATTR_STATE = "state"
CONF_ALIASES = "aliases"
CONF_GROUP_ALIASES = "group_aliases"
CONF_GROUP = "group"
CONF_NOGROUP_ALIASES = "nogroup_aliases"
CONF_DEVICE_DEFAULTS = "device_defaults"
CONF_DEVICE_ID = "device_id"
CONF_DEVICES = "devices"
CONF_AUTOMATIC_ADD = "automatic_add"
CONF_FIRE_EVENT = "fire_event"
CONF_IGNORE_DEVICES = "ignore_devices"
CONF_RECONNECT_INTERVAL = "reconnect_interval"
CONF_SIGNAL_REPETITIONS = "signal_repetitions"
CONF_WAIT_FOR_ACK = "wait_for_ack"
DATA_DEVICE_REGISTER = "rflink_device_register"
DATA_ENTITY_LOOKUP = "rflink_entity_lookup"
DATA_ENTITY_GROUP_LOOKUP = "rflink_entity_group_only_lookup"
DEFAULT_RECONNECT_INTERVAL = 10
DEFAULT_SIGNAL_REPETITIONS = 1
CONNECTION_TIMEOUT = 10
EVENT_BUTTON_PRESSED = "button_pressed"
EVENT_KEY_COMMAND = "command"
EVENT_KEY_ID = "id"
EVENT_KEY_SENSOR = "sensor"
EVENT_KEY_UNIT = "unit"
RFLINK_GROUP_COMMANDS = ["allon", "alloff"]
DOMAIN = "rflink"
SERVICE_SEND_COMMAND = "send_command"
SIGNAL_AVAILABILITY = "rflink_device_available"
SIGNAL_HANDLE_EVENT = "rflink_handle_event_{}"
TMP_ENTITY = "tmp.{}"
DEVICE_DEFAULTS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(
CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS
): vol.Coerce(int),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PORT): vol.Any(cv.port, cv.string),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_WAIT_FOR_ACK, default=True): cv.boolean,
vol.Optional(
CONF_RECONNECT_INTERVAL, default=DEFAULT_RECONNECT_INTERVAL
): int,
vol.Optional(CONF_IGNORE_DEVICES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
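# Illustrative configuration.yaml snippet matching CONFIG_SCHEMA above
# (all values are made-up examples):
#
#   rflink:
#     port: /dev/ttyACM0        # serial device path, or a TCP port number when host is set
#     host: 192.168.1.10        # optional; omit to use a serial connection
#     wait_for_ack: true
#     reconnect_interval: 10
#     ignore_devices:
#       - newkaku_000000_0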
SEND_COMMAND_SCHEMA = vol.Schema(
{vol.Required(CONF_DEVICE_ID): cv.string, vol.Required(CONF_COMMAND): cv.string}
)
def identify_event_type(event):
"""Look at event to determine type of device.
Async friendly.
"""
if EVENT_KEY_COMMAND in event:
return EVENT_KEY_COMMAND
if EVENT_KEY_SENSOR in event:
return EVENT_KEY_SENSOR
return "unknown"
async def async_setup(hass, config):
"""Set up the Rflink component."""
    # Allow entities to register themselves by device_id so they can be looked
    # up when new Rflink events arrive and need to be handled
hass.data[DATA_ENTITY_LOOKUP] = {
EVENT_KEY_COMMAND: defaultdict(list),
EVENT_KEY_SENSOR: defaultdict(list),
}
hass.data[DATA_ENTITY_GROUP_LOOKUP] = {EVENT_KEY_COMMAND: defaultdict(list)}
# Allow platform to specify function to register new unknown devices
hass.data[DATA_DEVICE_REGISTER] = {}
async def async_send_command(call):
"""Send Rflink command."""
_LOGGER.debug("Rflink command for %s", str(call.data))
if not (
await RflinkCommand.send_command(
call.data.get(CONF_DEVICE_ID), call.data.get(CONF_COMMAND)
)
):
_LOGGER.error("Failed Rflink command for %s", str(call.data))
hass.services.async_register(
DOMAIN, SERVICE_SEND_COMMAND, async_send_command, schema=SEND_COMMAND_SCHEMA
)
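    # Illustrative service call data matching SEND_COMMAND_SCHEMA above
    # (the device id is a made-up example):
    #   service: rflink.send_command  data: {"device_id": "newkaku_0000c6c2_1", "command": "on"}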
@callback
def event_callback(event):
"""Handle incoming Rflink events.
Rflink events arrive as dictionaries of varying content
depending on their type. Identify the events and distribute
accordingly.
"""
event_type = identify_event_type(event)
_LOGGER.debug("event of type %s: %s", event_type, event)
        # Don't propagate non-entity events (e.g. version string, ack response)
if event_type not in hass.data[DATA_ENTITY_LOOKUP]:
_LOGGER.debug("unhandled event of type: %s", event_type)
return
        # Look up entities that registered this device id as device id or alias
event_id = event.get(EVENT_KEY_ID, None)
is_group_event = (
event_type == EVENT_KEY_COMMAND
and event[EVENT_KEY_COMMAND] in RFLINK_GROUP_COMMANDS
)
if is_group_event:
entity_ids = hass.data[DATA_ENTITY_GROUP_LOOKUP][event_type].get(
event_id, []
)
else:
entity_ids = hass.data[DATA_ENTITY_LOOKUP][event_type][event_id]
_LOGGER.debug("entity_ids: %s", entity_ids)
if entity_ids:
# Propagate event to every entity matching the device id
for entity in entity_ids:
_LOGGER.debug("passing event to %s", entity)
async_dispatcher_send(hass, SIGNAL_HANDLE_EVENT.format(entity), event)
elif not is_group_event:
# If device is not yet known, register with platform (if loaded)
if event_type in hass.data[DATA_DEVICE_REGISTER]:
_LOGGER.debug("device_id not known, adding new device")
# Add bogus event_id first to avoid race if we get another
# event before the device is created
# Any additional events received before the device has been
# created will thus be ignored.
hass.data[DATA_ENTITY_LOOKUP][event_type][event_id].append(
TMP_ENTITY.format(event_id)
)
hass.async_create_task(
hass.data[DATA_DEVICE_REGISTER][event_type](event)
)
else:
_LOGGER.debug("device_id not known and automatic add disabled")
# When connecting to tcp host instead of serial port (optional)
host = config[DOMAIN].get(CONF_HOST)
# TCP port when host configured, otherwise serial port
port = config[DOMAIN][CONF_PORT]
@callback
def reconnect(exc=None):
"""Schedule reconnect after connection has been unexpectedly lost."""
# Reset protocol binding before starting reconnect
RflinkCommand.set_rflink_protocol(None)
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
# If HA is not stopping, initiate new connection
if hass.state != CoreState.stopping:
_LOGGER.warning("disconnected from Rflink, reconnecting")
hass.async_create_task(connect())
async def connect():
"""Set up connection and hook it into HA for reconnect/shutdown."""
_LOGGER.info("Initiating Rflink connection")
# Rflink create_rflink_connection decides based on the value of host
# (string or None) if serial or tcp mode should be used
# Initiate serial/tcp connection to Rflink gateway
connection = create_rflink_connection(
port=port,
host=host,
event_callback=event_callback,
disconnect_callback=reconnect,
loop=hass.loop,
ignore=config[DOMAIN][CONF_IGNORE_DEVICES],
)
try:
with async_timeout.timeout(CONNECTION_TIMEOUT, loop=hass.loop):
transport, protocol = await connection
except (
SerialException,
ConnectionRefusedError,
TimeoutError,
OSError,
asyncio.TimeoutError,
) as exc:
reconnect_interval = config[DOMAIN][CONF_RECONNECT_INTERVAL]
_LOGGER.exception(
"Error connecting to Rflink, reconnecting in %s", reconnect_interval
)
# Connection to Rflink device is lost, make entities unavailable
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
hass.loop.call_later(reconnect_interval, reconnect, exc)
return
# There is a valid connection to a Rflink device now so
# mark entities as available
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, True)
# Bind protocol to command class to allow entities to send commands
RflinkCommand.set_rflink_protocol(protocol, config[DOMAIN][CONF_WAIT_FOR_ACK])
# handle shutdown of Rflink asyncio transport
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, lambda x: transport.close()
)
_LOGGER.info("Connected to Rflink")
hass.async_create_task(connect())
return True
class RflinkDevice(Entity):
"""Representation of a Rflink device.
Contains the common logic for Rflink entities.
"""
platform = None
_state = None
_available = True
def __init__(
self,
device_id,
initial_event=None,
name=None,
aliases=None,
group=True,
group_aliases=None,
nogroup_aliases=None,
fire_event=False,
signal_repetitions=DEFAULT_SIGNAL_REPETITIONS,
):
"""Initialize the device."""
# Rflink specific attributes for every component type
self._initial_event = initial_event
self._device_id = device_id
if name:
self._name = name
else:
self._name = device_id
self._aliases = aliases
self._group = group
self._group_aliases = group_aliases
self._nogroup_aliases = nogroup_aliases
self._should_fire_event = fire_event
self._signal_repetitions = signal_repetitions
@callback
def handle_event_callback(self, event):
"""Handle incoming event for device type."""
# Call platform specific event handler
self._handle_event(event)
# Propagate changes through ha
self.async_schedule_update_ha_state()
# Put command onto bus for user to subscribe to
if self._should_fire_event and identify_event_type(event) == EVENT_KEY_COMMAND:
self.hass.bus.async_fire(
EVENT_BUTTON_PRESSED,
{ATTR_ENTITY_ID: self.entity_id, ATTR_STATE: event[EVENT_KEY_COMMAND]},
)
_LOGGER.debug(
"Fired bus event for %s: %s", self.entity_id, event[EVENT_KEY_COMMAND]
)
def _handle_event(self, event):
"""Platform specific event handler."""
raise NotImplementedError()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return a name for the device."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
if self.assumed_state:
return False
return self._state
@property
def assumed_state(self):
"""Assume device state until first device event sets state."""
return self._state is None
@property
def available(self):
"""Return True if entity is available."""
return self._available
@callback
def _availability_callback(self, availability):
"""Update availability state."""
self._available = availability
self.async_schedule_update_ha_state()
async def async_added_to_hass(self):
"""Register update callback."""
await super().async_added_to_hass()
# Remove temporary bogus entity_id if added
tmp_entity = TMP_ENTITY.format(self._device_id)
if (
tmp_entity
in self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id]
):
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][
self._device_id
].remove(tmp_entity)
# Register id and aliases
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id].append(
self.entity_id
)
if self._group:
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][
self._device_id
].append(self.entity_id)
# aliases respond to both normal and group commands (allon/alloff)
if self._aliases:
for _id in self._aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
# group_aliases only respond to group commands (allon/alloff)
if self._group_aliases:
for _id in self._group_aliases:
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
# nogroup_aliases only respond to normal commands
if self._nogroup_aliases:
for _id in self._nogroup_aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
async_dispatcher_connect(
self.hass, SIGNAL_AVAILABILITY, self._availability_callback
)
async_dispatcher_connect(
self.hass,
SIGNAL_HANDLE_EVENT.format(self.entity_id),
self.handle_event_callback,
)
# Process the initial event now that the entity is created
if self._initial_event:
self.handle_event_callback(self._initial_event)
class RflinkCommand(RflinkDevice):
"""Singleton class to make Rflink command interface available to entities.
This class is to be inherited by every Entity class that is actionable
(switches/lights). It exposes the Rflink command interface for these
entities.
    The Rflink interface is managed at the class level and set during setup (and
reset on reconnect).
"""
# Keep repetition tasks to cancel if state is changed before repetitions
# are sent
_repetition_task = None
_protocol = None
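    # _wait_ack is assigned by set_rflink_protocol() during setup; it controls
    # whether _async_send_command() waits for an Rflink ACK before returning.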
@classmethod
def set_rflink_protocol(cls, protocol, wait_ack=None):
"""Set the Rflink asyncio protocol as a class variable."""
cls._protocol = protocol
if wait_ack is not None:
cls._wait_ack = wait_ack
@classmethod
def is_connected(cls):
"""Return connection status."""
return bool(cls._protocol)
@classmethod
async def send_command(cls, device_id, action):
"""Send device command to Rflink and wait for acknowledgement."""
return await cls._protocol.send_command_ack(device_id, action)
async def _async_handle_command(self, command, *args):
"""Do bookkeeping for command, send it to rflink and update state."""
self.cancel_queued_send_commands()
if command == "turn_on":
cmd = "on"
self._state = True
elif command == "turn_off":
cmd = "off"
self._state = False
elif command == "dim":
            # convert HA brightness (0-255) to an Rflink dim level (0-15)
            cmd = str(int(args[0] / 17))
self._state = True
elif command == "toggle":
cmd = "on"
# if the state is unknown or false, it gets set as true
# if the state is true, it gets set as false
self._state = self._state in [None, False]
# Cover options for RFlink
elif command == "close_cover":
cmd = "DOWN"
self._state = False
elif command == "open_cover":
cmd = "UP"
self._state = True
elif command == "stop_cover":
cmd = "STOP"
self._state = True
# Send initial command and queue repetitions.
        # This allows the entity state to be updated quickly without having to
# wait for all repetitions to be sent
await self._async_send_command(cmd, self._signal_repetitions)
# Update state of entity
await self.async_update_ha_state()
def cancel_queued_send_commands(self):
"""Cancel queued signal repetition commands.
        For example when the user changes state while repetitions are still
        queued for broadcast, or when an incoming Rflink command (remote
switch) changes the state.
"""
# cancel any outstanding tasks from the previous state change
if self._repetition_task:
self._repetition_task.cancel()
async def _async_send_command(self, cmd, repetitions):
"""Send a command for device to Rflink gateway."""
_LOGGER.debug("Sending command: %s to Rflink device: %s", cmd, self._device_id)
if not self.is_connected():
raise HomeAssistantError("Cannot send command, not connected!")
if self._wait_ack:
# Puts command on outgoing buffer then waits for Rflink to confirm
            # the command has been sent out into the ether.
await self._protocol.send_command_ack(self._device_id, cmd)
else:
# Puts command on outgoing buffer and returns straight away.
# Rflink protocol/transport handles asynchronous writing of buffer
# to serial/tcp device. Does not wait for command send
# confirmation.
self._protocol.send_command(self._device_id, cmd)
if repetitions > 1:
self._repetition_task = self.hass.async_create_task(
self._async_send_command(cmd, repetitions - 1)
)
class SwitchableRflinkDevice(RflinkCommand, RestoreEntity):
"""Rflink entity which can switch on/off (eg: light, switch)."""
async def async_added_to_hass(self):
"""Restore RFLink device state (ON/OFF)."""
await super().async_added_to_hass()
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_ON
def _handle_event(self, event):
"""Adjust state if Rflink picks up a remote command for this device."""
self.cancel_queued_send_commands()
command = event["command"]
if command in ["on", "allon"]:
self._state = True
elif command in ["off", "alloff"]:
self._state = False
def async_turn_on(self, **kwargs):
"""Turn the device on."""
return self._async_handle_command("turn_on")
def async_turn_off(self, **kwargs):
"""Turn the device off."""
return self._async_handle_command("turn_off")
|
|
"""
Tests of the core auth models (Role, Membership, Collection, FacilityUser, etc).
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.test import TestCase
from ..constants import collection_kinds
from ..constants import role_kinds
from ..errors import InvalidCollectionHierarchy
from ..errors import InvalidMembershipError
from ..errors import InvalidRoleKind
from ..errors import UserDoesNotHaveRoleError
from ..errors import UserIsNotMemberError
from ..models import AdHocGroup
from ..models import Classroom
from ..models import Collection
from ..models import Facility
from ..models import FacilityUser
from ..models import LearnerGroup
from ..models import Membership
from ..models import Role
from .helpers import create_superuser
from kolibri.core.device.models import DeviceSettings
class CollectionRoleMembershipDeletionTestCase(TestCase):
"""
    Tests that removing users from a Collection deletes the corresponding Membership or Role, and that deleting a Collection
or FacilityUser deletes all associated Roles and Memberships.
"""
def setUp(self):
self.facility = Facility.objects.create()
learner, classroom_coach, facility_admin = (
self.learner,
self.classroom_coach,
self.facility_admin,
) = (
FacilityUser.objects.create(username="foo", facility=self.facility),
FacilityUser.objects.create(username="bar", facility=self.facility),
FacilityUser.objects.create(username="baz", facility=self.facility),
)
self.facility.add_admin(facility_admin)
self.cr = Classroom.objects.create(parent=self.facility)
self.cr.add_coach(classroom_coach)
self.cr.add_member(learner)
self.lg = LearnerGroup.objects.create(parent=self.cr)
self.lg.add_learner(learner)
def test_remove_learner(self):
self.assertTrue(self.learner.is_member_of(self.lg))
self.assertTrue(self.learner.is_member_of(self.cr))
self.assertTrue(self.learner.is_member_of(self.facility))
self.assertEqual(
Membership.objects.filter(user=self.learner, collection=self.lg).count(), 1
)
self.lg.remove_learner(self.learner)
self.cr.remove_member(self.learner)
self.assertFalse(self.learner.is_member_of(self.lg))
self.assertFalse(self.learner.is_member_of(self.cr))
self.assertTrue(
self.learner.is_member_of(self.facility)
) # always a member of one's own facility
self.assertEqual(
Membership.objects.filter(user=self.learner, collection=self.lg).count(), 0
)
with self.assertRaises(UserIsNotMemberError):
self.lg.remove_learner(self.learner)
def test_remove_learner_from_parent_removes_from_child(self):
self.assertTrue(self.learner.is_member_of(self.lg))
self.assertTrue(self.learner.is_member_of(self.cr))
self.assertTrue(self.learner.is_member_of(self.facility))
self.assertEqual(
Membership.objects.filter(user=self.learner, collection=self.lg).count(), 1
)
self.cr.remove_member(self.learner)
self.assertFalse(self.learner.is_member_of(self.lg))
self.assertFalse(self.learner.is_member_of(self.cr))
self.assertTrue(
self.learner.is_member_of(self.facility)
) # always a member of one's own facility
self.assertEqual(
Membership.objects.filter(user=self.learner, collection=self.lg).count(), 0
)
with self.assertRaises(UserIsNotMemberError):
self.lg.remove_learner(self.learner)
def test_remove_coach(self):
self.assertTrue(
self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.lg)
)
self.assertTrue(
self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.cr)
)
self.assertFalse(
self.classroom_coach.has_role_for_collection(
role_kinds.COACH, self.facility
)
)
self.assertFalse(
self.classroom_coach.has_role_for_collection(role_kinds.ADMIN, self.lg)
)
self.assertTrue(
self.classroom_coach.has_role_for_user(role_kinds.COACH, self.learner)
)
self.assertFalse(
self.classroom_coach.has_role_for_user(
role_kinds.COACH, self.facility_admin
)
)
self.assertFalse(
self.classroom_coach.has_role_for_user(role_kinds.ADMIN, self.learner)
)
self.assertEqual(
Role.objects.filter(
user=self.classroom_coach, kind=role_kinds.COACH, collection=self.cr
).count(),
1,
)
self.cr.remove_coach(self.classroom_coach)
self.assertFalse(
self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.lg)
)
self.assertFalse(
self.classroom_coach.has_role_for_collection(role_kinds.COACH, self.cr)
)
self.assertFalse(
self.classroom_coach.has_role_for_collection(
role_kinds.COACH, self.facility
)
)
self.assertFalse(
self.classroom_coach.has_role_for_collection(role_kinds.ADMIN, self.lg)
)
self.assertFalse(
self.classroom_coach.has_role_for_user(role_kinds.COACH, self.learner)
)
self.assertFalse(
self.classroom_coach.has_role_for_user(
role_kinds.COACH, self.facility_admin
)
)
self.assertFalse(
self.classroom_coach.has_role_for_user(role_kinds.ADMIN, self.learner)
)
self.assertEqual(
Role.objects.filter(
user=self.classroom_coach, kind=role_kinds.COACH, collection=self.cr
).count(),
0,
)
with self.assertRaises(UserDoesNotHaveRoleError):
self.cr.remove_coach(self.classroom_coach)
def test_remove_admin(self):
self.assertTrue(
self.facility_admin.has_role_for_collection(role_kinds.ADMIN, self.lg)
)
self.assertTrue(
self.facility_admin.has_role_for_collection(role_kinds.ADMIN, self.cr)
)
self.assertTrue(
self.facility_admin.has_role_for_collection(role_kinds.ADMIN, self.facility)
)
self.assertFalse(
self.facility_admin.has_role_for_collection(role_kinds.COACH, self.lg)
)
self.assertTrue(
self.facility_admin.has_role_for_user(role_kinds.ADMIN, self.learner)
)
self.assertTrue(
self.facility_admin.has_role_for_user(role_kinds.ADMIN, self.facility_admin)
)
self.assertTrue(
self.facility_admin.has_role_for_user(
role_kinds.ADMIN, self.classroom_coach
)
)
self.assertFalse(
self.facility_admin.has_role_for_user(role_kinds.COACH, self.learner)
)
self.assertEqual(
Role.objects.filter(
user=self.facility_admin,
kind=role_kinds.ADMIN,
collection=self.facility,
).count(),
1,
)
self.facility.remove_admin(self.facility_admin)
self.assertEqual(
Role.objects.filter(
user=self.facility_admin,
kind=role_kinds.ADMIN,
collection=self.facility,
).count(),
0,
)
with self.assertRaises(UserDoesNotHaveRoleError):
self.facility.remove_admin(self.facility_admin)
def test_remove_nonexistent_role(self):
with self.assertRaises(UserDoesNotHaveRoleError):
self.facility.remove_admin(self.learner)
with self.assertRaises(UserDoesNotHaveRoleError):
self.cr.remove_coach(self.learner)
def test_remove_indirect_admin_role(self):
""" Trying to remove the admin role for a a Facility admin from a descendant classroom doesn't actually remove anything. """
with self.assertRaises(UserDoesNotHaveRoleError):
self.cr.remove_admin(self.facility_admin)
def test_delete_learner_group(self):
""" Deleting a LearnerGroup should delete its associated Memberships as well """
self.assertEqual(Membership.objects.filter(collection=self.lg.id).count(), 1)
self.lg.delete()
self.assertEqual(Membership.objects.filter(collection=self.lg.id).count(), 0)
def test_delete_classroom_pt1(self):
""" Deleting a Classroom should delete its associated Roles as well """
self.assertEqual(Role.objects.filter(collection=self.cr.id).count(), 1)
self.cr.delete()
self.assertEqual(Role.objects.filter(collection=self.cr.id).count(), 0)
def test_delete_classroom_pt2(self):
""" Deleting a Classroom should delete its associated LearnerGroups """
self.assertEqual(LearnerGroup.objects.count(), 1)
self.cr.delete()
self.assertEqual(LearnerGroup.objects.count(), 0)
def test_delete_facility_pt1(self):
""" Deleting a Facility should delete associated Roles as well """
self.assertEqual(Role.objects.filter(collection=self.facility.id).count(), 2)
self.facility.delete()
self.assertEqual(Role.objects.filter(collection=self.facility.id).count(), 0)
def test_delete_facility_pt2(self):
""" Deleting a Facility should delete Classrooms under it. """
self.assertEqual(Classroom.objects.count(), 1)
self.facility.delete()
self.assertEqual(Classroom.objects.count(), 0)
def test_delete_facility_pt3(self):
""" Deleting a Facility should delete *every* Collection under it and associated Roles """
self.facility.delete()
self.assertEqual(Collection.objects.count(), 0)
self.assertEqual(Role.objects.count(), 0)
def test_delete_facility_user(self):
""" Deleting a FacilityUser should delete associated Memberships """
self.learner.delete()
self.assertEqual(Membership.objects.filter(user=self.learner).count(), 0)
class CollectionRelatedObjectTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.facility = Facility.objects.create()
users = cls.users = [
FacilityUser.objects.create(username="foo%s" % i, facility=cls.facility)
for i in range(10)
]
cls.facility.add_admins(users[8:9])
cls.cr = Classroom.objects.create(parent=cls.facility)
cls.cr.add_coaches(users[5:8])
for u in users[0:5]:
cls.cr.add_member(u)
cls.lg = LearnerGroup.objects.create(parent=cls.cr)
cls.lg.add_learners(users[0:5])
def test_get_learner_groups(self):
self.assertSetEqual(
{self.lg.pk}, set(lg.pk for lg in self.cr.get_learner_groups())
)
def test_get_classrooms(self):
self.assertSetEqual(
{self.cr.pk}, set(cr.pk for cr in self.facility.get_classrooms())
)
def test_get_classroom(self):
self.assertEqual(self.cr.pk, self.lg.get_classroom().pk)
class CollectionsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.facility = Facility.objects.create()
cls.classroom = Classroom.objects.create(parent=cls.facility)
def test_add_and_remove_admin(self):
user = FacilityUser.objects.create(username="foo", facility=self.facility)
self.facility.add_admin(user)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.ADMIN, collection=self.facility
).count(),
1,
)
self.facility.remove_admin(user)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.ADMIN, collection=self.facility
).count(),
0,
)
def test_add_and_remove_coach(self):
user = FacilityUser.objects.create(username="foo", facility=self.facility)
self.classroom.add_coach(user)
self.facility.add_coach(user)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.COACH, collection=self.classroom
).count(),
1,
)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.COACH, collection=self.facility
).count(),
1,
)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.ASSIGNABLE_COACH, collection=self.facility
).count(),
1,
)
self.classroom.remove_coach(user)
self.facility.remove_coach(user)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.COACH, collection=self.classroom
).count(),
0,
)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.COACH, collection=self.facility
).count(),
0,
)
def test_add_and_remove_classroom_coach(self):
user = FacilityUser.objects.create(username="foo", facility=self.facility)
self.classroom.add_coach(user)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.COACH, collection=self.classroom
).count(),
1,
)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.ASSIGNABLE_COACH, collection=self.facility
).count(),
1,
)
self.facility.remove_role(user, role_kinds.ASSIGNABLE_COACH)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.COACH, collection=self.classroom
).count(),
0,
)
self.assertEqual(
Role.objects.filter(
user=user, kind=role_kinds.ASSIGNABLE_COACH, collection=self.facility
).count(),
0,
)
def test_add_coaches(self):
user1 = FacilityUser.objects.create(username="foo1", facility=self.facility)
user2 = FacilityUser.objects.create(username="foo2", facility=self.facility)
self.classroom.add_coaches([user1, user2])
self.facility.add_coaches([user1, user2])
self.assertEqual(
Role.objects.filter(
kind=role_kinds.COACH, collection=self.classroom
).count(),
2,
)
self.assertEqual(
Role.objects.filter(
kind=role_kinds.COACH, collection=self.facility
).count(),
2,
)
def test_add_admins(self):
user1 = FacilityUser.objects.create(username="foo1", facility=self.facility)
user2 = FacilityUser.objects.create(username="foo2", facility=self.facility)
with self.assertRaises(InvalidRoleKind):
self.classroom.add_admins([user1, user2])
self.facility.add_admins([user1, user2])
self.assertEqual(
Role.objects.filter(
kind=role_kinds.ADMIN, collection=self.facility
).count(),
2,
)
def test_add_classroom(self):
classroom = Classroom.objects.create(parent=self.facility)
self.assertEqual(Classroom.objects.count(), 2)
self.assertEqual(classroom.get_facility(), self.facility)
def test_add_learner_group(self):
classroom = Classroom.objects.create(name="blah", parent=self.facility)
classroom.full_clean()
LearnerGroup.objects.create(parent=classroom)
self.assertEqual(LearnerGroup.objects.count(), 1)
def test_learner_cannot_be_added_to_learnergroup_if_not_classroom_member(self):
user = FacilityUser.objects.create(username="foo", facility=self.facility)
classroom = Classroom.objects.create(parent=self.facility)
learner_group = LearnerGroup.objects.create(name="blah", parent=classroom)
learner_group.full_clean()
with self.assertRaises(InvalidMembershipError):
learner_group.add_learner(user)
def test_learner_can_be_added_to_learnergroup_if_classroom_member(self):
user = FacilityUser.objects.create(username="foo", facility=self.facility)
classroom = Classroom.objects.create(parent=self.facility)
classroom.add_member(user)
learner_group = LearnerGroup.objects.create(name="blah", parent=classroom)
learner_group.full_clean()
learner_group.add_learner(user)
self.assertEqual(
Membership.objects.filter(user=user, collection=learner_group).count(), 1
)
def test_learner_cannot_be_added_to_adhocgroup_if_not_classroom_member(self):
user = FacilityUser.objects.create(username="foo", facility=self.facility)
classroom = Classroom.objects.create(parent=self.facility)
adhoc_group = AdHocGroup.objects.create(name="blah", parent=classroom)
adhoc_group.full_clean()
with self.assertRaises(InvalidMembershipError):
adhoc_group.add_learner(user)
def test_learner_can_be_added_to_adhocgroup_if_classroom_member(self):
user = FacilityUser.objects.create(username="foo", facility=self.facility)
classroom = Classroom.objects.create(parent=self.facility)
classroom.add_member(user)
adhoc_group = AdHocGroup.objects.create(name="blah", parent=classroom)
adhoc_group.full_clean()
adhoc_group.add_learner(user)
self.assertEqual(
Membership.objects.filter(user=user, collection=adhoc_group).count(), 1
)
def test_parentless_classroom(self):
classroom = Classroom(name="myclass")
# shouldn't be valid, because no parent was specified, and Classrooms can't be the root of the collection tree
with self.assertRaises(ValidationError):
classroom.full_clean()
with self.assertRaises(IntegrityError):
classroom.save()
def test_parentless_learnergroup(self):
group = LearnerGroup(name="mygroup")
# shouldn't be valid, because no parent was specified, and LearnerGroups can't be the root of the collection tree
with self.assertRaises(ValidationError):
group.full_clean()
with self.assertRaises(IntegrityError):
group.save()
def test_facility_with_parent_facility(self):
with self.assertRaises(IntegrityError):
Facility.objects.create(name="blah", parent=self.facility)
def test_create_bare_collection_without_kind(self):
with self.assertRaises(ValidationError):
Collection(name="qqq", parent=self.facility).full_clean()
class RoleErrorTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.classroom = Classroom.objects.create(parent=self.facility)
self.learner_group = LearnerGroup.objects.create(parent=self.classroom)
self.adhoc_group = AdHocGroup.objects.create(parent=self.classroom)
self.facility_user = FacilityUser.objects.create(
username="blah", password="#", facility=self.facility
)
def test_invalid_role_kind(self):
with self.assertRaises(InvalidRoleKind):
self.learner_group.add_role(
self.facility_user, "blahblahnonexistentroletype"
)
with self.assertRaises(InvalidRoleKind):
self.learner_group.remove_role(
self.facility_user, "blahblahnonexistentroletype"
)
def test_invalid_learner_group_roles(self):
with self.assertRaises(InvalidRoleKind):
self.learner_group.add_role(self.facility_user, role_kinds.ADMIN)
with self.assertRaises(InvalidRoleKind):
self.learner_group.add_role(self.facility_user, role_kinds.COACH)
def test_invalid_adhoc_group_roles(self):
with self.assertRaises(InvalidRoleKind):
self.learner_group.add_role(self.facility_user, role_kinds.ADMIN)
with self.assertRaises(InvalidRoleKind):
self.learner_group.add_role(self.facility_user, role_kinds.COACH)
def test_invalid_classroom_roles(self):
with self.assertRaises(InvalidRoleKind):
self.learner_group.add_role(self.facility_user, role_kinds.ADMIN)
class SuperuserRoleMembershipTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.facility = Facility.objects.create()
cls.classroom = Classroom.objects.create(parent=cls.facility)
cls.learner_group = LearnerGroup.objects.create(parent=cls.classroom)
cls.facility_user = FacilityUser.objects.create(
username="blah", password="#", facility=cls.facility
)
cls.superuser = create_superuser(cls.facility)
cls.superuser2 = create_superuser(cls.facility, username="superuser2")
def test_superuser_is_not_member_of_any_sub_collection(self):
self.assertFalse(self.superuser.is_member_of(self.classroom))
self.assertTrue(self.superuser.is_member_of(self.facility))
self.assertFalse(self.superuser.is_member_of(self.learner_group))
def test_superuser_is_admin_for_everything(self):
self.assertTrue(
self.superuser.has_role_for_user([role_kinds.ADMIN], self.facility_user)
)
self.assertTrue(
self.superuser.has_role_for_collection([role_kinds.ADMIN], self.facility)
)
class SuperuserTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.facility = Facility.objects.create()
cls.superuser = create_superuser(cls.facility, username="the_superuser")
def test_superuser_is_superuser(self):
self.assertTrue(self.superuser.is_superuser)
def test_superuser_manager_supports_superuser_creation(self):
self.assertEqual(FacilityUser.objects.get().username, "the_superuser")
def test_superuser_has_all_django_perms_for_django_admin(self):
fake_permission = "fake_permission"
fake_module = "module.someapp"
self.assertTrue(self.superuser.has_perm(fake_permission, object()))
self.assertTrue(self.superuser.has_perms([fake_permission], object()))
self.assertTrue(self.superuser.has_module_perms(fake_module))
class StringMethodTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.facility = Facility.objects.create(name="Arkham")
learner, classroom_coach, facility_admin = (
cls.learner,
cls.classroom_coach,
cls.facility_admin,
) = (
FacilityUser.objects.create(username="foo", facility=cls.facility),
FacilityUser.objects.create(username="bar", facility=cls.facility),
FacilityUser.objects.create(username="baz", facility=cls.facility),
)
cls.facility.add_admin(facility_admin)
cls.cr = Classroom.objects.create(name="Classroom X", parent=cls.facility)
cls.cr.add_coach(classroom_coach)
cls.cr.add_member(learner)
cls.lg = LearnerGroup.objects.create(name="Oodles of Fun", parent=cls.cr)
cls.lg.add_learner(learner)
cls.superuser = create_superuser(cls.facility)
def test_facility_user_str_method(self):
self.assertEqual(str(self.learner), '"foo"@"Arkham"')
def test_superuser_str_method(self):
self.assertEqual(str(self.superuser), '"superuser"@"Arkham"')
def test_collection_str_method(self):
self.assertEqual(
str(Collection.objects.filter(kind=collection_kinds.FACILITY)[0]),
'"Arkham" (facility)',
)
def test_membership_str_method(self):
self.assertEqual(
str(self.learner.memberships.filter(collection=self.lg)[0]),
'"foo"@"Arkham"\'s membership in "Oodles of Fun" (learnergroup)',
)
def test_role_str_method(self):
self.assertEqual(
str(self.classroom_coach.roles.filter(kind=role_kinds.COACH)[0]),
'"bar"@"Arkham"\'s coach role for "Classroom X" (classroom)',
)
def test_facility_str_method(self):
self.assertEqual(str(self.facility), "Arkham")
def test_classroom_str_method(self):
self.assertEqual(str(self.cr), "Classroom X")
def test_learner_group_str_method(self):
self.assertEqual(str(self.lg), "Oodles of Fun")
class FacilityTestCase(TestCase):
def test_existing_facility_becomes_default_facility(self):
self.facility = Facility.objects.create()
self.device_settings = DeviceSettings.objects.create()
self.assertEqual(self.device_settings.default_facility, None)
default_facility = Facility.get_default_facility()
self.assertEqual(default_facility, self.facility)
self.device_settings.refresh_from_db()
self.assertEqual(self.device_settings.default_facility, self.facility)
def test_default_facility_returns_none_when_no_settings(self):
default_facility = Facility.get_default_facility()
self.assertEqual(default_facility, None)
class FacilityUserTestCase(TestCase):
def test_able_to_create_user_with_same_username_at_orm_level(self):
self.facility = Facility.objects.create()
self.device_settings = DeviceSettings.objects.create()
FacilityUser.objects.create(username="bob", facility=self.facility)
try:
FacilityUser.objects.create(username="bob", facility=self.facility)
except IntegrityError:
self.fail("Can't create user with same username.")
def test_deserialize_empty_password(self):
self.facility = Facility.objects.create()
self.device_settings = DeviceSettings.objects.create()
user = FacilityUser.deserialize(dict(username="bob", password=""))
self.assertEqual("bob", user.username)
self.assertEqual("NOT_SPECIFIED", user.password)
class CollectionHierarchyTestCase(TestCase):
def test_facility_with_parent(self):
facility = Facility.objects.create()
with self.assertRaises(IntegrityError):
Facility.objects.create(parent=facility)
def test_classroom_no_parent(self):
with self.assertRaises(IntegrityError):
Classroom.objects.create()
def test_classroom_no_facility_parent(self):
facility = Facility.objects.create()
clsroom = Classroom.objects.create(parent=facility)
with self.assertRaises(InvalidCollectionHierarchy):
Classroom.objects.create(parent=clsroom)
def test_learnergroup_no_parent(self):
with self.assertRaises(IntegrityError):
LearnerGroup.objects.create()
def test_learnergroup_no_facility_parent(self):
facility = Facility.objects.create()
clsroom = Classroom.objects.create(parent=facility)
lgroup = LearnerGroup.objects.create(parent=clsroom)
with self.assertRaises(InvalidCollectionHierarchy):
LearnerGroup.objects.create(parent=lgroup)
def test_adhocgroup_no_parent(self):
with self.assertRaises(IntegrityError):
AdHocGroup.objects.create()
def test_adhocgroup_no_facility_parent(self):
facility = Facility.objects.create()
clsroom = Classroom.objects.create(parent=facility)
adhocgroup = AdHocGroup.objects.create(parent=clsroom)
with self.assertRaises(InvalidCollectionHierarchy):
AdHocGroup.objects.create(parent=adhocgroup)
|
|
# -*- coding: utf-8 -*-
"""
celery.app.defaults
~~~~~~~~~~~~~~~~~~~
Configuration introspection and defaults.
"""
from __future__ import absolute_import
import sys
from collections import deque
from datetime import timedelta
from celery.utils import strtobool
from celery.utils.functional import memoize
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
DEFAULT_POOL = 'processes'
if is_jython:
DEFAULT_POOL = 'threads'
elif is_pypy:
if sys.pypy_version_info[0:3] < (1, 5, 0):
DEFAULT_POOL = 'solo'
else:
DEFAULT_POOL = 'processes'
DEFAULT_PROCESS_LOG_FMT = """
[%(asctime)s: %(levelname)s/%(processName)s] %(message)s
""".strip()
DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
%(task_name)s[%(task_id)s]: %(message)s"""
_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', 'alt': 'BROKER_URL'}
_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
'alt': 'URL form of CELERY_RESULT_BACKEND'}
class Option(object):
alt = None
deprecate_by = None
remove_by = None
typemap = dict(string=str, int=int, float=float, any=lambda v: v,
bool=strtobool, dict=dict, tuple=tuple)
def __init__(self, default=None, *args, **kwargs):
self.default = default
self.type = kwargs.get('type') or 'string'
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
def to_python(self, value):
return self.typemap[self.type](value)
def __repr__(self):
return '<Option: type->%s default->%r>' % (self.type, self.default)
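# For illustration: Option(0, type='int').to_python('10') returns 10, and
# Option(False, type='bool').to_python('true') returns True via strtobool.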
NAMESPACES = {
'BROKER': {
'URL': Option(None, type='string'),
'CONNECTION_TIMEOUT': Option(4, type='float'),
'CONNECTION_RETRY': Option(True, type='bool'),
'CONNECTION_MAX_RETRIES': Option(100, type='int'),
'HEARTBEAT': Option(10, type='int'),
'POOL_LIMIT': Option(10, type='int'),
'INSIST': Option(False, type='bool',
deprecate_by='2.4', remove_by='4.0'),
'USE_SSL': Option(False, type='bool'),
'TRANSPORT': Option(type='string'),
'TRANSPORT_OPTIONS': Option({}, type='dict'),
'HOST': Option(type='string', **_BROKER_OLD),
'PORT': Option(type='int', **_BROKER_OLD),
'USER': Option(type='string', **_BROKER_OLD),
'PASSWORD': Option(type='string', **_BROKER_OLD),
'VHOST': Option(type='string', **_BROKER_OLD),
},
'CASSANDRA': {
'COLUMN_FAMILY': Option(type='string'),
'DETAILED_MODE': Option(False, type='bool'),
'KEYSPACE': Option(type='string'),
'READ_CONSISTENCY': Option(type='string'),
'SERVERS': Option(type='list'),
'WRITE_CONSISTENCY': Option(type='string'),
},
'CELERY': {
'ACKS_LATE': Option(False, type='bool'),
'ALWAYS_EAGER': Option(False, type='bool'),
'AMQP_TASK_RESULT_EXPIRES': Option(
type='float', deprecate_by='2.5', remove_by='4.0',
alt='CELERY_TASK_RESULT_EXPIRES'
),
'AMQP_TASK_RESULT_CONNECTION_MAX': Option(
1, type='int', remove_by='2.5', alt='BROKER_POOL_LIMIT',
),
'ANNOTATIONS': Option(type='any'),
'BROADCAST_QUEUE': Option('celeryctl'),
'BROADCAST_EXCHANGE': Option('celeryctl'),
'BROADCAST_EXCHANGE_TYPE': Option('fanout'),
'CACHE_BACKEND': Option(),
'CACHE_BACKEND_OPTIONS': Option({}, type='dict'),
'CREATE_MISSING_QUEUES': Option(True, type='bool'),
'DEFAULT_RATE_LIMIT': Option(type='string'),
'DISABLE_RATE_LIMITS': Option(False, type='bool'),
'DEFAULT_ROUTING_KEY': Option('celery'),
'DEFAULT_QUEUE': Option('celery'),
'DEFAULT_EXCHANGE': Option('celery'),
'DEFAULT_EXCHANGE_TYPE': Option('direct'),
'DEFAULT_DELIVERY_MODE': Option(2, type='string'),
'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'),
'ENABLE_UTC': Option(True, type='bool'),
'EVENT_SERIALIZER': Option('json'),
'IMPORTS': Option((), type='tuple'),
'INCLUDE': Option((), type='tuple'),
'IGNORE_RESULT': Option(False, type='bool'),
'MAX_CACHED_RESULTS': Option(5000, type='int'),
'MESSAGE_COMPRESSION': Option(type='string'),
'MONGODB_BACKEND_SETTINGS': Option(type='dict'),
'REDIS_HOST': Option(type='string', **_REDIS_OLD),
'REDIS_PORT': Option(type='int', **_REDIS_OLD),
'REDIS_DB': Option(type='int', **_REDIS_OLD),
'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD),
'REDIS_MAX_CONNECTIONS': Option(type='int'),
'RESULT_BACKEND': Option(type='string'),
'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'),
'RESULT_DBURI': Option(),
'RESULT_ENGINE_OPTIONS': Option(type='dict'),
'RESULT_EXCHANGE': Option('celeryresults'),
'RESULT_EXCHANGE_TYPE': Option('direct'),
'RESULT_SERIALIZER': Option('pickle'),
'RESULT_PERSISTENT': Option(False, type='bool'),
'ROUTES': Option(type='any'),
'SEND_EVENTS': Option(False, type='bool'),
'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
'TASK_ERROR_WHITELIST': Option(
(), type='tuple', deprecate_by='2.5', remove_by='4.0',
),
'TASK_PUBLISH_RETRY': Option(True, type='bool'),
'TASK_PUBLISH_RETRY_POLICY': Option({
'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}, type='dict'),
'TASK_RESULT_EXPIRES': Option(timedelta(days=1), type='float'),
'TASK_SERIALIZER': Option('pickle'),
'TIMEZONE': Option(type='string'),
'TRACK_STARTED': Option(False, type='bool'),
'REDIRECT_STDOUTS': Option(True, type='bool'),
'REDIRECT_STDOUTS_LEVEL': Option('WARNING'),
'QUEUES': Option(type='dict'),
'QUEUE_HA_POLICY': Option(None, type='string'),
'SECURITY_KEY': Option(type='string'),
'SECURITY_CERTIFICATE': Option(type='string'),
'SECURITY_CERT_STORE': Option(type='string'),
'WORKER_DIRECT': Option(False, type='bool'),
},
'CELERYD': {
'AUTOSCALER': Option('celery.worker.autoscale.Autoscaler'),
'AUTORELOADER': Option('celery.worker.autoreload.Autoreloader'),
'BOOT_STEPS': Option((), type='tuple'),
'CONCURRENCY': Option(0, type='int'),
'TIMER': Option(type='string'),
'TIMER_PRECISION': Option(1.0, type='float'),
'FORCE_EXECV': Option(True, type='bool'),
'HIJACK_ROOT_LOGGER': Option(True, type='bool'),
'CONSUMER': Option(type='string'),
'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT),
'LOG_COLOR': Option(type='bool'),
'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
'MEDIATOR': Option('celery.worker.mediator.Mediator'),
'MAX_TASKS_PER_CHILD': Option(type='int'),
'POOL': Option(DEFAULT_POOL),
'POOL_PUTLOCKS': Option(True, type='bool'),
'POOL_RESTARTS': Option(False, type='bool'),
'PREFETCH_MULTIPLIER': Option(4, type='int'),
'STATE_DB': Option(),
'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT),
'TASK_SOFT_TIME_LIMIT': Option(type='float'),
'TASK_TIME_LIMIT': Option(type='float'),
'WORKER_LOST_WAIT': Option(10.0, type='float')
},
'CELERYBEAT': {
'SCHEDULE': Option({}, type='dict'),
'SCHEDULER': Option('celery.beat.PersistentScheduler'),
'SCHEDULE_FILENAME': Option('celerybeat-schedule'),
'MAX_LOOP_INTERVAL': Option(0, type='float'),
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
},
'CELERYMON': {
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
'LOG_FORMAT': Option(DEFAULT_LOG_FMT),
},
'EMAIL': {
'HOST': Option('localhost'),
'PORT': Option(25, type='int'),
'HOST_USER': Option(),
'HOST_PASSWORD': Option(),
'TIMEOUT': Option(2, type='float'),
'USE_SSL': Option(False, type='bool'),
'USE_TLS': Option(False, type='bool'),
},
'SERVER_EMAIL': Option('celery@localhost'),
'ADMINS': Option((), type='tuple'),
}
def flatten(d, ns=''):
stack = deque([(ns, d)])
while stack:
name, space = stack.popleft()
for key, value in space.iteritems():
if isinstance(value, dict):
stack.append((name + key + '_', value))
else:
yield name + key, value
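# flatten() joins namespace and key with an underscore, so flatten(NAMESPACES)
# yields pairs such as ('BROKER_URL', <Option>) and ('CELERY_ACKS_LATE', <Option>).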
DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES))
def find_deprecated_settings(source):
from celery.utils import warn_deprecated
for name, opt in flatten(NAMESPACES):
if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
warn_deprecated(description='The %r setting' % (name, ),
deprecation=opt.deprecate_by,
removal=opt.remove_by,
alternative=opt.alt)
return source
@memoize(maxsize=None)
def find(name, namespace='celery'):
# - Try specified namespace first.
namespace = namespace.upper()
try:
return namespace, name.upper(), NAMESPACES[namespace][name.upper()]
except KeyError:
# - Try all the other namespaces.
for ns, keys in NAMESPACES.iteritems():
if ns.upper() == name.upper():
return None, ns, keys
elif isinstance(keys, dict):
try:
return ns, name.upper(), keys[name.upper()]
except KeyError:
pass
# - See if name is a qualname last.
return None, name.upper(), DEFAULTS[name.upper()]
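# For example, find('always_eager') returns
# ('CELERY', 'ALWAYS_EAGER', <Option: type->bool default->False>).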
|
|
"""
Second-Hand-Shop Project
@author: Malte Gerth
@copyright: Copyright (C) 2015 Malte Gerth
@license: MIT
@maintainer: Malte Gerth
@email: [email protected]
"""
import logging
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from rest_framework.decorators import api_view
from rest_framework.generics import RetrieveAPIView
from rest_framework.response import Response
from events.models import Event
from pos.models import Cart
from sale_lists.models import Article, SaleList
from .serializers import (
ArticleSerializer,
CartSerializer,
EventSerializer,
SaleListSerializer,
UserSerializer,
)
__author__ = "Malte Gerth <[email protected]>"
__copyright__ = "Copyright (C) 2015 Malte Gerth"
__license__ = "MIT"
logger = logging.getLogger(__name__)
class UserRetrieveAPIView(RetrieveAPIView):
pagination_class = None
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
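# Illustrative request body for find_sale_list below (field name taken from the
# code): POST data of the form {"barcode": "<sale list barcode>"}; the active
# Event is resolved server-side via Event.objects.get(is_active=True).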
@api_view(["POST"])
def find_sale_list(request):
try:
sale_list = (
SaleList.objects.select_related("event", "owner")
.prefetch_related("items", "items__event")
.get_by_barcode(
request.data["barcode"], event=Event.objects.get(is_active=True)
)
)
sale_list_serializer = SaleListSerializer(
sale_list, context={"request": request}
)
return Response({"sale_list": sale_list_serializer.data})
except SaleList.DoesNotExist:
return Response(
{
"error": _('Sale list with barcode "%s" not found')
% request.data["barcode"]
}
)
@api_view(["POST"])
def deliver_sale_list(request, pk):
sale_list = (
SaleList.objects.select_related("event", "owner")
.prefetch_related("items", "items__event", "items__cart")
.get(pk=pk)
)
sale_list.is_delivered_to_owner = bool(
request.data.get("is_delivered_to_owner", False)
)
sale_list.is_paid = request.data.get("is_paid", False)
sale_list.save()
sale_list_serializer = SaleListSerializer(sale_list, context={"request": request})
return Response({"sale_list": sale_list_serializer.data})
@api_view(["POST"])
def receive_articles(request, pk):
sale_list = (
SaleList.objects.select_related("event", "owner")
.prefetch_related("items", "items__event", "items__cart")
.get(pk=pk)
)
sale_list.is_in_stock = request.data.get("is_in_stock", False)
sale_list.is_for_collection = request.data.get("is_for_collection", False)
sale_list.save()
sale_list_serializer = SaleListSerializer(sale_list, context={"request": request})
return Response({"sale_list": sale_list_serializer.data})
@api_view(["POST"])
def return_articles(request, pk):
sale_list = (
SaleList.objects.select_related("event", "owner")
.prefetch_related("items", "items__event", "items__cart")
.get(pk=pk)
)
sale_list.is_unsold_returned = request.data.get("is_unsold_returned", False)
sale_list.save()
sale_list_serializer = SaleListSerializer(sale_list, context={"request": request})
return Response({"sale_list": sale_list_serializer.data})
@api_view(["POST"])
def payoff_sale_list(request, pk):
sale_list = (
SaleList.objects.select_related("event", "owner")
.prefetch_related("items", "items__event", "items__cart")
.get(pk=pk)
)
sale_list.is_paid_off = request.data.get("is_paid_off", False)
sale_list.save()
sale_list_serializer = SaleListSerializer(sale_list, context={"request": request})
return Response({"sale_list": sale_list_serializer.data})
class SaleListRetrieveAPIView(RetrieveAPIView):
queryset = (
SaleList.objects.all()
.select_related("event", "owner")
.prefetch_related("items", "items__event", "items__cart")
)
serializer_class = SaleListSerializer
@api_view(["POST"])
def create_cart(request):
cart = Cart.objects.create()
cart_serializer = CartSerializer(cart, context={"request": request})
return Response({"cart": cart_serializer.data})
@api_view(["POST"])
def mark_cart_as_paid(request, cart_id):
cart = Cart.objects.select_related("event").get(pk=cart_id)
cart.is_paid = request.data.get("is_paid", False)
cart.save()
cart_serializer = CartSerializer(cart, context={"request": request})
return Response({"cart": cart_serializer.data})
class CartRetrieveAPIView(RetrieveAPIView):
queryset = Cart.objects.all().select_related("event")
serializer_class = CartSerializer
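# Illustrative request body for add_article_to_cart below (field name taken from
# the code): {"article_barcode": "<article barcode>"}; an article that is already
# sold is rejected with error_code "ARTICLE_ALREADY_SOLD".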
@api_view(["POST"])
def add_article_to_cart(request, cart_id):
try:
article = Article.objects.select_related(
"event", "cart", "cart__event", "sale_list", "sale_list__event"
).get_by_barcode(
request.data["article_barcode"], event=Event.objects.get(is_active=True)
)
if article.is_sold:
return Response(
{
"error": _("Article %(article_id)s is already sold to %(cart_id)s!")
% {"article_id": article.pk, "cart_id": article.cart_id},
"error_code": "ARTICLE_ALREADY_SOLD",
}
)
article.cart_id = cart_id
article.save()
article_serializer = ArticleSerializer(article, context={"request": request})
cart = Cart.objects.select_related("event").get(pk=cart_id)
cart_serializer = CartSerializer(cart, context={"request": request})
return Response(
{"article": article_serializer.data, "cart": cart_serializer.data}
)
except Article.DoesNotExist:
return Response(
{"error": _("Article not found"), "error_code": "ARTICLE_NOT_FOUND"}
)
@api_view(["POST"])
def remove_article_from_cart(request, cart_id):
try:
article = Article.objects.select_related(
"event", "cart", "cart__event", "sale_list", "sale_list__event"
).get(pk=request.data["id"], event=Event.objects.get(is_active=True))
if str(article.cart_id) != str(cart_id):
return Response(
{
"error": _("Article %s is not in cart!") % article.pk,
"error_code": "ARTICLE_NOT_IN_CART",
}
)
article.cart = None
article.save()
article_serializer = ArticleSerializer(article, context={"request": request})
cart = Cart.objects.select_related("event").get(pk=cart_id)
cart_serializer = CartSerializer(cart, context={"request": request})
return Response(
{"article": article_serializer.data, "cart": cart_serializer.data}
)
except Article.DoesNotExist:
return Response(
{"error": _("Article not found"), "error_code": "ARTICLE_NOT_FOUND"}
)
class ArticleRetrieveAPIView(RetrieveAPIView):
queryset = Article.objects.all().select_related("event", "sale_list", "cart")
serializer_class = ArticleSerializer
@api_view(["POST"])
def find_article(request):
try:
article = Article.objects.select_related(
"event", "cart", "cart__event", "sale_list", "sale_list__event"
).get_by_barcode(
request.data["barcode"], event=Event.objects.get(is_active=True)
)
article_serializer = ArticleSerializer(article, context={"request": request})
return Response({"article": article_serializer.data})
except Article.DoesNotExist:
return Response(
{
"error": _('Der Artikel "%s" konnte leider nicht gefunden werden.')
% request.data["barcode"]
}
)
@api_view(["POST"])
def mark_stored_external(request):
try:
article = Article.objects.get_by_barcode(
request.data["barcode"], event=Event.objects.get(is_active=True)
)
article.is_stored_external = True
article.save()
return Response({"status": "OK"})
except Article.DoesNotExist:
return Response(
{
"error": _('Der Artikel "%s" konnte leider nicht gefunden werden.')
% request.data["barcode"]
}
)
class EventRetrieveAPIView(RetrieveAPIView):
queryset = Event.objects.all()
serializer_class = EventSerializer
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, First Party Software
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import json
import datetime
from datetime import timedelta
from django.utils import timezone
from django import forms
from django.db import connection
from django.shortcuts import render
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_page
from servo.stats.forms import *
from servo.stats.queries import *
from servo.models import User, Order
class ServoTimeDelta:
def __init__(self, td):
self.td = td
def days(self):
return self.td.days
def workdays(self):
pass
def hours(self):
return self.td.seconds//3600
def nonzero(self):
return self.hours() > 0
def prep_view(request):
"""
Prepares the stats view
"""
title = _('Statistics')
profile = request.user
location = request.user.location
initial = {
'location' : location.pk,
'end_date' : str(default_end_date),
'start_date': str(default_start_date),
'timescale' : default_timescale,
}
group = request.user.get_group()
if group:
initial['group'] = group.pk
# Remember the previous stats filter
if request.session.get('stats_filter'):
initial.update(request.session['stats_filter'])
request.session['stats_filter'] = initial
return locals()
def index(request):
"""
/stats/
"""
data = prep_view(request)
form = TechieStatsForm(initial=data['initial'])
if request.method == 'POST':
form = TechieStatsForm(request.POST, initial=data['initial'])
if form.is_valid():
request.session['stats_filter'] = form.serialize()
data['form'] = form
return render(request, "stats/index.html", data)
#@cache_page(15*60)
def data(request, query):
result = []
stats = StatsManager()
cursor = connection.cursor()
report, what = query.split('/')
locations = request.user.locations
params = request.session['stats_filter']
timescale = params.get('timescale', default_timescale)
location = params.get('location', request.user.location)
if params.get('location'):
location = Location.objects.get(pk=params['location'])
else:
location = request.user.location
try:
location_id = location.pk
except AttributeError:
location_id = 0
start_date = params.get('start_date', default_start_date)
end_date = params.get('end_date', default_end_date)
queues = request.user.queues.all()
try:
users = params.get('group').user_set
except AttributeError:
users = User.objects.filter(location=location)
if report == "sales":
if what == "invoices":
for i in queues:
data = stats.sales_invoices(timescale, i.pk, start_date, end_date)
result.append({'label': i.title, 'data': data})
if what == "purchases":
for i in queues:
data = stats.sales_purchases(timescale, i.pk, start_date, end_date)
result.append({'label': i.title, 'data': data})
if what == "parts":
i = 0
data = []
labels = []
results = stats.sales_parts_per_labtier(start_date, end_date)
for r in results:
data.append([i, r[1]])
labels.append([i, r[0]])
i += 1
result.append({'label': labels, 'data': data})
if what == "personal":
location_id = request.user.get_location().id
users = User.objects.filter(pk=request.user.pk)
for i in users.filter(is_active=True):
data = stats.order_runrate(timescale, location_id, i.pk, start_date, end_date)
result.append({'label': i.get_full_name(), 'data': data})
if what == "runrate":
for i in users.filter(is_active=True):
data = stats.order_runrate(timescale, location_id, i.pk, start_date, end_date)
result.append({'label': i.get_full_name(), 'data': data})
if report == "created":
if what == "user":
for i in location.user_set.all():
data = stats.orders_created_by(timescale,
location_id,
i.pk,
start_date,
end_date)
result.append({'label': i.get_full_name(), 'data': data})
if what == "location":
for i in locations.all():
data = stats.orders_created_at(timescale, i.pk, start_date, end_date)
result.append({'label': i.title, 'data': data})
if report == "closed":
if what == "location":
for i in locations.all():
data = stats.orders_closed_at(timescale, i.pk, start_date, end_date)
result.append({'label': i.title, 'data': data})
if what == "queue":
for i in queues:
data = stats.orders_closed_in(
timescale,
location.pk,
i.pk,
start_date,
end_date)
result.append({'label': i.title, 'data': data})
if what == "count":
for i in queues:
data = stats.order_count(timescale, location_id, i.pk, start_date, end_date)
result.append({'label': i.title, 'data': data})
if report == "status":
status = params.get('status')
if what == "location":
for i in locations.all():
data = stats.statuses_per_location(
timescale,
i.pk,
status,
start_date,
end_date)
result.append({'label': i.title, 'data': data})
if what == "tech":
for i in User.objects.filter(location=location, is_active=True):
data = stats.statuses_per_user(
timescale,
i.pk,
status,
start_date,
end_date)
result.append({'label': i.get_name(), 'data': data})
if report == "turnaround":
if what == "location":
for i in locations.all():
data = stats.turnaround_per_location(
timescale,
i.pk,
start_date,
end_date)
result.append({'label': i.title, 'data': data})
if report == "runrate":
if what == "location":
for i in locations.all():
data = stats.runrate_per_location(
timescale,
i.pk,
start_date,
end_date)
result.append({'label': i.title, 'data': data})
if report == "distribution":
if what == "location":
result = stats.distribution_per_location(start_date, end_date)
if what == "turnaround":
for i in queues:
data = stats.order_turnaround(
timescale,
location_id,
i.pk,
start_date,
end_date
)
result.append({'label': i.title, 'data': data})
if what == "queues":
cursor.execute("""SELECT q.title, COUNT(*)
FROM servo_order o LEFT OUTER JOIN servo_queue q on (o.queue_id = q.id)
WHERE (o.created_at, o.created_at) OVERLAPS (%s, %s)
GROUP BY q.title""", [start_date, end_date])
for k, v in cursor.fetchall():
k = k or _('No Queue')
result.append({'label': k, 'data': v})
if what == "techs":
for i in users.filter(is_active=True):
cursor.execute("""SELECT COUNT(*) as p
FROM servo_order o
WHERE user_id = %s
AND location_id = %s
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY user_id""", [i.pk, location_id, start_date, end_date])
for v in cursor.fetchall():
result.append({'label': i.username, 'data': v})
return HttpResponse(json.dumps(result))
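# ---------------------------------------------------------------------------
# Hedged illustration (added, not part of the original module): data() takes
# its `query` argument in "report/what" form, as parsed by
# `report, what = query.split('/')` above. The values below are taken from
# the branches handled in data(); the constant itself is hypothetical and
# not used elsewhere.
EXAMPLE_STATS_QUERIES = (
    'sales/invoices', 'sales/purchases', 'sales/parts', 'sales/personal', 'sales/runrate',
    'created/user', 'created/location',
    'closed/location', 'closed/queue', 'closed/count',
    'status/location', 'status/tech',
    'turnaround/location', 'runrate/location',
    'distribution/location', 'distribution/turnaround',
    'distribution/queues', 'distribution/techs',
)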
def sales(request):
data = prep_view(request)
form = InvoiceStatsForm(initial=data['initial'])
if request.method == 'POST':
form = InvoiceStatsForm(request.POST, initial=data['initial'])
if form.is_valid():
request.session['stats_filter'] = form.serialize()
data['form'] = form
return render(request, "stats/sales.html", data)
def queues(request):
data = prep_view(request)
form = OrderStatsForm(initial=data['initial'])
if request.method == 'POST':
form = OrderStatsForm(request.POST, initial=data['initial'])
if form.is_valid():
request.session['stats_filter'] = form.serialize()
data['form'] = form
return render(request, "stats/queues.html", data)
def locations(request):
data = prep_view(request)
form = BasicStatsForm(initial=data['initial'])
if request.method == 'POST':
form = BasicStatsForm(request.POST, initial=data['initial'])
if form.is_valid():
request.session['stats_filter'] = form.serialize()
data['form'] = form
return render(request, "stats/locations.html", data)
def statuses(request):
data = prep_view(request)
form = StatusStatsForm(initial=data['initial'])
if request.method == 'POST':
form = StatusStatsForm(request.POST, initial=data['initial'])
if form.is_valid():
# Store the name of the status since we don't have
# IDs in events, yet
status = form.cleaned_data['status'].title
f = form.serialize()
f['status'] = status
request.session['stats_filter'] = f
data['form'] = form
return render(request, "stats/statuses.html", data)
def repairs(request):
title = _('Repair statistics')
form = NewStatsForm(initial={
'location': [request.user.location],
'queue': request.user.queues.all()
})
if request.GET.get('location'):
results = []
form = NewStatsForm(request.GET)
totals = {
'created' : 0,
'assigned' : 0,
'repairs' : 0,
'dispatched' : 0,
'tmp_orders' : [],
'turnaround' : timedelta(),
}
if not form.is_valid():
return render(request, "stats/newstats.html", locals())
cdata = form.cleaned_data
date_range = (cdata['start_date'], cdata['end_date'])
for u in User.active.filter(location=cdata['location']):
r = {'name': u.get_full_name()}
# Look at invoices first because that data may be different from
# assignment info (tech A starts, tech B finishes)
dispatched = u.invoice_set.filter(
order__queue=cdata['queue'],
order__location=cdata['location'],
created_at__range=date_range
)
if cdata.get('label'):
dispatched = dispatched.filter(order__tags=cdata['label'])
# Count each case's dispatch only once
r['dispatched'] = dispatched.values('order_id').distinct().count()
created = u.created_orders.filter(
queue=cdata['queue'],
location=cdata['location'],
created_at__range=date_range
)
if cdata.get('label'):
created = created.filter(tags=cdata['label'])
r['created'] = created.count()
totals['created'] += r['created'] # add amount to totals
assigned = u.order_set.filter(
queue=cdata['queue'],
location=cdata['location'],
started_at__range=date_range
)
if cdata.get('label'):
assigned = assigned.filter(tags=cdata['label'])
r['assigned'] = assigned.count()
if (r['assigned'] < 1) and (r['dispatched'] < 1):
continue # ... only continue with actual techs
repairs = u.created_repairs.filter(
order__queue=cdata['queue'],
order__location=cdata['location'],
submitted_at__range=date_range
)
if cdata.get('label'):
repairs = repairs.filter(order__tags=cdata['label'])
# Only count each case's GSX repair once
r['repairs'] = repairs.values('order_id').distinct().count()
totals['repairs'] += r['repairs']
totals['assigned'] += r['assigned']
totals['dispatched'] += r['dispatched']
results.append(r)
turnaround = timedelta()
# calculate turnaround time of dispatched cases
for o in dispatched:
totals['tmp_orders'].append(o.order)
for s in o.order.orderstatus_set.filter(status=cdata['status']):
if s.finished_at is None:
s.finished_at = s.order.closed_at or timezone.now()
totals['turnaround'] += (s.finished_at - s.started_at)
totals['diff'] = totals['dispatched'] - totals['assigned']
if totals['dispatched'] > 0:
totals['turnaround'] = ServoTimeDelta(totals['turnaround']/totals['dispatched'])
return render(request, "stats/newstats.html", locals())
|
|
#!/usr/bin/env python
"""Tests for administrative flows."""
import subprocess
import sys
import time
import psutil
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import email_alerts
from grr.lib import flow
from grr.lib import maintenance_utils
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.proto import flows_pb2
class TestClientConfigHandling(test_lib.FlowTestsBaseclass):
"""Test the GetConfig flow."""
def testUpdateConfig(self):
"""Ensure we can retrieve the config."""
pass
# # Only mock the pieces we care about.
# client_mock = test_lib.ActionMock("GetConfig", "UpdateConfig")
# # Fix up the client actions to not use /etc.
# conf.FLAGS.config = FLAGS.test_tmpdir + "/config.ini"
# loc = "http://www.example.com"
# grr_config = rdfvalue.GRRConfig(location=loc,
# foreman_check_frequency=3600,
# poll_min=1)
# # Write the config.
# for _ in test_lib.TestFlowHelper("UpdateConfig", client_mock,
# client_id=self.client_id,
# token=self.token,
# grr_config=grr_config):
# pass
# # Now retrieve it again to see if it got written.
# for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
# token=self.token,
# client_id=self.client_id):
# pass
# urn = aff4.ROOT_URN.Add(self.client_id)
# fd = aff4.FACTORY.Open(urn, token=self.token)
# config_dat = fd.Get(fd.Schema.GRR_CONFIG)
# self.assertEqual(config_dat.data.location, loc)
# self.assertEqual(config_dat.data.poll_min, 1)
class ClientActionRunnerArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.ClientActionRunnerArgs
class ClientActionRunner(flow.GRRFlow):
"""Just call the specified client action directly.
"""
args_type = ClientActionRunnerArgs
action_args = {}
@flow.StateHandler(next_state="End")
def Start(self):
self.CallClient(self.args.action, next_state="End", **self.action_args)
class TestAdministrativeFlows(test_lib.FlowTestsBaseclass):
def testClientKilled(self):
"""Test that client killed messages are handled correctly."""
self.email_message = {}
def SendEmail(address, sender, title, message, **_):
self.email_message.update(dict(address=address, sender=sender,
title=title, message=message))
with test_lib.Stubber(email_alerts, "SendEmail", SendEmail):
client = test_lib.CrashClientMock(self.client_id, self.token)
for _ in test_lib.TestFlowHelper(
"ListDirectory", client, client_id=self.client_id,
pathspec=rdfvalue.PathSpec(path="/"), token=self.token,
check_flow_errors=False):
pass
# We expect the email to be sent.
self.assertEqual(self.email_message.get("address", ""),
config_lib.CONFIG["Monitoring.alert_email"])
self.assertTrue(str(self.client_id) in self.email_message["title"])
# Make sure the flow state is included in the email message.
for s in ["Flow name", "ListDirectory", "current_state"]:
self.assertTrue(s in self.email_message["message"])
flow_obj = aff4.FACTORY.Open(client.flow_id, age=aff4.ALL_TIMES,
token=self.token)
self.assertEqual(flow_obj.state.context.state, rdfvalue.Flow.State.ERROR)
# Make sure crashes RDFValueCollections are created and written
# into proper locations. First check the per-client crashes collection.
client_crashes = sorted(
list(aff4.FACTORY.Open(self.client_id.Add("crashes"),
aff4_type="RDFValueCollection",
token=self.token)),
key=lambda x: x.timestamp)
self.assertTrue(len(client_crashes) >= 1)
crash = list(client_crashes)[0]
self.assertEqual(crash.client_id, self.client_id)
self.assertEqual(crash.session_id, flow_obj.session_id)
self.assertEqual(crash.client_info.client_name, "GRR Monitor")
self.assertEqual(crash.crash_type, "aff4:/flows/W:CrashHandler")
self.assertEqual(crash.crash_message, "Client killed during transaction")
# Check per-flow crash collection. Check that crash written there is
# equal to per-client crash.
flow_crashes = sorted(
list(flow_obj.GetValuesForAttribute(flow_obj.Schema.CLIENT_CRASH)),
key=lambda x: x.timestamp)
self.assertEqual(len(flow_crashes), len(client_crashes))
for a, b in zip(flow_crashes, client_crashes):
self.assertEqual(a, b)
# Check global crash collection. Check that crash written there is
# equal to per-client crash.
global_crashes = sorted(
aff4.FACTORY.Open(aff4.ROOT_URN.Add("crashes"),
aff4_type="RDFValueCollection",
token=self.token),
key=lambda x: x.timestamp)
self.assertEqual(len(global_crashes), len(client_crashes))
for a, b in zip(global_crashes, client_crashes):
self.assertEqual(a, b)
def testNannyMessage(self):
nanny_message = "Oh no!"
try:
old_send_email = email_alerts.SendEmail
self.email_message = {}
def SendEmail(address, sender, title, message, **_):
self.email_message.update(dict(address=address, sender=sender,
title=title, message=message))
email_alerts.SendEmail = SendEmail
msg = rdfvalue.GrrMessage(
session_id=rdfvalue.SessionID("aff4:/flows/W:NannyMessage"),
args=rdfvalue.DataBlob(string=nanny_message).SerializeToString(),
source=self.client_id,
auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED)
# This is normally done by the FrontEnd when a CLIENT_KILLED message is
# received.
flow.Events.PublishEvent("NannyMessage", msg, token=self.token)
# Now emulate a worker to process the event.
worker = test_lib.MockWorker(token=self.token)
while worker.Next():
pass
worker.pool.Join()
# We expect the email to be sent.
self.assertEqual(self.email_message.get("address"),
config_lib.CONFIG["Monitoring.alert_email"])
self.assertTrue(str(self.client_id) in self.email_message["title"])
# Make sure the message is included in the email message.
self.assertTrue(nanny_message in self.email_message["message"])
# Make sure crashes RDFValueCollections are created and written
# into proper locations. First check the per-client crashes collection.
client_crashes = aff4.FACTORY.Open(self.client_id.Add("crashes"),
aff4_type="RDFValueCollection",
token=self.token)
self.assertEqual(len(client_crashes), 1)
crash = list(client_crashes)[0]
self.assertEqual(crash.client_id, self.client_id)
self.assertEqual(crash.client_info.client_name, "GRR Monitor")
self.assertEqual(crash.crash_type, "aff4:/flows/W:NannyMessage")
self.assertEqual(crash.crash_message, nanny_message)
# Check global crash collection. Check that crash written there is
# equal to per-client crash.
global_crashes = aff4.FACTORY.Open(aff4.ROOT_URN.Add("crashes"),
aff4_type="RDFValueCollection",
token=self.token)
self.assertEqual(len(global_crashes), 1)
self.assertEqual(list(global_crashes)[0], crash)
finally:
email_alerts.SendEmail = old_send_email
def testStartupHandler(self):
# Clean the client records.
aff4.FACTORY.Delete(self.client_id, token=self.token)
client_mock = test_lib.ActionMock("SendStartupInfo")
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="SendStartupInfo", token=self.token):
pass
# Check the client's boot time and info.
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
client_info = fd.Get(fd.Schema.CLIENT_INFO)
boot_time = fd.Get(fd.Schema.LAST_BOOT_TIME)
self.assertEqual(client_info.client_name,
config_lib.CONFIG["Client.name"])
self.assertEqual(client_info.client_description,
config_lib.CONFIG["Client.description"])
# Check that the boot time is accurate.
self.assertAlmostEqual(psutil.BOOT_TIME, boot_time.AsSecondsFromEpoch())
# Run it again - this should not update any record.
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="SendStartupInfo", token=self.token):
pass
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self.assertEqual(boot_time.age, fd.Get(fd.Schema.LAST_BOOT_TIME).age)
self.assertEqual(client_info.age, fd.Get(fd.Schema.CLIENT_INFO).age)
# Simulate a reboot in 10 minutes.
psutil.BOOT_TIME += 600
# Run it again - this should now update the boot time.
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="SendStartupInfo", token=self.token):
pass
# Ensure only this attribute is updated.
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self.assertNotEqual(int(boot_time.age),
int(fd.Get(fd.Schema.LAST_BOOT_TIME).age))
self.assertEqual(int(client_info.age),
int(fd.Get(fd.Schema.CLIENT_INFO).age))
# Now set a new client build time.
config_lib.CONFIG.Set("Client.build_time", time.ctime())
# Run it again - this should now update the client info.
for _ in test_lib.TestFlowHelper(
"ClientActionRunner", client_mock, client_id=self.client_id,
action="SendStartupInfo", token=self.token):
pass
# Ensure the client info attribute is updated.
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self.assertNotEqual(int(client_info.age),
int(fd.Get(fd.Schema.CLIENT_INFO).age))
def testExecutePythonHack(self):
client_mock = test_lib.ActionMock("ExecutePython")
# This is the code we test. If this runs on the client mock we can check for
# this attribute.
sys.test_code_ran_here = False
code = """
import sys
sys.test_code_ran_here = True
"""
maintenance_utils.UploadSignedConfigBlob(
code, aff4_path="aff4:/config/python_hacks/test", token=self.token)
for _ in test_lib.TestFlowHelper(
"ExecutePythonHack", client_mock, client_id=self.client_id,
hack_name="test", token=self.token):
pass
self.assertTrue(sys.test_code_ran_here)
def testExecutePythonHackWithArgs(self):
client_mock = test_lib.ActionMock("ExecutePython")
sys.test_code_ran_here = 1234
code = """
import sys
sys.test_code_ran_here = py_args['value']
"""
maintenance_utils.UploadSignedConfigBlob(
code, aff4_path="aff4:/config/python_hacks/test", token=self.token)
for _ in test_lib.TestFlowHelper(
"ExecutePythonHack", client_mock, client_id=self.client_id,
hack_name="test", py_args=dict(value=5678), token=self.token):
pass
self.assertEqual(sys.test_code_ran_here, 5678)
def testExecuteBinariesWithArgs(self):
client_mock = test_lib.ActionMock("ExecuteBinaryCommand")
code = "I am a binary file"
upload_path = config_lib.CONFIG["Executables.aff4_path"].Add("test.exe")
maintenance_utils.UploadSignedConfigBlob(
code, aff4_path=upload_path, token=self.token)
class Popen(object):
"""A mock object for subprocess.Popen."""
def __init__(self, run, stdout, stderr, stdin):
Popen.running_args = run
Popen.stdout = stdout
Popen.stderr = stderr
Popen.stdin = stdin
Popen.returncode = 0
# Store the content of the executable file.
Popen.binary = open(run[0]).read()
def communicate(self): # pylint: disable=g-bad-name
return "stdout here", "stderr here"
with test_lib.Stubber(subprocess, "Popen", Popen):
for _ in test_lib.TestFlowHelper(
"LaunchBinary", client_mock, client_id=self.client_id,
binary=upload_path, command_line="--value 356", token=self.token):
pass
# Check that the executable file contains the code string.
self.assertEqual(Popen.binary, code)
# At this point, the actual binary should have been cleaned up by the
# client action so it should not exist.
self.assertRaises(IOError, open, Popen.running_args[0])
# Check the binary was run with the correct command line.
self.assertEqual(Popen.running_args[1], "--value")
self.assertEqual(Popen.running_args[2], "356")
# Check the command was in the tmp file.
self.assertTrue(Popen.running_args[0].startswith(
config_lib.CONFIG["Client.tempdir"]))
def testGetClientStats(self):
class ClientMock(object):
def GetClientStats(self, _):
response = rdfvalue.ClientStats()
for i in range(12):
sample = rdfvalue.CpuSample(
timestamp=int(i * 10 * 1e6),
user_cpu_time=10 + i,
system_cpu_time=20 + i,
cpu_percent=10 + i)
response.cpu_samples.Append(sample)
sample = rdfvalue.IOSample(
timestamp=int(i * 10 * 1e6),
read_bytes=10 + i,
write_bytes=10 + i)
response.io_samples.Append(sample)
return [response]
for _ in test_lib.TestFlowHelper("GetClientStats", ClientMock(),
token=self.token,
client_id=self.client_id):
pass
urn = self.client_id.Add("stats")
stats_fd = aff4.FACTORY.Create(urn, "ClientStats", token=self.token,
mode="rw")
sample = stats_fd.Get(stats_fd.Schema.STATS)
# Samples are taken at the following timestamps and should be split into 2
# bins as follows (sample_interval is 60000000):
# 00000000, 10000000, 20000000, 30000000, 40000000, 50000000 -> Bin 1
# 60000000, 70000000, 80000000, 90000000, 100000000, 110000000 -> Bin 2
self.assertEqual(len(sample.cpu_samples), 2)
self.assertEqual(len(sample.io_samples), 2)
self.assertAlmostEqual(sample.io_samples[0].read_bytes, 15.0)
self.assertAlmostEqual(sample.io_samples[1].read_bytes, 21.0)
self.assertAlmostEqual(sample.cpu_samples[0].cpu_percent,
sum(range(10, 16))/6.0)
self.assertAlmostEqual(sample.cpu_samples[1].cpu_percent,
sum(range(16, 22))/6.0)
self.assertAlmostEqual(sample.cpu_samples[0].user_cpu_time, 15.0)
self.assertAlmostEqual(sample.cpu_samples[1].system_cpu_time, 31.0)
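# ---------------------------------------------------------------------------
# Hedged illustration (added, not part of the original test module): a
# standalone sketch of the sample binning exercised by testGetClientStats.
# Twelve samples taken every 10 seconds (10e6 microseconds) fall into two
# 60e6-microsecond bins and each bin is averaged; the helper below is
# hypothetical and only mirrors the arithmetic behind the assertions above.
def _example_bin_average(values, timestamps, bin_size=60000000):
  """Average `values` grouped into bins of `bin_size` microseconds."""
  bins = {}
  for value, timestamp in zip(values, timestamps):
    bins.setdefault(timestamp // bin_size, []).append(value)
  return [sum(v) / float(len(v)) for _, v in sorted(bins.items())]
# _example_bin_average(range(10, 22), [i * 10 * 1000000 for i in range(12)])
# returns [12.5, 18.5], i.e. sum(range(10, 16))/6.0 and sum(range(16, 22))/6.0.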
|
|
# File: CeFs.py ; This file is part of Twister.
# version: 3.026
# Copyright (C) 2012-2014, Luxoft
# Authors:
# Andrei Costachi <[email protected]>
# Cristi Constantin <[email protected]>
# Daniel Cioata <[email protected]>
# Mihai Tudoran <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Local file system; helpers for working with files where the user is the owner.
"""
import os, sys
import time
import copy
import random
import socket
import subprocess
from plumbum import local
import rpyc
import pwd
import grp
import signal
socket.setdefaulttimeout(3)
TWISTER_PATH = os.getenv('TWISTER_PATH')
if not TWISTER_PATH:
print '$TWISTER_PATH environment variable is not set! Exiting!'
exit(1)
if TWISTER_PATH not in sys.path:
sys.path.append(TWISTER_PATH)
from common.helpers import FsBorg, userHome
from common.tsclogging import logError, logInfo, logWarning, logDebug
class BaseFS(object):
"""
Base file system class.
"""
name = ''
def _usr_service(self, *arg, **kargs):
"""
This method is overwritten by the child classes
"""
pass
def __del__(self):
"""
Kill all services for a user.
"""
logInfo('Killing all services for the current CE.')
for user in self._services:
proc = self._services[user]['proc']
read_conn = self._services[user]['conn_read']
write_conn = self._services[user]['conn_write']
try:
read_conn.close()
except Exception as err:
logError('Cannot close connection: `{}`, exception `{}`!'.format(read_conn, err))
try:
write_conn.close()
except Exception as err:
logError('Cannot close connection: `{}`, exception `{}`!'.format(write_conn, err))
try:
proc.terminate()
except Exception as err:
logError('Cannot stop service: `{}`, exception `{}`!'.format(proc, err))
try:
time.sleep(0.1)
os.killpg(proc.pid, signal.SIGTERM)
time.sleep(0.1)
proc.kill()
except:
pass
def _kill(self, user):
"""
Kill all services for a user.
"""
p_ps = local['ps']
grep = local['grep']
try:
pids = (p_ps['aux'] | grep['/server/UserService.py'] | grep['^' + user] | grep[self.name])()
except Exception:
return
# Kill all leftover processes
for line in pids.strip().splitlines():
std_li = line.strip().decode('utf').split()
p_pid = int(std_li[1])
del std_li[2:5]
if '/bin/sh' in std_li:
continue
if '/bin/grep' in std_li:
continue
logDebug('User {}: Killing ugly zombie `{}`.'.format(user, ' '.join(std_li)))
try:
os.kill(p_pid, 9)
except:
pass
# ----- USER ---------------------------------------------------------------
def is_folder(self, user, fpath):
"""
Returns True or False. Client access via RPyc.
"""
if not fpath:
return '*ERROR* Empty `fpath` parameter on is folder, user `{}`!'.format(user)
srvr = self._usr_service(user)
if srvr:
try:
return srvr.root.is_folder(fpath)
except Exception as exp_err:
err = '*ERROR* Cannot detect file/folder `{}`, user `{}`! {}'.format(fpath, user, exp_err)
logWarning(err)
return err
else:
return '*ERROR* Cannot access the UserService on is folder, user `{}`!'.format(user)
def file_size(self, user, fpath):
"""
Get file size for 1 file. Client access via RPyc.
"""
if not fpath:
return False
srvr = self._usr_service(user)
if srvr:
try:
return srvr.root.file_size(fpath)
except Exception:
return -1
else:
return -1
def read_user_file(self, user, fpath, flag='r', fstart=0):
"""
Read 1 file. Client access via RPyc.
"""
if not fpath:
return '*ERROR* Empty `fpath` parameter on read file, user `{}`!'.format(user)
srvr = self._usr_service(user)
if srvr:
try:
return srvr.root.read_file(fpath, flag, fstart)
except Exception as exp_err:
err = '*ERROR* Cannot read file `{}`, user `{}`! {}'.format(fpath, user, exp_err)
logWarning(err)
return err
else:
return '*ERROR* Cannot access the UserService on read file, user `{}`!'.format(user)
def write_user_file(self, user, fpath, fdata, flag='w'):
"""
Write 1 file. Client access via RPyc.
"""
if not fpath:
return '*ERROR* Empty `fpath` parameter on write file, user `{}`!'.format(user)
srvr = self._usr_service(user, 'write')
if len(fdata) > 20*1000*1000:
err = '*ERROR* File data too long `{}`: {}; User {}.'.format(fpath, len(fdata), user)
logWarning(err)
return err
if srvr:
try:
return srvr.root.write_file(fpath, fdata, flag)
except Exception as exp_err:
err = '*ERROR* Cannot write into file `{}`, user `{}`! {}'.format(fpath, user, exp_err)
logWarning(err)
return err
else:
return '*ERROR* Cannot access the UserService on write file, user `{}`!'.format(user)
def copy_user_file(self, user, fpath, newpath):
"""
Copy 1 user file.
"""
if not fpath:
return '*ERROR* Empty `fpath` parameter on copy file, user `{}`!'.format(user)
srvr = self._usr_service(user, 'write')
if srvr:
return srvr.root.copy_file(fpath, newpath)
else:
return '*ERROR* Cannot access the UserService on copy file, user `{}`!'.format(user)
def move_user_file(self, user, fpath, newpath):
"""
Move/rename a user file.
"""
if not fpath:
return '*ERROR* Empty `fpath` parameter on move file, user `{}`!'.format(user)
srvr = self._usr_service(user, 'write')
if srvr:
return srvr.root.move_file(fpath, newpath)
else:
return '*ERROR* Cannot access the UserService on move file, user `{}`!'.format(user)
def delete_user_file(self, user, fpath):
"""
Delete user file.
"""
if not fpath:
return '*ERROR* Empty `fpath` parameter on delete file, user `{}`!'.format(user)
srvr = self._usr_service(user)
if srvr:
return srvr.root.delete_file(fpath)
else:
return '*ERROR* Cannot access the UserService on delete file, user `{}`!'.format(user)
def create_user_folder(self, user, fdir):
"""
Create a folder in user client directory.
"""
if not fdir:
return '*ERROR* Empty `fdir` parameter on create folder, user `{}`!'.format(user)
srvr = self._usr_service(user)
if srvr:
return srvr.root.create_folder(fdir)
else:
return '*ERROR* Cannot access the UserService on create folder, user `{}`!'.format(user)
def list_user_files(self, user, fdir, hidden=True, recursive=True, accept=[], reject=[]):
"""
List the files in user directory.
"""
if not fdir:
return '*ERROR* Empty `fdir` parameter on list files, user `{}`!'.format(user)
srvr = self._usr_service(user)
if srvr:
try:
files = srvr.root.list_files(fdir, hidden, recursive, accept, reject)
return copy.copy(files)
except Exception as exp_err:
err = '*ERROR* Cannot list files `{}`, user `{}`! {}'.format(fdir, user, exp_err)
logWarning(err)
return err
else:
return '*ERROR* Cannot access the UserService on list files, user `{}`!'.format(user)
def delete_user_folder(self, user, fdir):
"""
Delete a user folder.
"""
if not fdir:
return '*ERROR* Empty `fdir` parameter on delete folder, user `{}`!'.format(user)
srvr = self._usr_service(user)
if srvr:
return srvr.root.delete_folder(fdir)
else:
return '*ERROR* Cannot access the UserService on delete folder, user `{}`!'.format(user)
def targz_user_folder(self, user, fdir, root=''):
"""
Tar.gz a folder, or file.
"""
if not fdir:
return '*ERROR* Empty `fdir` parameter on tar.gz folder, user `{}`!'.format(user)
srvr = self._usr_service(user)
if srvr:
return srvr.root.targz_folder(fdir, root)
else:
return '*ERROR* Cannot access the UserService on tar.gz folder, user `{}`!'.format(user)
def detect_libraries(self, user, files):
"""
Autodetect libraries: parses all the tests and finds the import statements.
Returns a list of the modules not available by default in python path.
"""
srvr = self._usr_service(user)
if srvr:
libs = srvr.root.detect_libraries(files)
return copy.copy(libs)
else:
return '*ERROR* Cannot access the UserService on detect libraries, user `{}`!'.format(user)
# ----- SYSTEM -------------------------------------------------------------
@staticmethod
def sys_file_size(fpath):
"""
Get file size for 1 file. ROOT access.
"""
if not fpath:
return False
try:
fsize = os.stat(fpath).st_size
# logDebug('File `{}` is size `{}`.'.format(fpath, fsize))
return fsize
except Exception as exp_err:
err = '*ERROR* Cannot find file `{}`! {}'.format(fpath, exp_err)
logWarning(err)
return err
@staticmethod
def read_system_file(fpath, flag='r', fstart=0):
"""
Read 1 file. ROOT access.
"""
if not fpath:
return False
if flag not in ['r', 'rb']:
err = '*ERROR* Invalid flag `{}`! Cannot read!'.format(flag)
logWarning(err)
return err
if not os.path.isfile(fpath):
err = '*ERROR* No such file `{}`!'.format(fpath)
logWarning(err)
return err
try:
with open(fpath, flag) as file_p:
# logDebug('Reading file `{}`, flag `{}`.'.format(fpath, flag))
if fstart:
file_p.seek(fstart)
fdata = file_p.read()
if len(fdata) > 20*1000*1000:
err = '*ERROR* File data too long `{}`: {}!'.format(fpath, len(fdata))
logWarning(err)
return err
return fdata
except Exception as exp_err:
err = '*ERROR* Cannot read file `{}`! {}'.format(fpath, exp_err)
logWarning(err)
return err
@staticmethod
def write_system_file(fpath, fdata, flag='a'):
"""
Write data in a file. ROOT access.
Overwrite or append, ascii or binary.
"""
if not fpath:
return False
if flag not in ['w', 'wb', 'a', 'ab']:
err = '*ERROR* Invalid flag `{}`! Cannot write!'.format(flag)
logWarning(err)
return err
try:
with open(fpath, flag) as file_p:
file_p.write(fdata)
# if flag == 'w':
# logDebug('Written `{}` chars in ascii file `{}`.'.format(len(fdata), fpath))
# elif flag == 'wb':
# logDebug('Written `{}` chars in binary file `{}`.'.format(len(fdata), fpath))
# elif flag == 'a':
# logDebug('Appended `{}` chars in ascii file `{}`.'.format(len(fdata), fpath))
# else:
# logDebug('Appended `{}` chars in binary file `{}`.'.format(len(fdata), fpath))
return True
except Exception as exp_err:
err = '*ERROR* Cannot write into file `{}`! {}'.\
format(fpath, exp_err)
logWarning(err)
return err
def delete_system_file(self, fname):
""" Dummy method """
pass
def create_system_folder(self, fdir):
""" Dummy method """
pass
def list_system_files(self, folder, hidden=True, recursive=True, accept=[], reject=[]):
"""
List all files, recursively.
"""
if folder == '/':
base_path = '/'
logWarning('*WARN* Listing folders from system ROOT.')
recursive = False
else:
base_path = folder.rstrip('/')
if not os.path.isdir(folder):
err = '*ERROR* Invalid folder path `{}`!'.format(folder)
logWarning(err)
return err
def dirList(path):
"""
Create recursive list of folders and files from base path.
The format of a node is: {"path": "/...", "data": "name", "folder": true|false, "children": []}
"""
# The node is valid ?
if not path:
return False
# Cleanup '/'
if path != '/':
path = path.rstrip('/')
# This is folder ?
if os.path.isfile(path):
return False
len_path = len(base_path) + 1
dlist = [] # Folders list
flist = [] # Files list
try:
names = sorted(os.listdir(path), key=str.lower)
except Exception as exp_err:
logWarning('*WARN* Cannot list folder `{}`: `{}`!'.\
format(path, exp_err))
return []
# Cycle a folder
for fname in names:
long_path = path + '/' + fname
# If Accept is active and file doesn't match, ignore file
if accept and os.path.isfile(long_path):
valid = True
if isinstance(accept, list):
# If nothing from the Accept matches the file
if True not in [(long_path.startswith(f) or long_path.endswith(f)) for f in accept]:
valid = False
elif isinstance(accept, str):
if not (long_path.startswith(accept) or long_path.endswith(accept)):
valid = False
if not valid:
continue
# If Reject is active and file matches, ignore the file
if reject and os.path.isfile(long_path):
valid = True
if isinstance(reject, list):
# If nothing from the Reject matches the file
if True in [(long_path.startswith(f) or long_path.endswith(f)) for f in reject]:
valid = False
elif isinstance(reject, str):
if long_path.startswith(reject) or long_path.endswith(reject):
valid = False
if not valid:
continue
# Ignore hidden files
if hidden and fname[0] == '.':
continue
# Meta info
try:
fstat = os.stat(long_path)
try:
uname = pwd.getpwuid(fstat.st_uid).pw_name
except Exception:
uname = fstat.st_uid
try:
gname = grp.getgrgid(fstat.st_gid).gr_name
except Exception:
gname = fstat.st_gid
meta_info = '{}|{}|{}|{}'.\
format(uname, gname, fstat.st_size,\
time.strftime('%Y-%m-%d %H:%M:%S',\
time.localtime(fstat.st_mtime)))
except Exception:
meta_info = ''
# Semi long path
short_path = long_path[len_path:]
# Data to append
data = {'path': short_path, 'data': fname, 'meta': meta_info}
if os.path.isdir(long_path):
data['folder'] = True
# Recursive !
if recursive:
children = dirList(long_path)
else:
children = []
if children in [False, None]:
continue
data['children'] = children
dlist.append(data)
else:
flist.append(data)
# Folders first, files second
return dlist + flist
paths = {
'path' : '/',
'data' : base_path,
'folder' : True,
'children' : dirList(base_path) or []
}
clen = len(paths['children'])
logDebug('Listing dir `{}`, it has `{}` direct children.'.format(base_path, clen))
return paths
def delete_system_folder(self, fdir):
""" Dummy method """
pass
#
class LocalFS(BaseFS, FsBorg):
"""
All local file operations should be done via THIS class.
This is a singleton.
"""
def __init__(self):
FsBorg.__init__(self)
self.name = 'Local'
if os.getuid():
logError('{} FS: Central Engine must run as ROOT in order to start the User Service!'.format(self.name))
logInfo('Created {} FS.'.format(self.name))
def _usr_service(self, user, oper='read'):
"""
Launch a user service.
"""
if oper not in ['read', 'write']:
logWarning('Invalid FS operation `{}`, for user `{}`! Will reset to "read".'.format(oper, user))
oper = 'read'
# Must block here, so more users cannot launch services at the same time and lose the PID
with self._srv_lock:
# Try to re-use the user service connection, if available
conn = self._services.get(user, {}).get('conn_' + oper, None)
if conn:
try:
conn.ping(data='Hello', timeout=30.0)
# logDebug('Reuse old {} User Service connection for `{}` OK.'.format(op, user))
return conn
except Exception as exp_err:
logWarning('Cannot reuse {} User Service for `{}`: `{}`.'.format(oper, user, exp_err))
self._kill(user)
else:
logInfo('Launching a User Service for `{}`, the first time...'.format(user))
port = None
# If the server is not available, search for a free port in the safe range...
while 1:
port = random.randrange(63000, 65000)
try:
socket.create_connection((None, port), 1)
except Exception:
break
p_cmd = 'su {} -c "{} -u {}/server/UserService.py {} {}"'.\
format(user, sys.executable, TWISTER_PATH, port, self.name)
proc = subprocess.Popen(p_cmd, cwd='{}/twister'.\
format(userHome(user)), shell=True, close_fds=True,\
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.poll()
time.sleep(2.0)
config = {
'allow_pickle': True,
'allow_getattr': True,
'allow_setattr': True,
'allow_delattr': True
}
retry = 10
delay = 0.5
success = False
while retry > 0:
if success:
break
try:
stream_r = rpyc.SocketStream.connect('127.0.0.1', port, timeout=5.0)
conn_read = rpyc.connect_stream(stream_r, config=config)
conn_read.root.hello()
logDebug('Connected to User Service for `{}`, operation `read`.'.format(user))
success = True
except Exception as exp_err:
logWarning('Cannot connect to User Service for `{}` - \
Exception: `{}`! Wait {}s...'.format(user, exp_err, delay))
if success:
try:
stream_w = rpyc.SocketStream.connect('127.0.0.1', port, timeout=5.0)
conn_write = rpyc.connect_stream(stream_w, config=config)
conn_write.root.hello()
logDebug('Connected to User Service for `{}`, operation `write`.'.format(user))
break
except Exception as exp_err:
logWarning('Cannot connect to User Service for `{}` \
- Exception: `{}`! Wait {}s...'.\
format(user, exp_err, delay))
success = False
time.sleep(delay)
retry -= 1
delay += 0.75
if not success:
logError('Error on starting User Service for `{}`!'.format(user))
return None
# Save the process inside the block. 99% of the time, this block is executed instantly!
self._services[user] = {'proc': proc, 'conn_read': conn_read, 'conn_write': conn_write, 'port': port}
logDebug('User Service for `{}` launched on `127.0.0.1:{}` - PID `{}`.'.format(user, port, proc.pid))
return self._services[user].get('conn_' + oper, None)
#
if __name__ == '__main__':
FS_1 = LocalFS()
FS_2 = LocalFS()
assert FS_1 == FS_2, 'Not equal!'
assert FS_1 is FS_2, 'Not identical!'
print FS_1
print FS_2
print 'Ok.'
# Eof()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.common import exception
from heat.engine import stack_lock
from heat.objects import stack as stack_object
from heat.objects import stack_lock as stack_lock_object
from heat.tests import common
from heat.tests import utils
class StackLockTest(common.HeatTestCase):
def setUp(self):
super(StackLockTest, self).setUp()
self.context = utils.dummy_context()
self.stack_id = "aae01f2d-52ae-47ac-8a0d-3fde3d220fea"
self.engine_id = stack_lock.StackLock.generate_engine_id()
stack = mock.MagicMock()
stack.id = self.stack_id
stack.name = "test_stack"
stack.action = "CREATE"
self.mock_get_by_id = self.patchobject(
stack_object.Stack, 'get_by_id', return_value=stack)
class TestThreadLockException(Exception):
pass
def test_successful_acquire_new_lock(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
slock.acquire()
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
def test_failed_acquire_existing_lock_current_engine(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value=self.engine_id)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.assertRaises(exception.ActionInProgress, slock.acquire)
self.mock_get_by_id.assert_called_once_with(
self.context,
self.stack_id,
tenant_safe=False,
show_deleted=True)
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
def test_successful_acquire_existing_lock_engine_dead(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=False)
slock.acquire()
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
mock_steal.assert_called_once_with(self.stack_id, 'fake-engine-id',
self.engine_id)
def test_failed_acquire_existing_lock_engine_alive(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=True)
self.assertRaises(exception.ActionInProgress, slock.acquire)
self.mock_get_by_id.assert_called_once_with(
self.context,
self.stack_id,
tenant_safe=False,
show_deleted=True)
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
def test_failed_acquire_existing_lock_engine_dead(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
return_value='fake-engine-id2')
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=False)
self.assertRaises(exception.ActionInProgress, slock.acquire)
self.mock_get_by_id.assert_called_once_with(
self.context,
self.stack_id,
tenant_safe=False,
show_deleted=True)
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
mock_steal.assert_called_once_with(self.stack_id, 'fake-engine-id',
self.engine_id)
def test_successful_acquire_with_retry(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
side_effect=[True, None])
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=False)
slock.acquire()
mock_create.assert_has_calls(
[mock.call(self.stack_id, self.engine_id)] * 2)
mock_steal.assert_has_calls(
[mock.call(self.stack_id, 'fake-engine-id', self.engine_id)] * 2)
def test_failed_acquire_one_retry_only(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
return_value=True)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=False)
self.assertRaises(exception.ActionInProgress, slock.acquire)
self.mock_get_by_id.assert_called_with(
self.context,
self.stack_id,
tenant_safe=False,
show_deleted=True)
mock_create.assert_has_calls(
[mock.call(self.stack_id, self.engine_id)] * 2)
mock_steal.assert_has_calls(
[mock.call(self.stack_id, 'fake-engine-id', self.engine_id)] * 2)
def test_context_mgr_exception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_lock():
with slock:
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise self.TestThreadLockException
self.assertRaises(self.TestThreadLockException, check_lock)
self.assertEqual(1, stack_lock_object.StackLock.release.call_count)
def test_context_mgr_noexception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
with slock:
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
self.assertEqual(1, stack_lock_object.StackLock.release.call_count)
def test_thread_lock_context_mgr_exception_acquire_success(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_thread_lock():
with slock.thread_lock():
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise self.TestThreadLockException
self.assertRaises(self.TestThreadLockException, check_thread_lock)
self.assertEqual(1, stack_lock_object.StackLock.release.call_count)
def test_thread_lock_context_mgr_exception_acquire_fail(self):
stack_lock_object.StackLock.create = mock.Mock(
return_value=self.engine_id)
stack_lock_object.StackLock.release = mock.Mock()
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_thread_lock():
with slock.thread_lock():
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise exception.ActionInProgress
self.assertRaises(exception.ActionInProgress, check_thread_lock)
self.assertFalse(stack_lock_object.StackLock.release.called)
def test_thread_lock_context_mgr_no_exception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
with slock.thread_lock():
self.assertEqual(1, stack_lock_object.StackLock.create.call_count)
self.assertFalse(stack_lock_object.StackLock.release.called)
def test_try_thread_lock_context_mgr_exception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_thread_lock():
with slock.try_thread_lock():
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise self.TestThreadLockException
self.assertRaises(self.TestThreadLockException, check_thread_lock)
self.assertEqual(1, stack_lock_object.StackLock.release.call_count)
def test_try_thread_lock_context_mgr_no_exception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
with slock.try_thread_lock():
self.assertEqual(1, stack_lock_object.StackLock.create.call_count)
self.assertFalse(stack_lock_object.StackLock.release.called)
def test_try_thread_lock_context_mgr_existing_lock(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=1234)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_thread_lock():
with slock.try_thread_lock():
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise self.TestThreadLockException
self.assertRaises(self.TestThreadLockException, check_thread_lock)
self.assertFalse(stack_lock_object.StackLock.release.called)
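# ---------------------------------------------------------------------------
# Hedged illustration (added, not part of the original test module): the
# calling pattern exercised above, shown outside the test harness. The
# arguments are placeholders; only StackLock behaviour covered by these
# tests (acquire on entering the context manager, release on exit) is used.
def _example_stack_lock_usage(context, stack_id):
    engine_id = stack_lock.StackLock.generate_engine_id()
    slock = stack_lock.StackLock(context, stack_id, engine_id)
    with slock:
        # The lock row is created when the block is entered and released
        # when it exits, mirroring test_context_mgr_noexception above.
        pass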
|
|
# Analyzes an object and outputs numeric properties
import cv2
import numpy as np
from . import fatal_error
from . import print_image
from . import plot_image
from . import rgb2gray_hsv
from . import find_objects
from . import binary_threshold
from . import define_roi
from . import roi_objects
from . import object_composition
def report_size_marker_area(img, shape, device, debug, marker='define', x_adj=0, y_adj=0, w_adj=0, h_adj=0,
base='white', objcolor='dark', thresh_channel=None, thresh=None, filename=False):
"""Outputs numeric properties for an input object (contour or grouped contours).
Inputs:
img = image object (most likely the original), color(RGB)
shape = 'rectangle', 'circle', 'ellipse'
device = device number. Used to count steps in the pipeline
debug = None, print, or plot. Print = save to file, Plot = print to screen.
marker = 'define' or 'detect'; with 'define' you set the marker area yourself, with 'detect'
the marker is detected within the given area
x_adj = x position of shape, integer
y_adj = y position of shape, integer
w_adj = width
h_adj = height
base = background color 'white' is default
objcolor = object color is 'dark' or 'light'
thresh_channel = 'h', 's','v'
thresh = integer value
filename = name of file
Returns:
device = device number
marker_header = shape data table headers
marker_data = shape data table values
analysis_images = list of output images
:param img: numpy array
:param shape: str
:param device: int
:param debug: str
:param marker: str
:param x_adj:int
:param y_adj:int
:param w_adj:int
:param h_adj:int
:param base:str
:param objcolor: str
:param thresh_channel:str
:param thresh:int
:param filename: str
:return: device: int
:return: marker_header: str
:return: marker_data: int
:return: analysis_images: list
"""
device += 1
ori_img = np.copy(img)
if len(np.shape(img)) == 3:
ix, iy, iz = np.shape(img)
else:
ix, iy = np.shape(img)
size = ix, iy
roi_background = np.zeros(size, dtype=np.uint8)
roi_size = (ix - 5), (iy - 5)
roi = np.zeros(roi_size, dtype=np.uint8)
roi1 = roi + 1
roi_contour, roi_heirarchy = cv2.findContours(roi1, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(roi_background, roi_contour[0], -1, (255, 0, 0), 5)
if (x_adj > 0 and w_adj > 0) or (y_adj > 0 and h_adj > 0):
fatal_error('Adjusted ROI position is out of frame, this will cause problems in detecting objects')
for cnt in roi_contour:
size1 = ix, iy, 3
background = np.zeros(size1, dtype=np.uint8)
if shape == 'rectangle' and (x_adj >= 0 and y_adj >= 0):
x, y, w, h = cv2.boundingRect(cnt)
x1 = x + x_adj
y1 = y + y_adj
w1 = w + w_adj
h1 = h + h_adj
cv2.rectangle(background, (x1, y1), (x + w1, y + h1), (1, 1, 1), -1)
elif shape == 'circle':
x, y, w, h = cv2.boundingRect(cnt)
x1 = x + x_adj
y1 = y + y_adj
w1 = w + w_adj
h1 = h + h_adj
center = (int((w + x1) / 2), int((h + y1) / 2))
if h > w:
radius = int(w1 / 2)
cv2.circle(background, center, radius, (1, 1, 1), -1)
else:
radius = int(h1 / 2)
cv2.circle(background, center, radius, (1, 1, 1), -1)
elif shape == 'ellipse':
x, y, w, h = cv2.boundingRect(cnt)
x1 = x + x_adj
y1 = y + y_adj
w1 = w + w_adj
h1 = h + h_adj
center = (int((w + x1) / 2), int((h + y1) / 2))
if w > h:
cv2.ellipse(background, center, (w1 / 2, h1 / 2), 0, 0, 360, (1, 1, 1), -1)
else:
cv2.ellipse(background, center, (h1 / 2, w1 / 2), 0, 0, 360, (1, 1, 1), -1)
else:
fatal_error('Shape ' + str(shape) + ' is not "rectangle", "circle", or "ellipse"!')
markerback = cv2.cvtColor(background, cv2.COLOR_RGB2GRAY)
shape_contour, hierarchy = cv2.findContours(markerback, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(ori_img, shape_contour, -1, (255, 255, 0), 5)
if debug == 'print':
print_image(ori_img, (str(device) + '_marker_roi.png'))
elif debug == 'plot':
plot_image(ori_img)
if marker == 'define':
m = cv2.moments(markerback, binaryImage=True)
area = m['m00']
device, id_objects, obj_hierarchy = find_objects(img, markerback, device, debug)
device, obj, mask = object_composition(img, id_objects, obj_hierarchy, device, debug)
center, axes, angle = cv2.fitEllipse(obj)
major_axis = np.argmax(axes)
minor_axis = 1 - major_axis
major_axis_length = axes[major_axis]
minor_axis_length = axes[minor_axis]
eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2)
elif marker == 'detect':
if thresh_channel is not None and thresh is not None:
if base == 'white':
masked = cv2.multiply(img, background)
marker1 = markerback * 255
mask1 = cv2.bitwise_not(marker1)
markstack = np.dstack((mask1, mask1, mask1))
added = cv2.add(masked, markstack)
else:
added = cv2.multiply(img, background)
device, maskedhsv = rgb2gray_hsv(added, thresh_channel, device, debug)
device, masked2a_thresh = binary_threshold(maskedhsv, thresh, 255, objcolor, device, debug)
device, id_objects, obj_hierarchy = find_objects(added, masked2a_thresh, device, debug)
device, roi1, roi_hierarchy = define_roi(added, shape, device, None, 'default', debug, True, x_adj, y_adj,
w_adj, h_adj)
device, roi_o, hierarchy3, kept_mask, obj_area = roi_objects(img, 'partial', roi1, roi_hierarchy,
id_objects, obj_hierarchy, device, debug)
device, obj, mask = object_composition(img, roi_o, hierarchy3, device, debug)
cv2.drawContours(ori_img, roi_o, -1, (0, 255, 0), -1, lineType=8, hierarchy=hierarchy3)
m = cv2.moments(mask, binaryImage=True)
area = m['m00']
center, axes, angle = cv2.fitEllipse(obj)
major_axis = np.argmax(axes)
minor_axis = 1 - major_axis
major_axis_length = axes[major_axis]
minor_axis_length = axes[minor_axis]
eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2)
else:
fatal_error('thresh_channel and thresh must be defined in detect mode')
else:
fatal_error("marker must be either in 'detect' or 'define' mode")
analysis_images = []
if filename:
out_file = str(filename[0:-4]) + '_sizemarker.jpg'
print_image(ori_img, out_file)
analysis_images.append(['IMAGE', 'marker', out_file])
if debug == 'print':
print_image(ori_img, (str(device) + '_marker_shape.png'))
elif debug == 'plot':
plot_image(ori_img)
marker_header = (
'HEADER_MARKER',
'marker_area',
'marker_major_axis_length',
'marker_minor_axis_length',
'marker_eccentricity'
)
marker_data = (
'MARKER_DATA',
area,
major_axis_length,
minor_axis_length,
eccentricity
)
return device, marker_header, marker_data, analysis_images
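# ---------------------------------------------------------------------------
# Hedged example (added for illustration, not part of the original module):
# a minimal sketch of calling report_size_marker_area in 'detect' mode on an
# RGB image containing a white size marker. The threshold channel/value and
# the helper name are assumptions chosen purely for illustration.
def _example_report_size_marker(img, device=0):
    device, marker_header, marker_data, analysis_images = report_size_marker_area(
        img, 'rectangle', device, debug=None, marker='detect',
        x_adj=0, y_adj=0, w_adj=0, h_adj=0,
        base='white', objcolor='dark', thresh_channel='v', thresh=120,
        filename=False)
    return device, marker_header, marker_data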
|
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import difflib
import json
import textwrap
from typing import Dict, cast
from typing_extensions import Literal
from pants.base.build_environment import pants_version
from pants.help.help_formatter import HelpFormatter
from pants.help.help_info_extracter import AllHelpInfo, HelpJSONEncoder
from pants.help.help_tools import ToolHelpInfo
from pants.help.maybe_color import MaybeColor
from pants.option.arg_splitter import (
AllHelp,
HelpRequest,
NoGoalHelp,
ThingHelp,
UnknownGoalHelp,
VersionHelp,
)
from pants.option.scope import GLOBAL_SCOPE
from pants.util.docutil import bin_name, terminal_width
from pants.util.strutil import first_paragraph, hard_wrap, pluralize
class HelpPrinter(MaybeColor):
"""Prints general and goal-related help to the console."""
def __init__(
self,
*,
help_request: HelpRequest,
all_help_info: AllHelpInfo,
color: bool,
) -> None:
super().__init__(color)
self._help_request = help_request
self._all_help_info = all_help_info
self._width = terminal_width()
def print_help(self) -> Literal[0, 1]:
"""Print help to the console."""
def print_hint() -> None:
print(f"Use `{self.maybe_green(bin_name() + ' help')}` to get help.")
print(f"Use `{self.maybe_green(bin_name() + ' help goals')}` to list goals.")
if isinstance(self._help_request, VersionHelp):
print(pants_version())
elif isinstance(self._help_request, AllHelp):
self._print_all_help()
elif isinstance(self._help_request, ThingHelp):
self._print_thing_help()
elif isinstance(self._help_request, UnknownGoalHelp):
# Only print help and suggestions for the first unknown goal.
# It gets confusing to try and show suggestions for multiple cases.
unknown_goal = self._help_request.unknown_goals[0]
print(f"Unknown goal: {self.maybe_red(unknown_goal)}")
did_you_mean = list(
difflib.get_close_matches(
unknown_goal, self._all_help_info.name_to_goal_info.keys()
)
)
if did_you_mean:
formatted_matches = self._format_did_you_mean_matches(did_you_mean)
print(f"Did you mean {formatted_matches}?")
print_hint()
return 1
elif isinstance(self._help_request, NoGoalHelp):
print("No goals specified.")
print_hint()
return 1
return 0
def _print_title(self, title_text: str) -> None:
title = self.maybe_green(f"{title_text}\n{'-' * len(title_text)}")
print(f"\n{title}\n")
def _print_all_help(self) -> None:
print(self._get_help_json())
def _print_thing_help(self) -> None:
"""Print a help screen.
        Assumes that self._help_request is an instance of ThingHelp.
        Note: Only useful if called after options have been registered.
"""
help_request = cast(ThingHelp, self._help_request)
things = set(help_request.things)
if things:
for thing in sorted(things):
if thing == "goals":
self._print_all_goals()
elif thing == "subsystems":
self._print_all_subsystems()
elif thing == "targets":
self._print_all_targets()
elif thing == "global":
self._print_options_help(GLOBAL_SCOPE, help_request.advanced)
elif thing == "tools":
self._print_all_tools()
elif thing == "api-types":
self._print_all_api_types()
elif thing in self._all_help_info.scope_to_help_info:
self._print_options_help(thing, help_request.advanced)
elif thing in self._all_help_info.name_to_target_type_info:
self._print_target_help(thing)
elif thing in self._all_help_info.rule_output_type_to_rule_infos:
self._print_api_type_help(thing, help_request.advanced)
else:
print(self.maybe_red(f"Unknown entity: {thing}"))
else:
self._print_global_help()
def _format_summary_description(self, descr: str, chars_before_description: int) -> str:
lines = textwrap.wrap(descr, self._width - chars_before_description)
if len(lines) > 1:
lines = [
lines[0],
*(f"{' ' * chars_before_description}{line}" for line in lines[1:]),
]
return "\n".join(lines)
def _print_all_goals(self) -> None:
goal_descriptions: Dict[str, str] = {}
for goal_info in self._all_help_info.name_to_goal_info.values():
if goal_info.is_implemented:
goal_descriptions[goal_info.name] = goal_info.description
self._print_title("Goals")
max_width = max((len(name) for name in goal_descriptions.keys()), default=0)
chars_before_description = max_width + 2
def format_goal(name: str, descr: str) -> str:
name = self.maybe_cyan(name.ljust(chars_before_description))
descr = self._format_summary_description(descr, chars_before_description)
return f"{name}{descr}\n"
for name, description in sorted(goal_descriptions.items()):
print(format_goal(name, first_paragraph(description)))
specific_help_cmd = f"{bin_name()} help $goal"
print(f"Use `{self.maybe_green(specific_help_cmd)}` to get help for a specific goal.\n")
def _print_all_subsystems(self) -> None:
self._print_title("Subsystems")
subsystem_description: Dict[str, str] = {}
for help_info in self._all_help_info.non_deprecated_option_scope_help_infos():
if not help_info.is_goal and help_info.scope:
subsystem_description[help_info.scope] = first_paragraph(help_info.description)
longest_subsystem_alias = max(len(alias) for alias in subsystem_description.keys())
chars_before_description = longest_subsystem_alias + 2
for alias, description in sorted(subsystem_description.items()):
alias = self.maybe_cyan(alias.ljust(chars_before_description))
description = self._format_summary_description(description, chars_before_description)
print(f"{alias}{description}\n")
specific_help_cmd = f"{bin_name()} help $subsystem"
print(
f"Use `{self.maybe_green(specific_help_cmd)}` to get help for a "
f"specific subsystem.\n"
)
def _print_all_targets(self) -> None:
self._print_title("Target types")
longest_target_alias = max(
len(alias) for alias in self._all_help_info.name_to_target_type_info.keys()
)
chars_before_description = longest_target_alias + 2
for alias, target_type_info in sorted(
self._all_help_info.name_to_target_type_info.items(), key=lambda x: x[0]
):
alias_str = self.maybe_cyan(f"{alias}".ljust(chars_before_description))
summary = self._format_summary_description(
target_type_info.summary, chars_before_description
)
print(f"{alias_str}{summary}\n")
specific_help_cmd = f"{bin_name()} help $target_type"
print(
f"Use `{self.maybe_green(specific_help_cmd)}` to get help for a specific "
f"target type.\n"
)
def _print_all_tools(self) -> None:
self._print_title("External Tools")
ToolHelpInfo.print_all(ToolHelpInfo.iter(self._all_help_info), self)
tool_help_cmd = f"{bin_name()} help $tool"
print(f"Use `{self.maybe_green(tool_help_cmd)}` to get help for a specific tool.\n")
def _print_all_api_types(self) -> None:
self._print_title("Plugin API Types")
api_type_descriptions: Dict[str, str] = {}
for api_type, rule_infos in self._all_help_info.rule_output_type_to_rule_infos.items():
if api_type.startswith("_"):
continue
api_type_descriptions[api_type] = rule_infos[0].output_desc or ""
longest_api_type_name = max(len(name) for name in api_type_descriptions.keys())
chars_before_description = longest_api_type_name + 2
for api_type, description in api_type_descriptions.items():
name = self.maybe_cyan(api_type.ljust(chars_before_description))
description = self._format_summary_description(description, chars_before_description)
print(f"{name}{description}\n")
api_help_cmd = f"{bin_name()} help $api_type"
print(f"Use `{self.maybe_green(api_help_cmd)}` to get help for a specific API type.\n")
def _print_global_help(self):
def print_cmd(args: str, desc: str):
cmd = self.maybe_green(f"{bin_name()} {args}".ljust(50))
print(f" {cmd} {desc}")
print(f"\nPants {pants_version()}")
print("\nUsage:\n")
print_cmd(
"[option ...] [goal ...] [file/target ...]",
"Attempt the specified goals on the specified files/targets.",
)
print_cmd("help", "Display this usage message.")
print_cmd("help goals", "List all installed goals.")
print_cmd("help targets", "List all installed target types.")
print_cmd("help subsystems", "List all configurable subsystems.")
print_cmd("help tools", "List all external tools.")
print_cmd("help global", "Help for global options.")
print_cmd("help-advanced global", "Help for global advanced options.")
print_cmd("help [target_type/goal/subsystem]", "Help for a target type, goal or subsystem.")
print_cmd(
"help-advanced [goal/subsystem]", "Help for a goal or subsystem's advanced options."
)
print_cmd("help-all", "Print a JSON object containing all help info.")
print("")
print(" [file] can be:")
print(f" {self.maybe_cyan('path/to/file.ext')}")
glob_str = self.maybe_cyan("'**/*.ext'")
print(
f" A path glob, such as {glob_str}, in quotes to prevent premature shell expansion."
)
print("\n [target] can be:")
print(f" {self.maybe_cyan('path/to/dir:target_name')}.")
print(
f" {self.maybe_cyan('path/to/dir')} for a target whose name is the same as the directory name."
)
print(
f" {self.maybe_cyan('path/to/dir:')} to include all targets in the specified directory."
)
print(
f" {self.maybe_cyan('path/to/dir::')} to include all targets found recursively under the directory.\n"
)
print(f"Documentation at {self.maybe_magenta('https://www.pantsbuild.org')}")
pypi_url = f"https://pypi.org/pypi/pantsbuild.pants/{pants_version()}"
print(f"Download at {self.maybe_magenta(pypi_url)}")
def _print_options_help(self, scope: str, show_advanced_and_deprecated: bool) -> None:
"""Prints a human-readable help message for the options registered on this object.
        Assumes that self._help_request is an instance of ThingHelp.
"""
help_formatter = HelpFormatter(
show_advanced=show_advanced_and_deprecated,
show_deprecated=show_advanced_and_deprecated,
color=self.color,
)
oshi = self._all_help_info.scope_to_help_info.get(scope)
if not oshi:
return
formatted_lines = help_formatter.format_options(oshi)
goal_info = self._all_help_info.name_to_goal_info.get(scope)
if goal_info:
related_scopes = sorted(set(goal_info.consumed_scopes) - {GLOBAL_SCOPE, goal_info.name})
if related_scopes:
related_subsystems_label = self.maybe_green("Related subsystems:")
formatted_lines.append(f"{related_subsystems_label} {', '.join(related_scopes)}")
formatted_lines.append("")
for line in formatted_lines:
print(line)
def _print_target_help(self, target_alias: str) -> None:
self._print_title(f"`{target_alias}` target")
tinfo = self._all_help_info.name_to_target_type_info[target_alias]
if tinfo.description:
formatted_desc = "\n".join(hard_wrap(tinfo.description, width=self._width))
print(formatted_desc)
print(f"\n\nActivated by {self.maybe_magenta(tinfo.provider)}")
print("Valid fields:")
for field in sorted(tinfo.fields, key=lambda x: x.alias):
print()
print(self.maybe_magenta(field.alias))
indent = " "
required_or_default = "required" if field.required else f"default: {field.default}"
if field.provider not in ["", tinfo.provider]:
print(self.maybe_cyan(f"{indent}from: {field.provider}"))
print(self.maybe_cyan(f"{indent}type: {field.type_hint}"))
print(self.maybe_cyan(f"{indent}{required_or_default}"))
if field.description:
formatted_desc = "\n".join(
hard_wrap(field.description, indent=len(indent), width=self._width)
)
print("\n" + formatted_desc)
print()
def _print_api_type_help(self, output_type: str, show_advanced: bool) -> None:
self._print_title(f"`{output_type}` API type")
rule_infos = self._all_help_info.rule_output_type_to_rule_infos[output_type]
if rule_infos[0].output_desc:
print("\n".join(hard_wrap(rule_infos[0].output_desc, width=self._width)))
print()
print(f"Returned by {pluralize(len(rule_infos), 'rule')}:")
for rule_info in rule_infos:
print()
print(self.maybe_magenta(rule_info.name))
indent = " "
print(self.maybe_cyan(f"{indent}activated by"), rule_info.provider)
if rule_info.input_types:
print(
self.maybe_cyan(f"{indent}{pluralize(len(rule_info.input_types), 'input')}:"),
", ".join(rule_info.input_types),
)
else:
print(self.maybe_cyan(f"{indent}no inputs"))
if show_advanced and rule_info.input_gets:
print(
f"\n{indent}".join(
hard_wrap(
self.maybe_cyan(f"{pluralize(len(rule_info.input_gets), 'get')}: ")
+ ", ".join(rule_info.input_gets),
indent=4,
width=self._width - 4,
)
)
)
if rule_info.description:
print(f"{indent}{rule_info.description}")
if rule_info.help:
print("\n" + "\n".join(hard_wrap(rule_info.help, indent=4, width=self._width)))
print()
def _get_help_json(self) -> str:
"""Return a JSON object containing all the help info we have, for every scope."""
return json.dumps(
self._all_help_info.asdict(), sort_keys=True, indent=2, cls=HelpJSONEncoder
)
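# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes a HelpRequest and an AllHelpInfo have already been built elsewhere
# (e.g. by the arg splitter and the help info extracter); the local names below
# are placeholders:
#
#     printer = HelpPrinter(
#         help_request=help_request,
#         all_help_info=all_help_info,
#         color=True,
#     )
#     exit_code = printer.print_help()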
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
##############################################################################
This module mainly implements the calculation of MOE-type descriptors, which
include LabuteASA, TPSA, slogPVSA, MRVSA, PEOEVSA, EstateVSA and VSAEstate
(60 descriptors in total).
If you have any questions about these indices, please contact us via email.
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: [email protected] and [email protected]
##############################################################################
"""
# Third party modules
from rdkit import Chem
from rdkit.Chem import MolSurf as MOE
from rdkit.Chem.EState import EState_VSA as EVSA
Version = 1.0
################################################################
def CalculateLabuteASA(mol):
"""
#################################################################
Calculation of Labute's Approximate Surface Area (ASA from MOE)
Usage:
result=CalculateLabuteASA(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
res = {}
temp = MOE.pyLabuteASA(mol, includeHs=1)
res["LabuteASA"] = round(temp, 3)
return res
def CalculateTPSA(mol):
"""
#################################################################
Calculation of topological polar surface area based on fragments.
Implementation based on the Daylight contrib program tpsa.
Usage:
result=CalculateTPSA(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
res = {}
temp = MOE.TPSA(mol)
res["MTPSA"] = round(temp, 3)
return res
def CalculateSLOGPVSA(mol, bins=None):
"""
#################################################################
MOE-type descriptors using LogP contributions and surface
area contributions.
logpBins=[-0.4,-0.2,0,0.1,0.15,0.2,0.25,0.3,0.4,0.5,0.6]
You can specify your own bins to compute some descriptors.
Usage:
result=CalculateSLOGPVSA(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
temp = MOE.SlogP_VSA_(mol, bins, force=1)
res = {}
for i, j in enumerate(temp):
res["slogPVSA" + str(i)] = round(j, 3)
return res
def CalculateSMRVSA(mol, bins=None):
"""
#################################################################
MOE-type descriptors using MR contributions and surface
area contributions.
mrBins=[1.29, 1.82, 2.24, 2.45, 2.75, 3.05, 3.63,3.8,4.0]
You can specify your own bins to compute some descriptors.
Usage:
result=CalculateSMRVSA(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
temp = MOE.SMR_VSA_(mol, bins, force=1)
res = {}
for i, j in enumerate(temp):
res["MRVSA" + str(i)] = round(j, 3)
return res
def CalculatePEOEVSA(mol, bins=None):
"""
#################################################################
MOE-type descriptors using partial charges and surface
area contributions.
chgBins=[-.3,-.25,-.20,-.15,-.10,-.05,0,.05,.10,.15,.20,.25,.30]
You can specify your own bins to compute some descriptors
Usage:
result=CalculatePEOEVSA(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
temp = MOE.PEOE_VSA_(mol, bins, force=1)
res = {}
for i, j in enumerate(temp):
res["PEOEVSA" + str(i)] = round(j, 3)
return res
def CalculateEstateVSA(mol, bins=None):
"""
#################################################################
MOE-type descriptors using Estate indices and surface area
contributions.
estateBins=[-0.390,0.290,0.717,1.165,1.540,1.807,2.05,4.69,9.17,15.0]
You can specify your own bins to compute some descriptors
Usage:
result=CalculateEstateVSA(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
temp = EVSA.EState_VSA_(mol, bins, force=1)
res = {}
for i, j in enumerate(temp):
res["EstateVSA" + str(i)] = round(j, 3)
return res
def CalculateVSAEstate(mol, bins=None):
"""
#################################################################
MOE-type descriptors using Estate indices and surface
area contributions.
vsaBins=[4.78,5.00,5.410,5.740,6.00,6.07,6.45,7.00,11.0]
You can specify your own bins to compute some descriptors
Usage:
result=CalculateVSAEstate(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
temp = EVSA.VSA_EState_(mol, bins, force=1)
res = {}
for i, j in enumerate(temp):
res["VSAEstate" + str(i)] = round(j, 3)
return res
def GetMOE(mol):
"""
#################################################################
The calculation of MOE-type descriptors (ALL).
Usage:
result=GetMOE(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
result = {}
result.update(CalculateLabuteASA(mol))
result.update(CalculateTPSA(mol))
result.update(CalculateSLOGPVSA(mol, bins=None))
result.update(CalculateSMRVSA(mol, bins=None))
result.update(CalculatePEOEVSA(mol, bins=None))
result.update(CalculateEstateVSA(mol, bins=None))
result.update(CalculateVSAEstate(mol, bins=None))
return result
#########################################################################
if __name__ == "__main__":
smi5 = ["COCCCC", "CCC(C)CC", "CC(C)CCC", "CC(C)C(C)C", "CCOCCN", "c1ccccc1N"]
smis = ["CCCC", "CCCCC", "CCCCCC", "CC(N)C(=O)O", "CC(N)C(=O)[O-].[Na+]"]
for index, smi in enumerate(smis):
m = Chem.MolFromSmiles(smi)
print(index + 1)
print(smi)
print("\t", GetMOE(m))
print("\t", len(GetMOE(m)))
|
|
#!/usr/bin/env python
"""Module with GRRWorker implementation."""
import pdb
import time
import traceback
import logging
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import master
from grr.lib import queue_manager as queue_manager_lib
from grr.lib import queues as queues_config
from grr.lib import rdfvalue
from grr.lib import registry
# pylint: disable=unused-import
from grr.lib import server_stubs
# pylint: enable=unused-import
from grr.lib import stats
from grr.lib import threadpool
from grr.lib import utils
class Error(Exception):
"""Base error class."""
class FlowProcessingError(Error):
"""Raised when flow requests/responses can't be processed."""
class GRRWorker(object):
"""A GRR worker."""
# time to wait before polling when no jobs are currently in the
# task scheduler (sec)
POLLING_INTERVAL = 2
SHORT_POLLING_INTERVAL = 0.3
SHORT_POLL_TIME = 30
# target maximum time to spend on RunOnce
RUN_ONCE_MAX_SECONDS = 300
# A class global threadpool to be used for all workers.
thread_pool = None
# This is a timed cache of locked flows. If this worker encounters a lock
# failure on a flow, it will not attempt to grab this flow until the timeout.
queued_flows = None
def __init__(self, queues=queues_config.WORKER_LIST,
threadpool_prefix="grr_threadpool",
threadpool_size=None, token=None):
"""Constructor.
Args:
queues: The queues we use to fetch new messages from.
threadpool_prefix: A name for the thread pool used by this worker.
threadpool_size: The number of workers to start in this thread pool.
token: The token to use for the worker.
Raises:
RuntimeError: If the token is not provided.
"""
logging.info("started worker with queues: " + str(queues))
self.queues = queues
self.queued_flows = utils.TimeBasedCache(max_size=10, max_age=60)
if token is None:
raise RuntimeError("A valid ACLToken is required.")
# Make the thread pool a global so it can be reused for all workers.
if GRRWorker.thread_pool is None:
if threadpool_size is None:
threadpool_size = config_lib.CONFIG["Threadpool.size"]
GRRWorker.thread_pool = threadpool.ThreadPool.Factory(
threadpool_prefix, min_threads=2, max_threads=threadpool_size)
GRRWorker.thread_pool.Start()
self.token = token
self.last_active = 0
# Well known flows are just instantiated.
self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(token=token)
self.flow_lease_time = config_lib.CONFIG["Worker.flow_lease_time"]
self.well_known_flow_lease_time = config_lib.CONFIG[
"Worker.well_known_flow_lease_time"]
def Run(self):
"""Event loop."""
try:
while 1:
if master.MASTER_WATCHER.IsMaster():
processed = self.RunOnce()
else:
processed = 0
if processed == 0:
if time.time() - self.last_active > self.SHORT_POLL_TIME:
interval = self.POLLING_INTERVAL
else:
interval = self.SHORT_POLLING_INTERVAL
time.sleep(interval)
else:
self.last_active = time.time()
except KeyboardInterrupt:
logging.info("Caught interrupt, exiting.")
self.thread_pool.Join()
def RunOnce(self):
"""Processes one set of messages from Task Scheduler.
The worker processes new jobs from the task master. For each job
we retrieve the session from the Task Scheduler.
Returns:
Total number of messages processed by this call.
"""
start_time = time.time()
processed = 0
queue_manager = queue_manager_lib.QueueManager(token=self.token)
for queue in self.queues:
      # Freezing the timestamp used by the queue manager to query/delete
      # notifications, to avoid possible race conditions.
queue_manager.FreezeTimestamp()
fetch_messages_start = time.time()
notifications_by_priority = queue_manager.GetNotificationsByPriority(
queue)
stats.STATS.RecordEvent("worker_time_to_retrieve_notifications",
time.time() - fetch_messages_start)
# Process stuck flows first
stuck_flows = notifications_by_priority.pop(
queue_manager.STUCK_PRIORITY, [])
if stuck_flows:
self.ProcessStuckFlows(stuck_flows, queue_manager)
notifications_available = []
for priority in sorted(notifications_by_priority, reverse=True):
for notification in notifications_by_priority[priority]:
# Filter out session ids we already tried to lock but failed.
if notification.session_id not in self.queued_flows:
notifications_available.append(notification)
try:
# If we spent too much time processing what we have so far, the
# active_sessions list might not be current. We therefore break here
# so we can re-fetch a more up to date version of the list, and try
# again later. The risk with running with an old active_sessions list
# is that another worker could have already processed this message,
# and when we try to process it, there is nothing to do - costing us a
# lot of processing time. This is a tradeoff between checking the data
# store for current information and processing out of date
# information.
processed += self.ProcessMessages(notifications_available,
queue_manager,
self.RUN_ONCE_MAX_SECONDS -
(time.time() - start_time))
# We need to keep going no matter what.
except Exception as e: # pylint: disable=broad-except
logging.error("Error processing message %s. %s.", e,
traceback.format_exc())
stats.STATS.IncrementCounter("grr_worker_exceptions")
if flags.FLAGS.debug:
pdb.post_mortem()
queue_manager.UnfreezeTimestamp()
# If we have spent too much time, stop.
if (time.time() - start_time) > self.RUN_ONCE_MAX_SECONDS:
return processed
return processed
def ProcessStuckFlows(self, stuck_flows, queue_manager):
stats.STATS.IncrementCounter("grr_flows_stuck", len(stuck_flows))
for stuck_flow in stuck_flows:
try:
flow.GRRFlow.TerminateFlow(
stuck_flow.session_id, reason="Stuck in the worker",
status=rdfvalue.GrrStatus.ReturnedStatus.WORKER_STUCK,
force=True, token=self.token)
except Exception: # pylint: disable=broad-except
logging.exception("Error terminating stuck flow: %s", stuck_flow)
finally:
# Remove notifications for this flow. This will also remove the
# "stuck flow" notification itself.
queue_manager.DeleteNotification(stuck_flow.session_id)
def ProcessMessages(self, active_notifications, queue_manager, time_limit=0):
"""Processes all the flows in the messages.
Precondition: All tasks come from the same queue.
Note that the server actually completes the requests in the
flow when receiving the messages from the client. We do not really
look at the messages here at all any more - we just work from the
completed messages in the flow RDFValue.
Args:
active_notifications: The list of notifications.
queue_manager: QueueManager object used to manage notifications,
requests and responses.
time_limit: If set return as soon as possible after this many seconds.
Returns:
The number of processed flows.
"""
now = time.time()
processed = 0
for notification in active_notifications:
if notification.session_id not in self.queued_flows:
if time_limit and time.time() - now > time_limit:
break
processed += 1
self.queued_flows.Put(notification.session_id, 1)
self.thread_pool.AddTask(target=self._ProcessMessages,
args=(notification,
queue_manager.Copy()),
name=self.__class__.__name__)
return processed
def _ProcessRegularFlowMessages(self, flow_obj, notification):
"""Processes messages for a given flow."""
session_id = notification.session_id
if not isinstance(flow_obj, flow.GRRFlow):
logging.warn("%s is not a proper flow object (got %s)", session_id,
type(flow_obj))
stats.STATS.IncrementCounter("worker_bad_flow_objects",
fields=[str(type(flow_obj))])
raise FlowProcessingError("Not a GRRFlow.")
runner = flow_obj.GetRunner()
if runner.schedule_kill_notifications:
# Create a notification for the flow in the future that
      # indicates that this flow is in progress. We'll delete this
# notification when we're done with processing completed
# requests. If we're stuck for some reason, the notification
# will be delivered later and the stuck flow will get
# terminated.
stuck_flows_timeout = rdfvalue.Duration(
config_lib.CONFIG["Worker.stuck_flows_timeout"])
kill_timestamp = (rdfvalue.RDFDatetime().Now() +
stuck_flows_timeout)
with queue_manager_lib.QueueManager(token=self.token) as manager:
manager.QueueNotification(session_id=session_id,
in_progress=True,
timestamp=kill_timestamp)
# kill_timestamp may get updated via flow.HeartBeat() calls, so we
# have to store it in the runner context.
runner.context.kill_timestamp = kill_timestamp
try:
runner.ProcessCompletedRequests(notification, self.thread_pool)
# Something went wrong - log it in the flow.
except Exception as e: # pylint: disable=broad-except
runner.context.state = rdfvalue.Flow.State.ERROR
runner.context.backtrace = traceback.format_exc()
logging.error("Flow %s: %s", flow_obj, e)
raise FlowProcessingError(e)
finally:
# Delete kill notification as the flow got processed and is not
# stuck.
with queue_manager_lib.QueueManager(token=self.token) as manager:
if runner.schedule_kill_notifications:
manager.DeleteNotification(
session_id, start=runner.context.kill_timestamp,
end=runner.context.kill_timestamp)
runner.context.kill_timestamp = None
if (runner.process_requests_in_order and
notification.last_status and
(runner.context.next_processed_request <=
notification.last_status)):
# We are processing requests in order and have received a
# notification for a specific request but could not process
# that request. This might be a race condition in the data
# store so we reschedule the notification in the future.
delay = config_lib.CONFIG[
"Worker.notification_retry_interval"]
manager.QueueNotification(
notification, timestamp=notification.timestamp + delay)
def _ProcessMessages(self, notification, queue_manager):
"""Does the real work with a single flow."""
flow_obj = None
session_id = notification.session_id
try:
# Take a lease on the flow:
flow_name = session_id.FlowName()
if flow_name in self.well_known_flows:
# Well known flows are not necessarily present in the data store so
# we need to create them instead of opening.
expected_flow = self.well_known_flows[flow_name].__class__.__name__
flow_obj = aff4.FACTORY.CreateWithLock(
session_id, expected_flow,
lease_time=self.well_known_flow_lease_time,
blocking=False, token=self.token)
else:
flow_obj = aff4.FACTORY.OpenWithLock(
session_id, lease_time=self.flow_lease_time,
blocking=False, token=self.token)
now = time.time()
logging.debug("Got lock on %s", session_id)
# If we get here, we now own the flow. We can delete the notifications
# we just retrieved but we need to make sure we don't delete any that
# came in later.
queue_manager.DeleteNotification(session_id, end=notification.timestamp)
if flow_name in self.well_known_flows:
stats.STATS.IncrementCounter("well_known_flow_requests",
fields=[str(session_id)])
# We remove requests first and then process them in the thread pool.
# On one hand this approach increases the risk of losing requests in
# case the worker process dies. On the other hand, it doesn't hold
# the lock while requests are processed, so other workers can
        # process well known flow requests as well.
with flow_obj:
responses = flow_obj.FetchAndRemoveRequestsAndResponses(session_id)
flow_obj.ProcessResponses(responses, self.thread_pool)
else:
with flow_obj:
self._ProcessRegularFlowMessages(flow_obj, notification)
elapsed = time.time() - now
logging.debug("Done processing %s: %s sec", session_id, elapsed)
stats.STATS.RecordEvent("worker_flow_processing_time", elapsed,
fields=[flow_obj.Name()])
# Everything went well -> session can be run again.
self.queued_flows.ExpireObject(session_id)
except aff4.LockError:
# Another worker is dealing with this flow right now, we just skip it.
# We expect lots of these when there are few messages (the system isn't
# highly loaded) but it is interesting when the system is under load to
# know if we are pulling the optimal number of messages off the queue.
# A high number of lock fails when there is plenty of work to do would
# indicate we are wasting time trying to process work that has already
# been completed by other workers.
stats.STATS.IncrementCounter("worker_flow_lock_error")
except FlowProcessingError:
# Do nothing as we expect the error to be correctly logged and accounted
# already.
pass
except Exception as e: # pylint: disable=broad-except
# Something went wrong when processing this session. In order not to spin
# here, we just remove the notification.
logging.exception("Error processing session %s: %s", session_id, e)
stats.STATS.IncrementCounter("worker_session_errors",
fields=[str(type(e))])
queue_manager.DeleteNotification(session_id)
class WorkerInit(registry.InitHook):
"""Registers worker stats variables."""
pre = ["StatsInit"]
def RunOnce(self):
"""Exports the vars.."""
stats.STATS.RegisterCounterMetric("grr_flows_stuck")
stats.STATS.RegisterCounterMetric("worker_bad_flow_objects",
fields=[("type", str)])
stats.STATS.RegisterCounterMetric("worker_session_errors",
fields=[("type", str)])
stats.STATS.RegisterCounterMetric(
"worker_flow_lock_error", docstring=("Worker lock failures. We expect "
"these to be high when the system"
"is idle."))
stats.STATS.RegisterEventMetric("worker_flow_processing_time",
fields=[("flow", str)])
stats.STATS.RegisterEventMetric("worker_time_to_retrieve_notifications")
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as summary_lib
from tensorflow.python.summary.writer import writer as writer_lib
from tensorflow.python.summary.writer.writer import SummaryToEventTransformer
from tensorflow.python.training import saver
from tensorflow.tensorboard.backend.event_processing import event_accumulator as ea
class _EventGenerator(object):
"""Class that can add_events and then yield them back.
Satisfies the EventGenerator API required for the EventAccumulator.
Satisfies the EventWriter API required to create a SummaryWriter.
Has additional convenience methods for adding test events.
"""
def __init__(self, testcase, zero_out_timestamps=False):
self._testcase = testcase
self.items = []
self.zero_out_timestamps = zero_out_timestamps
def Load(self):
while self.items:
yield self.items.pop(0)
def AddScalar(self, tag, wall_time=0, step=0, value=0):
event = event_pb2.Event(
wall_time=wall_time,
step=step,
summary=summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag=tag, simple_value=value)]))
self.AddEvent(event)
def AddHealthPill(self, wall_time, step, op_name, output_slot, elements):
event = event_pb2.Event(step=step, wall_time=wall_time)
value = event.summary.value.add(
tag='__health_pill__',
node_name='%s:%d:DebugNumericSummary' % (op_name, output_slot))
value.tensor.tensor_shape.dim.add(size=len(elements))
value.tensor.dtype = types_pb2.DT_DOUBLE
value.tensor.tensor_content = np.array(elements, dtype=np.float64).tobytes()
self.AddEvent(event)
def AddHistogram(self,
tag,
wall_time=0,
step=0,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=None,
hbucket=None):
histo = summary_pb2.HistogramProto(
min=hmin,
max=hmax,
num=hnum,
sum=hsum,
sum_squares=hsum_squares,
bucket_limit=hbucket_limit,
bucket=hbucket)
event = event_pb2.Event(
wall_time=wall_time,
step=step,
summary=summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag=tag, histo=histo)]))
self.AddEvent(event)
def AddImage(self,
tag,
wall_time=0,
step=0,
encoded_image_string=b'imgstr',
width=150,
height=100):
image = summary_pb2.Summary.Image(
encoded_image_string=encoded_image_string, width=width, height=height)
event = event_pb2.Event(
wall_time=wall_time,
step=step,
summary=summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag=tag, image=image)]))
self.AddEvent(event)
def AddAudio(self,
tag,
wall_time=0,
step=0,
encoded_audio_string=b'sndstr',
content_type='audio/wav',
sample_rate=44100,
length_frames=22050):
audio = summary_pb2.Summary.Audio(
encoded_audio_string=encoded_audio_string,
content_type=content_type,
sample_rate=sample_rate,
length_frames=length_frames)
event = event_pb2.Event(
wall_time=wall_time,
step=step,
summary=summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag=tag, audio=audio)]))
self.AddEvent(event)
def AddEvent(self, event):
if self.zero_out_timestamps:
event.wall_time = 0
self.items.append(event)
def add_event(self, event): # pylint: disable=invalid-name
"""Match the EventWriter API."""
self.AddEvent(event)
def get_logdir(self): # pylint: disable=invalid-name
"""Return a temp directory for asset writing."""
return self._testcase.get_temp_dir()
class EventAccumulatorTest(test.TestCase):
def assertTagsEqual(self, actual, expected):
"""Utility method for checking the return value of the Tags() call.
It fills out the `expected` arg with the default (empty) values for every
tag type, so that the author needs only specify the non-empty values they
are interested in testing.
Args:
actual: The actual Accumulator tags response.
expected: The expected tags response (empty fields may be omitted)
"""
empty_tags = {
ea.IMAGES: [],
ea.AUDIO: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: False,
ea.META_GRAPH: False,
ea.RUN_METADATA: [],
ea.TENSORS: [],
}
# Verifies that there are no unexpected keys in the actual response.
# If this line fails, likely you added a new tag type, and need to update
# the empty_tags dictionary above.
self.assertItemsEqual(actual.keys(), empty_tags.keys())
for key in actual:
expected_value = expected.get(key, empty_tags[key])
if isinstance(expected_value, list):
self.assertItemsEqual(actual[key], expected_value)
else:
self.assertEqual(actual[key], expected_value)
class MockingEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(MockingEventAccumulatorTest, self).setUp()
self.stubs = googletest.StubOutForTesting()
self._real_constructor = ea.EventAccumulator
self._real_generator = ea._GeneratorFromPath
def _FakeAccumulatorConstructor(generator, *args, **kwargs):
ea._GeneratorFromPath = lambda x: generator
return self._real_constructor(generator, *args, **kwargs)
ea.EventAccumulator = _FakeAccumulatorConstructor
def tearDown(self):
self.stubs.CleanUp()
ea.EventAccumulator = self._real_constructor
ea._GeneratorFromPath = self._real_generator
def testEmptyAccumulator(self):
gen = _EventGenerator(self)
x = ea.EventAccumulator(gen)
x.Reload()
self.assertTagsEqual(x.Tags(), {})
def testTags(self):
gen = _EventGenerator(self)
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
})
def testReload(self):
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {})
gen.AddScalar('s1')
gen.AddScalar('s2')
gen.AddHistogram('hst1')
gen.AddHistogram('hst2')
gen.AddImage('im1')
gen.AddImage('im2')
gen.AddAudio('snd1')
gen.AddAudio('snd2')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1', 'im2'],
ea.AUDIO: ['snd1', 'snd2'],
ea.SCALARS: ['s1', 's2'],
ea.HISTOGRAMS: ['hst1', 'hst2'],
ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
})
def testScalars(self):
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
s1 = ea.ScalarEvent(wall_time=1, step=10, value=32)
s2 = ea.ScalarEvent(wall_time=2, step=12, value=64)
gen.AddScalar('s1', wall_time=1, step=10, value=32)
gen.AddScalar('s2', wall_time=2, step=12, value=64)
acc.Reload()
self.assertEqual(acc.Scalars('s1'), [s1])
self.assertEqual(acc.Scalars('s2'), [s2])
def _compareHealthPills(self, expected_event, gotten_event):
"""Compares 2 health pills.
Args:
expected_event: The expected HealthPillEvent.
gotten_event: The gotten HealthPillEvent.
"""
self.assertEqual(expected_event.wall_time, gotten_event.wall_time)
self.assertEqual(expected_event.step, gotten_event.step)
self.assertEqual(expected_event.node_name, gotten_event.node_name)
self.assertEqual(expected_event.output_slot, gotten_event.output_slot)
self.assertEqual(len(expected_event.value), len(gotten_event.value))
for i, expected_value in enumerate(expected_event.value):
self.assertEqual(expected_value, gotten_event.value[i])
def testHealthPills(self):
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
gen.AddHealthPill(13371337, 41, 'Add', 0, range(1, 13))
gen.AddHealthPill(13381338, 42, 'Add', 1, range(42, 54))
acc = ea.EventAccumulator(gen)
acc.Reload()
# Retrieve the health pills for each node name.
gotten_events = acc.HealthPills('Add')
    self.assertEqual(2, len(gotten_events))
self._compareHealthPills(
ea.HealthPillEvent(
wall_time=13371337,
step=41,
node_name='Add',
output_slot=0,
value=range(1, 13)),
gotten_events[0])
self._compareHealthPills(
ea.HealthPillEvent(
wall_time=13381338,
step=42,
node_name='Add',
output_slot=1,
value=range(42, 54)),
gotten_events[1])
def testHistograms(self):
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
val1 = ea.HistogramValue(
min=1,
max=2,
num=3,
sum=4,
sum_squares=5,
bucket_limit=[1, 2, 3],
bucket=[0, 3, 0])
val2 = ea.HistogramValue(
min=-2,
max=3,
num=4,
sum=5,
sum_squares=6,
bucket_limit=[2, 3, 4],
bucket=[1, 3, 0])
hst1 = ea.HistogramEvent(wall_time=1, step=10, histogram_value=val1)
hst2 = ea.HistogramEvent(wall_time=2, step=12, histogram_value=val2)
gen.AddHistogram(
'hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram(
'hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
self.assertEqual(acc.Histograms('hst1'), [hst1])
self.assertEqual(acc.Histograms('hst2'), [hst2])
def testCompressedHistograms(self):
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000))
gen.AddHistogram(
'hst1',
wall_time=1,
step=10,
hmin=1,
hmax=2,
hnum=3,
hsum=4,
hsum_squares=5,
hbucket_limit=[1, 2, 3],
hbucket=[0, 3, 0])
gen.AddHistogram(
'hst2',
wall_time=2,
step=12,
hmin=-2,
hmax=3,
hnum=4,
hsum=5,
hsum_squares=6,
hbucket_limit=[2, 3, 4],
hbucket=[1, 3, 0])
acc.Reload()
# Create the expected values after compressing hst1
expected_vals1 = [
ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, 1.0), (2500, 1.25), (5000, 1.5), (7500, 1.75
), (10000, 2.0)]
]
expected_cmphst1 = ea.CompressedHistogramEvent(
wall_time=1, step=10, compressed_histogram_values=expected_vals1)
self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1])
# Create the expected values after compressing hst2
expected_vals2 = [
ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, -2),
(2500, 2),
(5000, 2 + 1 / 3),
(7500, 2 + 2 / 3),
(10000, 3)]
]
expected_cmphst2 = ea.CompressedHistogramEvent(
wall_time=2, step=12, compressed_histogram_values=expected_vals2)
self.assertEqual(acc.CompressedHistograms('hst2'), [expected_cmphst2])
def testCompressedHistogramsWithEmptyHistogram(self):
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000))
gen.AddHistogram(
'hst1',
wall_time=1,
step=10,
hmin=None,
hmax=None,
hnum=0,
hsum=0,
hsum_squares=0,
hbucket_limit=[1, 2, 3],
hbucket=[0, 0, 0])
acc.Reload()
# Create the expected values after compressing hst1
expected_vals1 = [
ea.CompressedHistogramValue(bp, val)
for bp, val in [(0, 0.0), (2500, 0), (5000, 0), (7500, 0), (10000, 0)]
]
expected_cmphst1 = ea.CompressedHistogramEvent(
wall_time=1, step=10, compressed_histogram_values=expected_vals1)
self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1])
def testCompressHistogram_uglyHistogram(self):
bps = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
histogram_values = ea.HistogramValue(
min=0.0,
max=1.0,
num=960.0,
sum=64.0,
sum_squares=64.0,
bucket_limit=[
0.0, 1e-12, 0.917246389039776, 1.0089710279437536,
1.7976931348623157e+308
],
bucket=[0.0, 896.0, 0.0, 64.0, 0.0])
histogram_event = ea.HistogramEvent(0, 0, histogram_values)
compressed_event = ea._CompressHistogram(histogram_event, bps)
vals = compressed_event.compressed_histogram_values
    self.assertEqual(tuple(v.basis_point for v in vals), bps)
self.assertAlmostEqual(vals[0].value, 0.0)
self.assertAlmostEqual(vals[1].value, 7.157142857142856e-14)
self.assertAlmostEqual(vals[2].value, 1.7003571428571426e-13)
self.assertAlmostEqual(vals[3].value, 3.305357142857143e-13)
self.assertAlmostEqual(vals[4].value, 5.357142857142857e-13)
self.assertAlmostEqual(vals[5].value, 7.408928571428571e-13)
self.assertAlmostEqual(vals[6].value, 9.013928571428571e-13)
self.assertAlmostEqual(vals[7].value, 9.998571428571429e-13)
self.assertAlmostEqual(vals[8].value, 1.0)
def testImages(self):
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
im1 = ea.ImageEvent(
wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
im2 = ea.ImageEvent(
wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
gen.AddImage(
'im1',
wall_time=1,
step=10,
encoded_image_string=b'big',
width=400,
height=300)
gen.AddImage(
'im2',
wall_time=2,
step=12,
encoded_image_string=b'small',
width=40,
height=30)
acc.Reload()
self.assertEqual(acc.Images('im1'), [im1])
self.assertEqual(acc.Images('im2'), [im2])
def testAudio(self):
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
snd1 = ea.AudioEvent(
wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
snd2 = ea.AudioEvent(
wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
gen.AddAudio(
'snd1',
wall_time=1,
step=10,
encoded_audio_string=b'big',
content_type='audio/wav',
sample_rate=44100,
length_frames=441000)
gen.AddAudio(
'snd2',
wall_time=2,
step=12,
encoded_audio_string=b'small',
content_type='audio/wav',
sample_rate=44100,
length_frames=44100)
acc.Reload()
self.assertEqual(acc.Audio('snd1'), [snd1])
self.assertEqual(acc.Audio('snd2'), [snd2])
def testKeyError(self):
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
acc.Reload()
with self.assertRaises(KeyError):
acc.Scalars('s1')
with self.assertRaises(KeyError):
acc.Scalars('hst1')
with self.assertRaises(KeyError):
acc.Scalars('im1')
with self.assertRaises(KeyError):
acc.Histograms('s1')
with self.assertRaises(KeyError):
acc.Histograms('im1')
with self.assertRaises(KeyError):
acc.Images('s1')
with self.assertRaises(KeyError):
acc.Images('hst1')
with self.assertRaises(KeyError):
acc.Audio('s1')
with self.assertRaises(KeyError):
acc.Audio('hst1')
def testNonValueEvents(self):
"""Tests that non-value events in the generator don't cause early exits."""
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=10, value=20)
gen.AddEvent(event_pb2.Event(wall_time=2, step=20, file_version='nots2'))
gen.AddScalar('s3', wall_time=3, step=100, value=1)
gen.AddHistogram('hst1')
gen.AddImage('im1')
gen.AddAudio('snd1')
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.IMAGES: ['im1'],
ea.AUDIO: ['snd1'],
ea.SCALARS: ['s1', 's3'],
ea.HISTOGRAMS: ['hst1'],
ea.COMPRESSED_HISTOGRAMS: ['hst1'],
})
def testExpiredDataDiscardedAfterRestartForFileVersionLessThan2(self):
"""Tests that events are discarded after a restart is detected.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items with the same tag
that are outdated.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
gen.AddEvent(
event_pb2.Event(
wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
    ## Check that the number of items is what it should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300 from s1
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
def testOrphanedDataNotDiscardedIfFlagUnset(self):
"""Tests that events are not discarded if purge_orphaned_data is false.
"""
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen, purge_orphaned_data=False)
gen.AddEvent(
event_pb2.Event(
wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
acc.Reload()
    ## Check that the number of items is what it should be
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
acc.Reload()
    ## Check that nothing was discarded from s1, since purge_orphaned_data is False
self.assertEqual([x.step for x in acc.Scalars('s1')],
[100, 200, 300, 101, 201, 301])
def testEventsDiscardedPerTagAfterRestartForFileVersionLessThan2(self):
"""Tests that event discards after restart, only affect the misordered tag.
If a step value is observed to be lower than what was previously seen,
this should force a discard of all previous items that are outdated, but
only for the out of order tag. Other tags should remain unaffected.
Only file versions < 2 use this out-of-order discard logic. Later versions
discard events based on the step value of SessionLog.START.
"""
warnings = []
self.stubs.Set(logging, 'warn', warnings.append)
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
gen.AddEvent(
event_pb2.Event(
wall_time=0, step=0, file_version='brain.Event:1'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=101, value=20)
gen.AddScalar('s1', wall_time=1, step=201, value=20)
gen.AddScalar('s1', wall_time=1, step=301, value=20)
gen.AddScalar('s2', wall_time=1, step=101, value=20)
gen.AddScalar('s2', wall_time=1, step=201, value=20)
gen.AddScalar('s2', wall_time=1, step=301, value=20)
acc.Reload()
## Check that we have discarded 200 and 300
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
## Check that s1 discards do not affect s2
## i.e. check that only events from the out of order tag are discarded
self.assertEqual([x.step for x in acc.Scalars('s2')], [101, 201, 301])
def testOnlySummaryEventsTriggerDiscards(self):
"""Test that file version event does not trigger data purge."""
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
gen.AddScalar('s1', wall_time=1, step=100, value=20)
ev1 = event_pb2.Event(wall_time=2, step=0, file_version='brain.Event:1')
graph_bytes = graph_pb2.GraphDef().SerializeToString()
ev2 = event_pb2.Event(wall_time=3, step=0, graph_def=graph_bytes)
gen.AddEvent(ev1)
gen.AddEvent(ev2)
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100])
def testSessionLogStartMessageDiscardsExpiredEvents(self):
"""Test that SessionLog.START message discards expired events.
This discard logic is preferred over the out-of-order step discard logic,
but this logic can only be used for event protos which have the SessionLog
enum, which was introduced to event.proto for file_version >= brain.Event:2.
"""
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
gen.AddEvent(
event_pb2.Event(
wall_time=0, step=1, file_version='brain.Event:2'))
gen.AddScalar('s1', wall_time=1, step=100, value=20)
gen.AddScalar('s1', wall_time=1, step=200, value=20)
gen.AddScalar('s1', wall_time=1, step=300, value=20)
gen.AddScalar('s1', wall_time=1, step=400, value=20)
gen.AddScalar('s2', wall_time=1, step=202, value=20)
gen.AddScalar('s2', wall_time=1, step=203, value=20)
slog = event_pb2.SessionLog(status=event_pb2.SessionLog.START)
gen.AddEvent(event_pb2.Event(wall_time=2, step=201, session_log=slog))
acc.Reload()
self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200])
self.assertEqual([x.step for x in acc.Scalars('s2')], [])
def testFirstEventTimestamp(self):
"""Test that FirstEventTimestamp() returns wall_time of the first event."""
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
gen.AddEvent(
event_pb2.Event(
wall_time=10, step=20, file_version='brain.Event:2'))
gen.AddScalar('s1', wall_time=30, step=40, value=20)
self.assertEqual(acc.FirstEventTimestamp(), 10)
def testReloadPopulatesFirstEventTimestamp(self):
"""Test that Reload() means FirstEventTimestamp() won't load events."""
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
gen.AddEvent(
event_pb2.Event(
wall_time=1, step=2, file_version='brain.Event:2'))
acc.Reload()
def _Die(*args, **kwargs): # pylint: disable=unused-argument
raise RuntimeError('Load() should not be called')
self.stubs.Set(gen, 'Load', _Die)
self.assertEqual(acc.FirstEventTimestamp(), 1)
def testFirstEventTimestampLoadsEvent(self):
"""Test that FirstEventTimestamp() doesn't discard the loaded event."""
gen = _EventGenerator(self)
acc = ea.EventAccumulator(gen)
gen.AddEvent(
event_pb2.Event(
wall_time=1, step=2, file_version='brain.Event:2'))
self.assertEqual(acc.FirstEventTimestamp(), 1)
acc.Reload()
self.assertEqual(acc.file_version, 2.0)
def testTFSummaryScalar(self):
"""Verify processing of tf.summary.scalar."""
event_sink = _EventGenerator(self, zero_out_timestamps=True)
writer = SummaryToEventTransformer(event_sink)
with self.test_session() as sess:
ipt = array_ops.placeholder(dtypes.float32)
summary_lib.scalar('scalar1', ipt)
summary_lib.scalar('scalar2', ipt * ipt)
merged = summary_lib.merge_all()
writer.add_graph(sess.graph)
for i in xrange(10):
summ = sess.run(merged, feed_dict={ipt: i})
writer.add_summary(summ, global_step=i)
accumulator = ea.EventAccumulator(event_sink)
accumulator.Reload()
seq1 = [ea.ScalarEvent(wall_time=0, step=i, value=i) for i in xrange(10)]
seq2 = [
ea.ScalarEvent(
wall_time=0, step=i, value=i * i) for i in xrange(10)
]
self.assertTagsEqual(accumulator.Tags(), {
ea.SCALARS: ['scalar1', 'scalar2'],
ea.GRAPH: True,
ea.META_GRAPH: False,
})
self.assertEqual(accumulator.Scalars('scalar1'), seq1)
self.assertEqual(accumulator.Scalars('scalar2'), seq2)
first_value = accumulator.Scalars('scalar1')[0].value
self.assertTrue(isinstance(first_value, float))
def testTFSummaryImage(self):
"""Verify processing of tf.summary.image."""
event_sink = _EventGenerator(self, zero_out_timestamps=True)
writer = SummaryToEventTransformer(event_sink)
with self.test_session() as sess:
ipt = array_ops.ones([10, 4, 4, 3], dtypes.uint8)
# This is an interesting example, because the old tf.image_summary op
# would throw an error here, because it would be tag reuse.
# Using the tf node name instead allows argument re-use to the image
# summary.
with ops.name_scope('1'):
summary_lib.image('images', ipt, max_outputs=1)
with ops.name_scope('2'):
summary_lib.image('images', ipt, max_outputs=2)
with ops.name_scope('3'):
summary_lib.image('images', ipt, max_outputs=3)
merged = summary_lib.merge_all()
writer.add_graph(sess.graph)
for i in xrange(10):
summ = sess.run(merged)
writer.add_summary(summ, global_step=i)
accumulator = ea.EventAccumulator(event_sink)
accumulator.Reload()
tags = [
u'1/images/image', u'2/images/image/0', u'2/images/image/1',
u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
]
self.assertTagsEqual(accumulator.Tags(), {
ea.IMAGES: tags,
ea.GRAPH: True,
ea.META_GRAPH: False,
})
def testTFSummaryTensor(self):
"""Verify processing of tf.summary.tensor."""
event_sink = _EventGenerator(self, zero_out_timestamps=True)
writer = SummaryToEventTransformer(event_sink)
with self.test_session() as sess:
summary_lib.tensor_summary('scalar', constant_op.constant(1.0))
summary_lib.tensor_summary('vector', constant_op.constant(
[1.0, 2.0, 3.0]))
summary_lib.tensor_summary('string',
constant_op.constant(six.b('foobar')))
merged = summary_lib.merge_all()
summ = sess.run(merged)
writer.add_summary(summ, 0)
accumulator = ea.EventAccumulator(event_sink)
accumulator.Reload()
self.assertTagsEqual(accumulator.Tags(), {
ea.TENSORS: ['scalar', 'vector', 'string'],
})
scalar_proto = accumulator.Tensors('scalar')[0].tensor_proto
scalar = tensor_util.MakeNdarray(scalar_proto)
vector_proto = accumulator.Tensors('vector')[0].tensor_proto
vector = tensor_util.MakeNdarray(vector_proto)
string_proto = accumulator.Tensors('string')[0].tensor_proto
string = tensor_util.MakeNdarray(string_proto)
self.assertTrue(np.array_equal(scalar, 1.0))
self.assertTrue(np.array_equal(vector, [1.0, 2.0, 3.0]))
self.assertTrue(np.array_equal(string, six.b('foobar')))
class RealisticEventAccumulatorTest(EventAccumulatorTest):
def setUp(self):
super(RealisticEventAccumulatorTest, self).setUp()
def testScalarsRealistically(self):
"""Test accumulator by writing values and then reading them."""
def FakeScalarSummary(tag, value):
value = summary_pb2.Summary.Value(tag=tag, simple_value=value)
summary = summary_pb2.Summary(value=[value])
return summary
directory = os.path.join(self.get_temp_dir(), 'values_dir')
if gfile.IsDirectory(directory):
gfile.DeleteRecursively(directory)
gfile.MkDir(directory)
writer = writer_lib.FileWriter(directory, max_queue=100)
with ops.Graph().as_default() as graph:
_ = constant_op.constant([2.0, 1.0])
# Add a graph to the summary writer.
writer.add_graph(graph)
meta_graph_def = saver.export_meta_graph(
graph_def=graph.as_graph_def(add_shapes=True))
writer.add_meta_graph(meta_graph_def)
run_metadata = config_pb2.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# Write a bunch of events using the writer.
for i in xrange(30):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i * i)
writer.add_summary(summ_id, i * 5)
writer.add_summary(summ_sq, i * 5)
writer.flush()
# Verify that we can load those events properly
acc = ea.EventAccumulator(directory)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.SCALARS: ['id', 'sq'],
ea.GRAPH: True,
ea.META_GRAPH: True,
ea.RUN_METADATA: ['test run'],
})
id_events = acc.Scalars('id')
sq_events = acc.Scalars('sq')
self.assertEqual(30, len(id_events))
self.assertEqual(30, len(sq_events))
for i in xrange(30):
self.assertEqual(i * 5, id_events[i].step)
self.assertEqual(i * 5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i * i, sq_events[i].value)
# Write a few more events to test incremental reloading
for i in xrange(30, 40):
summ_id = FakeScalarSummary('id', i)
summ_sq = FakeScalarSummary('sq', i * i)
writer.add_summary(summ_id, i * 5)
writer.add_summary(summ_sq, i * 5)
writer.flush()
# Verify we can now see all of the data
acc.Reload()
id_events = acc.Scalars('id')
sq_events = acc.Scalars('sq')
self.assertEqual(40, len(id_events))
self.assertEqual(40, len(sq_events))
for i in xrange(40):
self.assertEqual(i * 5, id_events[i].step)
self.assertEqual(i * 5, sq_events[i].step)
self.assertEqual(i, id_events[i].value)
self.assertEqual(i * i, sq_events[i].value)
self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
def testGraphFromMetaGraphBecomesAvailable(self):
"""Test accumulator by writing values and then reading them."""
directory = os.path.join(self.get_temp_dir(), 'metagraph_test_values_dir')
if gfile.IsDirectory(directory):
gfile.DeleteRecursively(directory)
gfile.MkDir(directory)
writer = writer_lib.FileWriter(directory, max_queue=100)
with ops.Graph().as_default() as graph:
_ = constant_op.constant([2.0, 1.0])
# Add a graph to the summary writer.
meta_graph_def = saver.export_meta_graph(
graph_def=graph.as_graph_def(add_shapes=True))
writer.add_meta_graph(meta_graph_def)
writer.flush()
# Verify that we can load those events properly
acc = ea.EventAccumulator(directory)
acc.Reload()
self.assertTagsEqual(acc.Tags(), {
ea.GRAPH: True,
ea.META_GRAPH: True,
})
self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
if __name__ == '__main__':
test.main()
"""
Copyright (c) 2020 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import copy
import io
import os
import pathlib
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
import pytest
import responses
from flexmock import flexmock
import atomic_reactor.util
from atomic_reactor.constants import INSPECT_CONFIG, PLUGIN_PIN_OPERATOR_DIGESTS_KEY
from atomic_reactor.plugin import PluginFailedException
from atomic_reactor.plugins.build_orchestrate_build import (OrchestrateBuildPlugin,
WORKSPACE_KEY_OVERRIDE_KWARGS)
from atomic_reactor.plugins.pre_pin_operator_digest import (PinOperatorDigestsPlugin,
PullspecReplacer)
from tests.util import OPERATOR_MANIFESTS_DIR
from osbs.exceptions import OsbsValidationException
from osbs.utils import ImageName
from tests.stubs import StubConfig
from tests.mock_env import MockEnv
PKG_LABEL = 'com.redhat.component'
PKG_NAME = 'test-package'
yaml = YAML()
# When defining mock configuration for source_registry/pull_registries,
# do not use auth unless you also want to mock a dockercfg file
SOURCE_REGISTRY_URI = 'registry.private.example.com'
SOURCE_REGISTRY = {
'url': 'https://{}'.format(SOURCE_REGISTRY_URI),
}
pytestmark = pytest.mark.usefixtures('user_params')
def mock_dockerfile(repo_dir, base='scratch', operator_bundle_label=True):
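    """Write a minimal Dockerfile with component and operator bundle labels into repo_dir."""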
dockerfile = (
'FROM {base}\n'
'LABEL {component_label}={component_value}\n'
'LABEL com.redhat.delivery.operator.bundle={label_value}\n'
).format(base=base, component_label=PKG_LABEL, component_value=PKG_NAME,
label_value=operator_bundle_label)
repo_dir.join('Dockerfile').write(dockerfile)
def make_reactor_config(operators_config):
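    """Build a minimal reactor config map, optionally including an operator_manifests section."""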
config = {
'version': 1,
'source_registry': SOURCE_REGISTRY
}
if operators_config:
config['operator_manifests'] = operators_config
return config
def make_user_config(operator_config):
config = StubConfig()
setattr(config, 'operator_manifests', operator_config)
return config
def mock_env(docker_tasker, repo_dir, orchestrator,
user_config=None, site_config=None,
df_base='scratch', df_operator_label=True,
replacement_pullspecs=None, add_to_config=None,
write_container_yaml=True, operator_csv_modifications_url=None):
"""
Mock environment for test
:param docker_tasker: conftest fixture
    :param repo_dir: pytest fixture
:type repo_dir: py.path.LocalPath
:param orchestrator: is the plugin running in orchestrator?
:param user_config: container.yaml operator_manifest config
:param site_config: reactor-config-map operator_manifests config
:param df_base: base image in Dockerfile, non-scratch should fail
:param df_operator_label: presence of operator manifest bundle label
:param replacement_pullspecs: plugin argument from osbs-client
:param operator_csv_modifications_url: plugin argument from osbs-client
:return: configured plugin runner
"""
reactor_config = make_reactor_config(site_config)
if add_to_config:
reactor_config.update(add_to_config)
env = (
MockEnv()
.for_plugin(
'prebuild',
PinOperatorDigestsPlugin.key,
{
'replacement_pullspecs': replacement_pullspecs,
'operator_csv_modifications_url': operator_csv_modifications_url,
})
.set_reactor_config(reactor_config))
if orchestrator:
env.make_orchestrator()
if write_container_yaml:
with open(str(repo_dir.join('container.yaml')), 'w') as f:
yaml.dump({'operator_manifests': user_config}, stream=f)
mock_dockerfile(repo_dir, df_base, df_operator_label)
from atomic_reactor.source import PathSource
env.workflow.source = PathSource('path', str(repo_dir))
# NOTE: is this path correct?
env.workflow.builder.set_df_path(str(repo_dir))
return env.create_runner(docker_tasker)
def mock_operator_csv(tmpdir, filename, pullspecs, for_ocp_44=False,
with_related_images=False, with_related_image_envs=False):
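    """Write a mock ClusterServiceVersion file containing the given pullspecs and return its path."""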
path = tmpdir.join(filename)
containers = [
# utils.operator adds related images as ordered dicts
# ("name" first, "image" second) - make sure order matches here
CommentedMap([('name', 'foo-{}'.format(i + 1)), ('image', image)])
for i, image in enumerate(pullspecs)
]
# Add a random RELATED_IMAGE env var, only for testing
# relatedImages vs. RELATED_IMAGE_* conflicts
if with_related_image_envs:
containers[0]['env'] = [{'name': 'RELATED_IMAGE_XYZ', 'value': 'xyz'}]
data = {
'kind': 'ClusterServiceVersion',
'metadata': {},
'spec': {
'relatedImages': [],
# It does not really matter where in the CSV these pullspecs go
# as long as utils.operator is known to work properly, just do not
# put them in relatedImages because those get special handling
'install': {
'spec': {
'deployments': [
{
'spec': {
'template': {
'spec': {
'containers': containers
}
}
}
}
]
}
}
}
}
# To test OCP 4.4 workaround, also add pullspecs under a random key which
# is not normally considered a pullspec location
if for_ocp_44:
data['foo'] = pullspecs
# To mock what the file should look like after relatedImages are updated,
# add pullspecs also under .spec.relatedImages
if with_related_images:
# deepcopy the containers list to prevent ruamel.yaml from being overly
# clever and using YAML anchors to refer to the same objects
data['spec']['relatedImages'] = copy.deepcopy(containers)
with open(str(path), 'w') as f:
yaml.dump(data, f)
return path
def mock_package_mapping_files(repo_replacements):
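    """Serve each registry's package mapping from a mocked URL and rewrite the mapping to point at it."""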
repo_replacements = repo_replacements or {}
# create unique url for each registry, mock responses, update mapping to point to urls
for registry, mapping in repo_replacements.items():
url = 'https://somewhere.net/mapping-{}.yaml'.format(registry)
# ruamel.yaml does not support dumping to str, use an io stream
# on python2, it also does not support writing to a StringIO stream, use BytesIO
f = io.BytesIO()
yaml.dump(mapping, f)
f.seek(0)
responses.add(responses.GET, url, body=f.read().decode('utf-8'))
repo_replacements[registry] = url
return repo_replacements
def mock_digest_query(image_digest_map):
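    """Mock RegistryClient.get_manifest_list_digest to look up digests in the given pullspec -> digest map."""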
updated_map = {
ImageName.parse(pullspec).to_str(): digest
for pullspec, digest in image_digest_map.items()
}
def mocked_get_manifest_list_digest(image):
return updated_map[image.to_str()]
(flexmock(atomic_reactor.util.RegistryClient)
.should_receive('get_manifest_list_digest')
.replace_with(mocked_get_manifest_list_digest))
def mock_inspect_query(pullspec, labels, times=1):
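    """Mock RegistryClient.get_inspect_for_image to return the given labels for the given pullspec."""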
image = ImageName.parse(pullspec)
inspect = {
INSPECT_CONFIG: {
'Labels': labels
}
}
(flexmock(atomic_reactor.util.RegistryClient)
.should_receive('get_inspect_for_image')
.with_args(image)
.and_return(inspect)
.times(times))
def get_build_kwarg(workflow, k, platform=None):
"""
Get build-kwarg override
"""
key = OrchestrateBuildPlugin.key
workspace = workflow.plugin_workspace.get(key, {})
override_kwargs = workspace.get(WORKSPACE_KEY_OVERRIDE_KWARGS, {})
return override_kwargs.get(platform, {}).get(k)
def get_site_config(allowed_registries=None, registry_post_replace=None, repo_replacements=None,
skip_all_allow_list=None):
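    """Build the operator_manifests section of the reactor config map (site-level configuration)."""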
registry_post_replace = registry_post_replace or {}
repo_replacements = repo_replacements or {}
skip_allow_list = skip_all_allow_list or []
return {
'allowed_registries': allowed_registries,
'registry_post_replace': [
{'old': old, 'new': new} for old, new in registry_post_replace.items()
],
'repo_replacements': [
{'registry': registry, 'package_mappings_url': path}
for registry, path in repo_replacements.items()
],
        'skip_all_allow_list': list(skip_allow_list)
}
def get_user_config(manifests_dir, repo_replacements=None, enable_digest_pinning=True,
enable_repo_replacements=True, enable_registry_replacements=True,
skip_all=False):
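    """Build the operator_manifests section of container.yaml (user-level configuration)."""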
repo_replacements = repo_replacements or {}
return {
'manifests_dir': manifests_dir,
'repo_replacements': [
{'registry': registry, 'package_mappings': mapping}
for registry, mapping in repo_replacements.items()
],
'enable_digest_pinning': enable_digest_pinning,
'enable_repo_replacements': enable_repo_replacements,
'enable_registry_replacements': enable_registry_replacements,
'skip_all': skip_all,
}
class TestPinOperatorDigest(object):
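    """Tests for the pin_operator_digest plugin in both orchestrator and worker mode."""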
def _get_worker_arg(self, workflow):
return get_build_kwarg(workflow, "operator_bundle_replacement_pullspecs")
@pytest.mark.parametrize('orchestrator', [True, False])
def test_run_only_for_operator_bundle_label(self, orchestrator,
docker_tasker, tmpdir, caplog):
runner = mock_env(docker_tasker, tmpdir,
orchestrator=orchestrator,
df_operator_label=False,
write_container_yaml=False)
runner.run()
assert "Not an operator manifest bundle build, skipping plugin" in caplog.text
@pytest.mark.parametrize('orchestrator', [True, False])
def test_missing_site_config(self, orchestrator, docker_tasker, tmpdir, caplog):
runner = mock_env(docker_tasker, tmpdir,
orchestrator=orchestrator,
write_container_yaml=False)
runner.run()
msg = "operator_manifests configuration missing in reactor config map, aborting"
assert msg in caplog.text
assert "Looking for operator CSV files" not in caplog.text
@pytest.mark.parametrize('orchestrator', [True, False])
def test_missing_user_config(self, orchestrator, docker_tasker, tmpdir):
# make sure plugin is not skipped because of missing site config
site_config = get_site_config()
runner = mock_env(docker_tasker, tmpdir,
orchestrator=orchestrator,
site_config=site_config,
write_container_yaml=False)
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
msg = "operator_manifests configuration missing in container.yaml"
assert msg in str(exc_info.value)
# FIXME: !!!
@pytest.mark.parametrize('orchestrator', [True, False])
@pytest.mark.parametrize('manifests_dir, symlinks', [
('foo', {'foo': '/tmp/foo'}),
])
def test_manifests_dir_not_subdir_of_repo(self, manifests_dir, symlinks,
orchestrator, docker_tasker, tmpdir):
# make sure plugin is not skipped because of missing site config
site_config = get_site_config()
user_config = get_user_config(manifests_dir)
runner = mock_env(docker_tasker, tmpdir, orchestrator,
site_config=site_config,
user_config=user_config)
# make symlinks
for rel_dest, src in (symlinks or {}).items():
dest = os.path.join(runner.workflow.source.path, rel_dest)
pathlib.Path(src).mkdir(exist_ok=True)
os.symlink(src, str(dest))
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
assert "manifests_dir points outside of cloned repo" in str(exc_info.value)
@pytest.mark.parametrize('filepaths', [
['csv1.yaml'],
['csv2.yaml'],
['csv1.yaml', 'csv2.yaml']
])
@pytest.mark.parametrize('skip_all', [True, False])
def test_orchestrator_no_pullspecs(self, docker_tasker, tmpdir, caplog, filepaths, skip_all):
manifests_dir = tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir()
for path in filepaths:
mock_operator_csv(manifests_dir, path, [])
user_config = get_user_config(OPERATOR_MANIFESTS_DIR, skip_all=skip_all)
site_config = get_site_config(skip_all_allow_list=[PKG_NAME])
runner = mock_env(docker_tasker, tmpdir, orchestrator=True,
user_config=user_config, site_config=site_config)
if len(filepaths) > 1:
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
msg = "Operator bundle may contain only 1 CSV file, but contains more:"
assert msg in str(exc_info.value)
return
result = runner.run()
caplog_text = "\n".join(rec.message for rec in caplog.records)
source_path = runner.workflow.source.path
assert f"Looking for operator CSV files in {source_path}" in caplog_text
assert "Found operator CSV file:" in caplog_text
csv_files = [
os.path.join(runner.workflow.source.manifests_dir, path)
for path in filepaths
]
for f in csv_files:
assert str(f) in caplog_text
assert "No pullspecs found" in caplog_text
assert self._get_worker_arg(runner.workflow) is None
expected = {
'related_images': {
'pullspecs': [],
'created_by_osbs': False,
}
}
assert result['pin_operator_digest'] == expected
@pytest.mark.parametrize('orchestrator', [True, False])
def test_fail_without_csv(self, docker_tasker, tmpdir, orchestrator):
"""CSV file is mandatory part of operator, fail if it's not present"""
tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir()
user_config = get_user_config(OPERATOR_MANIFESTS_DIR)
site_config = get_site_config()
runner = mock_env(docker_tasker, tmpdir, orchestrator=orchestrator,
user_config=user_config, site_config=site_config)
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
assert "Missing ClusterServiceVersion in operator manifests" in str(exc_info.value)
def test_orchestrator_disallowed_registry(self, docker_tasker, tmpdir):
# TODO: ImageName parses x/y as namespace/repo and not registry/repo - does it matter?
pullspecs = ['allowed-registry/ns/foo:1', 'disallowed-registry/ns/bar:2']
mock_operator_csv(tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir(), 'csv.yaml', pullspecs)
user_config = get_user_config(OPERATOR_MANIFESTS_DIR)
site_config = get_site_config(allowed_registries=['allowed-registry'])
runner = mock_env(docker_tasker, tmpdir, orchestrator=True,
user_config=user_config, site_config=site_config)
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
msg = "Registry not allowed: disallowed-registry (in disallowed-registry/ns/bar:2)"
assert msg in str(exc_info.value)
def test_orchestrator_raise_error_if_csv_has_both_related_images_and_related_env_vars(
self, docker_tasker, tmpdir, caplog
):
csv = mock_operator_csv(tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir(),
'csv.yaml', ['foo'],
with_related_images=True,
with_related_image_envs=True)
user_config = get_user_config(OPERATOR_MANIFESTS_DIR)
site_config = get_site_config()
runner = mock_env(docker_tasker, tmpdir, orchestrator=True,
user_config=user_config, site_config=site_config)
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
csv = os.path.join(runner.workflow.source.manifests_dir, csv.basename)
expected = (
f"Both relatedImages and RELATED_IMAGE_* env vars present in {csv}. "
f"Please remove the relatedImages section, it will be reconstructed "
f"automatically."
)
assert expected in str(exc_info.value)
@responses.activate
def test_orchestrator(self, docker_tasker, tmpdir, caplog):
pullspecs = [
# registry.private.example.com: do not replace registry or repos
'registry.private.example.com/ns/foo@sha256:1', # -> no change
'registry.private.example.com/ns/foo:1',
# -> registry.private.example.com/ns/foo@sha256:1
# weird-registry: keep registry but replace repos
'weird-registry/ns/bar@sha256:2', # -> weird-registry/new-bar@sha256:2
'weird-registry/ns/bar:1', # -> weird-registry/new-bar@sha256:2
# private-registry: replace registry but keep repos
'private-registry/ns/baz@sha256:3', # -> public-registry/ns/baz@sha256:3
'private-registry/ns/baz:1', # -> public-registry/ns/baz@sha256:3
# old-registry: replace everything
'old-registry/ns/spam@sha256:4', # -> new-registry/new-ns/new-spam@sha256:4
'old-registry/ns/spam:1', # -> new-registry/new-ns/new-spam@sha256:4
]
replacement_registries = {
'private-registry': 'public-registry',
'old-registry': 'new-registry',
}
site_replace_repos = {
'old-registry': {
'spam-package': ['new-ns/new-spam']
}
}
user_replace_repos = {
'weird-registry': {
'bar-package': 'new-bar'
}
}
mock_digest_query({
'registry.private.example.com/ns/foo:1': 'sha256:1',
'weird-registry/ns/bar:1': 'sha256:2',
'private-registry/ns/baz:1': 'sha256:3',
'old-registry/ns/spam:1': 'sha256:4',
})
# there should be no queries for the pullspecs which already contain a digest
# images should be inspected after their digests are pinned
mock_inspect_query('weird-registry/ns/bar@sha256:2', {PKG_LABEL: 'bar-package'}, times=2)
mock_inspect_query('old-registry/ns/spam@sha256:4', {PKG_LABEL: 'spam-package'}, times=2)
f = mock_operator_csv(tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir(), 'csv.yaml', pullspecs)
pre_content = f.read()
mock_package_mapping_files(site_replace_repos)
user_config = get_user_config(manifests_dir=OPERATOR_MANIFESTS_DIR,
repo_replacements=user_replace_repos)
site_config = get_site_config(registry_post_replace=replacement_registries,
repo_replacements=site_replace_repos)
pull_registries = {'pull_registries': [
{'url': 'https://old-registry'},
{'url': 'https://private-registry'},
{'url': 'https://weird-registry'},
]}
runner = mock_env(docker_tasker, tmpdir, orchestrator=True,
user_config=user_config, site_config=site_config,
add_to_config=pull_registries)
result = runner.run()
post_content = f.read()
assert pre_content == post_content # worker does the replacement, not orchestrator
caplog_text = "\n".join(rec.message for rec in caplog.records)
# pullspecs are logged in alphabetical order, if tag is missing, :latest is added
pullspecs_log = (
'Found pullspecs:\n'
'old-registry/ns/spam:1\n'
'old-registry/ns/spam@sha256:4\n'
'private-registry/ns/baz:1\n'
'private-registry/ns/baz@sha256:3\n'
'registry.private.example.com/ns/foo:1\n'
'registry.private.example.com/ns/foo@sha256:1\n'
'weird-registry/ns/bar:1\n'
'weird-registry/ns/bar@sha256:2'
)
assert pullspecs_log in caplog_text
assert "Computing replacement pullspecs" in caplog_text
# replacements are logged in alphabetical order (ordered by the original pullspec)
replacements_log = (
'To be replaced:\n'
'old-registry/ns/spam:1 -> new-registry/new-ns/new-spam@sha256:4\n'
'old-registry/ns/spam@sha256:4 -> new-registry/new-ns/new-spam@sha256:4\n'
'private-registry/ns/baz:1 -> public-registry/ns/baz@sha256:3\n'
'private-registry/ns/baz@sha256:3 -> public-registry/ns/baz@sha256:3\n'
'registry.private.example.com/ns/foo:1 -> '
'registry.private.example.com/ns/foo@sha256:1\n'
'registry.private.example.com/ns/foo@sha256:1 - no change\n'
'weird-registry/ns/bar:1 -> weird-registry/new-bar@sha256:2\n'
'weird-registry/ns/bar@sha256:2 -> weird-registry/new-bar@sha256:2'
)
assert replacements_log in caplog_text
replacement_pullspecs = {
'registry.private.example.com/ns/foo:1': 'registry.private.example.com/ns/foo@sha256:1',
# registry.private.example.com/ns/foo@sha256:1 - no change
'weird-registry/ns/bar@sha256:2': 'weird-registry/new-bar@sha256:2',
'weird-registry/ns/bar:1': 'weird-registry/new-bar@sha256:2',
'private-registry/ns/baz@sha256:3': 'public-registry/ns/baz@sha256:3',
'private-registry/ns/baz:1': 'public-registry/ns/baz@sha256:3',
'old-registry/ns/spam@sha256:4': 'new-registry/new-ns/new-spam@sha256:4',
'old-registry/ns/spam:1': 'new-registry/new-ns/new-spam@sha256:4',
}
assert self._get_worker_arg(runner.workflow) == replacement_pullspecs
expected_result = {
'related_images': {
'pullspecs': [
{
'original': ImageName.parse('old-registry/ns/spam:1'),
'new': ImageName.parse('new-registry/new-ns/new-spam@sha256:4'),
'pinned': True,
'replaced': True
}, {
'original': ImageName.parse('old-registry/ns/spam@sha256:4'),
'new': ImageName.parse('new-registry/new-ns/new-spam@sha256:4'),
'pinned': False,
'replaced': True
}, {
'original': ImageName.parse('private-registry/ns/baz:1'),
'new': ImageName.parse('public-registry/ns/baz@sha256:3'),
'pinned': True,
'replaced': True
}, {
'original': ImageName.parse('private-registry/ns/baz@sha256:3'),
'new': ImageName.parse('public-registry/ns/baz@sha256:3'),
'pinned': False,
'replaced': True
}, {
'original': ImageName.parse('registry.private.example.com/ns/foo:1'),
'new': ImageName.parse('registry.private.example.com/ns/foo@sha256:1'),
'pinned': True,
'replaced': True
}, {
'original': ImageName.parse('registry.private.example.com/ns/foo@sha256:1'),
'new': ImageName.parse('registry.private.example.com/ns/foo@sha256:1'),
'pinned': False,
'replaced': False
}, {
'original': ImageName.parse('weird-registry/ns/bar:1'),
'new': ImageName(
registry='weird-registry', repo='new-bar', tag='sha256:2'),
'pinned': True,
'replaced': True
}, {
'original': ImageName.parse('weird-registry/ns/bar@sha256:2'),
'new': ImageName(
registry='weird-registry', repo='new-bar', tag='sha256:2'),
'pinned': False,
'replaced': True
},
],
'created_by_osbs': True,
}
}
assert result['pin_operator_digest'] == expected_result
@pytest.mark.parametrize('pin_digest', [True, False])
@pytest.mark.parametrize('replace_repo', [True, False])
@pytest.mark.parametrize('replace_registry', [True, False])
def test_orchestrator_replacement_opt_out(self, pin_digest, replace_repo, replace_registry,
docker_tasker, tmpdir, caplog):
original = '{}/ns/foo:1'.format(SOURCE_REGISTRY_URI)
replaced = ImageName.parse(original)
mock_operator_csv(tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir(), 'csv.yaml', [original])
if pin_digest:
replaced.tag = 'sha256:123456'
mock_digest_query({original: 'sha256:123456'})
if replace_repo:
replaced.namespace = 'new-ns'
replaced.repo = 'new-foo'
user_replace_repos = {
SOURCE_REGISTRY_URI: {
'foo-package': 'new-ns/new-foo'
}
}
query_image = ImageName.parse(original)
if pin_digest:
# inspect query is done after pinning digest
query_image.tag = 'sha256:123456'
mock_inspect_query(query_image, {PKG_LABEL: 'foo-package'})
else:
user_replace_repos = None
if replace_registry:
replaced.registry = 'new-registry'
registry_post_replace = {SOURCE_REGISTRY_URI: 'new-registry'}
else:
registry_post_replace = None
user_config = get_user_config(manifests_dir=OPERATOR_MANIFESTS_DIR,
repo_replacements=user_replace_repos,
enable_digest_pinning=pin_digest,
enable_repo_replacements=replace_repo,
enable_registry_replacements=replace_registry)
site_config = get_site_config(registry_post_replace=registry_post_replace)
runner = mock_env(docker_tasker, tmpdir, orchestrator=True,
user_config=user_config, site_config=site_config)
result = runner.run()
if not pin_digest:
assert "User disabled digest pinning" in caplog.text
assert "Making sure tag is manifest list digest" not in caplog.text
if not replace_repo:
assert "User disabled repo replacements" in caplog.text
assert "Replacing namespace/repo" not in caplog.text
if not replace_registry:
assert "User disabled registry replacements" in caplog.text
assert "Replacing registry" not in caplog.text
if not any([pin_digest, replace_repo, replace_registry]):
assert "All replacement features disabled" in caplog.text
assert self._get_worker_arg(runner.workflow) == {}
else:
assert self._get_worker_arg(runner.workflow) == {original: replaced.to_str()}
        # plugin must always return pullspecs
assert result['pin_operator_digest']['related_images']['pullspecs']
@pytest.mark.parametrize('has_envs', [True, False])
def test_worker_exclude_csvs(self, docker_tasker, tmpdir, caplog, has_envs):
# Worker does not care if there is a conflict between relatedImages
# and RELATED_IMAGE_* env vars, orchestrator should have caught this already
csv = mock_operator_csv(tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir(), 'csv.yaml', ['foo'],
with_related_images=True,
with_related_image_envs=has_envs)
original_content = csv.read()
user_config = get_user_config(OPERATOR_MANIFESTS_DIR)
runner = mock_env(docker_tasker, tmpdir, orchestrator=False,
site_config=get_site_config(), user_config=user_config)
runner.run()
assert "Replacing pullspecs" not in caplog.text
assert "Creating relatedImages section" not in caplog.text
assert csv.read() == original_content
@pytest.mark.parametrize('ocp_44', [True, False])
def test_worker(self, ocp_44, docker_tasker, tmpdir, caplog):
pullspecs = [
'keep-registry/ns/foo',
'replace-registry/ns/bar:1',
'keep-registry/ns/spam@sha256:123456',
'replace-registry/ns/eggs@sha256:654321',
]
replacement_pullspecs = {
'keep-registry/ns/foo:latest': 'keep-registry/ns/foo@sha256:abcdef',
'replace-registry/ns/bar:1': 'new-registry/ns/bar@sha256:fedcba',
'replace-registry/ns/eggs@sha256:654321': 'new-registry/ns/eggs@sha256:654321',
}
replaced_pullspecs = [
'keep-registry/ns/foo@sha256:abcdef',
'new-registry/ns/bar@sha256:fedcba',
'keep-registry/ns/spam@sha256:123456',
'new-registry/ns/eggs@sha256:654321',
]
# manifests_dir = tmpdir.mkdir('manifests')
mock_operator_csv(tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir(),
'csv1.yaml', pullspecs,
for_ocp_44=ocp_44)
        # this is a reference file, make sure it does not get touched by putting it in the parent dir
reference = mock_operator_csv(tmpdir, 'csv2.yaml', replaced_pullspecs,
for_ocp_44=ocp_44, with_related_images=True)
user_config = get_user_config(OPERATOR_MANIFESTS_DIR)
runner = mock_env(docker_tasker, tmpdir, orchestrator=False, site_config=get_site_config(),
user_config=user_config, replacement_pullspecs=replacement_pullspecs)
runner.run()
replaced_csv = os.path.join(runner.workflow.source.manifests_dir, 'csv1.yaml')
with open(replaced_csv, 'r') as f:
assert f.read() == reference.read()
caplog_text = "\n".join(rec.message for rec in caplog.records)
assert f'Found operator CSV file: {replaced_csv}' in caplog_text
assert str(reference) not in caplog_text
assert f'Replacing pullspecs in {replaced_csv}' in caplog_text
assert f'Creating relatedImages section in {replaced_csv}' in caplog_text
assert 'Replacing pullspecs in {}'.format(reference) not in caplog_text
assert 'Creating relatedImages section in {}'.format(reference) not in caplog_text
def test_return_pullspecs_in_related_images(self, docker_tasker, tmpdir):
"""
Ensure the pullspecs listed in spec.relatedImages are returned if a CSV
file has such a section
"""
pullspecs = [
'registry.r.c/project/foo@sha256:123456',
            # Whether the pullspec includes a digest or a tag, the pullspec inside
            # spec.relatedImages should be returned directly without any change.
'registry.r.c/project/bar:20200901',
]
mock_operator_csv(tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir(), 'csv1.yaml', pullspecs,
with_related_images=True)
runner = mock_env(docker_tasker, tmpdir,
orchestrator=True,
user_config=get_user_config(OPERATOR_MANIFESTS_DIR),
site_config=get_site_config())
result = runner.run()
expected_result = [
{
'original': ImageName.parse(item),
'new': ImageName.parse(item),
'pinned': False,
'replaced': False
}
for item in pullspecs
]
got_pullspecs_metadata = result[PLUGIN_PIN_OPERATOR_DIGESTS_KEY]['related_images']
assert not got_pullspecs_metadata['created_by_osbs'], \
            'Returning pullspecs included in spec.relatedImages directly. ' \
            'Expected created_by_osbs to be False.'
assert (
sorted(expected_result, key=str) ==
sorted(got_pullspecs_metadata['pullspecs'], key=str)
)
@pytest.mark.parametrize('orchestrator', [True, False])
@pytest.mark.parametrize('has_related_images', [True, False])
@pytest.mark.parametrize('pull_specs, has_related_image_envs', [
([], False),
(['foo'], True),
(['foo'], False),
])
@pytest.mark.parametrize('skip_all_allow_list', [None, [PKG_NAME]])
def test_skip_all(self, docker_tasker, tmpdir, caplog, orchestrator, has_related_images,
pull_specs, has_related_image_envs, skip_all_allow_list):
mock_operator_csv(tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir(), 'csv.yaml', pull_specs,
with_related_images=has_related_images,
with_related_image_envs=has_related_image_envs)
user_config = get_user_config(OPERATOR_MANIFESTS_DIR, skip_all=True)
runner = mock_env(docker_tasker, tmpdir, orchestrator=orchestrator,
site_config=get_site_config(skip_all_allow_list=skip_all_allow_list),
user_config=user_config)
has_skip_log_entry = True
if not skip_all_allow_list or (not has_related_images and pull_specs and orchestrator):
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
if not skip_all_allow_list:
exc_msg = "Koji package: {} isn't allowed to use skip_all for " \
"operator bundles".format(PKG_NAME)
has_skip_log_entry = False
else:
exc_msg = "skip_all defined but relatedImages section doesn't exist"
assert exc_msg in str(exc_info.value)
else:
runner.run()
if has_skip_log_entry:
if orchestrator:
assert "skip_all defined for operator manifests" in caplog.text
else:
assert "skip_all defined, not running on worker" in caplog.text
class TestPullspecReplacer(object):
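    """Tests for the PullspecReplacer helper used by the plugin."""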
def mock_workflow(self, site_config):
return MockEnv().set_reactor_config(make_reactor_config(site_config)).workflow
@pytest.mark.parametrize('allowed_registries, image, allowed', [
(None, 'registry/ns/foo', True),
(['registry'], 'registry/ns/foo', True),
([], 'registry/ns/foo', False), # not actually allowed in schema, but sensible
(['other-registry'], 'registry/ns/foo', False),
])
def test_registry_is_allowed(self, allowed_registries, image, allowed):
site_config = get_site_config(allowed_registries=allowed_registries)
replacer = PullspecReplacer(user_config={}, workflow=self.mock_workflow(site_config))
image = ImageName.parse(image)
assert replacer.registry_is_allowed(image) == allowed
@pytest.mark.parametrize('pullspec, should_query, digest', [
('{}/ns/foo'.format(SOURCE_REGISTRY_URI), True, 'sha256:123456'),
('{}/ns/bar@sha256:654321'.format(SOURCE_REGISTRY_URI), False, 'sha256:654321'),
])
def test_pin_digest(self, pullspec, should_query, digest, caplog):
if should_query:
mock_digest_query({pullspec: digest})
image = ImageName.parse(pullspec)
site_config = get_site_config()
replacer = PullspecReplacer(user_config={}, workflow=self.mock_workflow(site_config))
replaced = replacer.pin_digest(image)
assert replaced.registry == image.registry
assert replaced.namespace == image.namespace
assert replaced.repo == image.repo
assert replaced.tag == digest
if should_query:
assert "Querying {} for manifest list digest".format(image.registry) in caplog.text
else:
assert "{} looks like a digest, skipping query".format(digest) in caplog.text
@pytest.mark.parametrize('image, replacement_registries, replaced', [
('old-registry/ns/foo', {'old-registry': 'new-registry'}, 'new-registry/ns/foo'),
('registry/ns/foo', {}, 'registry/ns/foo'),
])
def test_replace_registry(self, image, replacement_registries, replaced, caplog):
image = ImageName.parse(image)
replaced = ImageName.parse(replaced)
site_config = get_site_config(registry_post_replace=replacement_registries)
replacer = PullspecReplacer(user_config={}, workflow=self.mock_workflow(site_config))
assert replacer.replace_registry(image) == replaced
if image.registry not in replacement_registries:
msg = "registry_post_replace not configured for {}".format(image.registry)
assert msg in caplog.text
@pytest.mark.parametrize('image,site_replacements,user_replacements,replaced,should_query', [
# can replace repo if only 1 option in site config
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
{SOURCE_REGISTRY_URI: {'foo-package': ['y/bar']}},
None,
'{}/y/bar:1'.format(SOURCE_REGISTRY_URI),
True),
# user can define replacement if package not in site config
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
None,
{SOURCE_REGISTRY_URI: {'foo-package': 'y/bar'}},
'{}/y/bar:1'.format(SOURCE_REGISTRY_URI),
True),
# user can choose one of the options in site config
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
{SOURCE_REGISTRY_URI: {'foo-package': ['y/bar', 'y/baz']}},
{SOURCE_REGISTRY_URI: {'foo-package': 'y/baz'}},
'{}/y/baz:1'.format(SOURCE_REGISTRY_URI),
True),
# replacement can be just repo
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
{SOURCE_REGISTRY_URI: {'foo-package': ['bar']}},
None,
ImageName(registry=SOURCE_REGISTRY_URI, repo='bar', tag='1'),
True),
# no config, no replacement
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
None,
None,
'{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
False),
# missing registry, no replacement
('foo:1',
{SOURCE_REGISTRY_URI: {'foo-package': ['y/bar']}},
{SOURCE_REGISTRY_URI: {'foo-package': 'y/bar'}},
'foo:1',
False),
])
@responses.activate
def test_replace_repo(self, image, site_replacements, user_replacements,
replaced, should_query, tmpdir, caplog):
image = ImageName.parse(image)
replaced = ImageName.parse(replaced)
mock_package_mapping_files(site_replacements)
mock_inspect_query(image,
{PKG_LABEL: '{}-package'.format(image.repo)},
times=1 if should_query else 0)
site_config = get_site_config(repo_replacements=site_replacements)
user_config = get_user_config(manifests_dir=str(tmpdir),
repo_replacements=user_replacements)
replacer = PullspecReplacer(user_config=user_config,
workflow=self.mock_workflow(site_config))
assert replacer.replace_repo(image) == replaced
if site_replacements and image.registry in site_replacements:
assert "Downloading mapping file for {}".format(image.registry) in caplog.text
if should_query:
assert "Querying {} for image labels".format(image.registry) in caplog.text
assert "Resolved package name" in caplog.text
assert "Replacement for package" in caplog.text
else:
assert "repo_replacements not configured for {}".format(image.registry) in caplog.text
@pytest.mark.parametrize('image,site_replacements,user_replacements,inspect_labels,exc_msg', [
# replacements configured in site config, repo missing
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
{SOURCE_REGISTRY_URI: {}},
None,
{PKG_LABEL: 'foo-package'},
'Replacement not configured for package foo-package (from {}/x/foo:1). '
'Please specify replacement in container.yaml'.format(SOURCE_REGISTRY_URI)),
# replacements configured in user config, repo missing
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
None,
{SOURCE_REGISTRY_URI: {}},
{PKG_LABEL: 'foo-package'},
'Replacement not configured for package foo-package (from {}/x/foo:1). '
'Please specify replacement in container.yaml'.format(SOURCE_REGISTRY_URI)),
# multiple options for replacement in site config
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
{SOURCE_REGISTRY_URI: {'foo-package': ['bar', 'baz']}},
None,
{PKG_LABEL: 'foo-package'},
'Multiple replacements for package foo-package (from {}/x/foo:1): bar, baz. '
'Please specify replacement in container.yaml'.format(SOURCE_REGISTRY_URI)),
# user tried to override with an invalid replacement
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
{SOURCE_REGISTRY_URI: {'foo-package': ['bar', 'baz']}},
{SOURCE_REGISTRY_URI: {'foo-package': 'spam'}},
{PKG_LABEL: 'foo-package'},
'Invalid replacement for package foo-package: spam (choices: bar, baz)'),
# replacements configured, image has no component label
('{}/x/foo:1'.format(SOURCE_REGISTRY_URI),
{SOURCE_REGISTRY_URI: {}},
None,
{},
'Image has no component label: {}/x/foo:1'.format(SOURCE_REGISTRY_URI)),
])
@responses.activate
def test_replace_repo_failure(self, image, site_replacements, user_replacements,
inspect_labels, exc_msg, tmpdir):
image = ImageName.parse(image)
mock_package_mapping_files(site_replacements)
mock_inspect_query(image, inspect_labels)
site_config = get_site_config(repo_replacements=site_replacements)
user_config = get_user_config(manifests_dir=str(tmpdir),
repo_replacements=user_replacements)
replacer = PullspecReplacer(user_config=user_config,
workflow=self.mock_workflow(site_config))
with pytest.raises(RuntimeError) as exc_info:
replacer.replace_repo(image)
assert str(exc_info.value) == exc_msg
@pytest.mark.parametrize('site_replacements, exc_msg', [
# replacement is not a list
({'a': {'foo-package': 'bar'}},
'is not of type {!r}'.format('array')),
# replacement is an empty list
({'a': {'foo-package': []}},
'[] is too short'),
])
@responses.activate
def test_replace_repo_schema_validation(self, site_replacements, exc_msg):
image = ImageName.parse('a/x/foo')
mock_package_mapping_files(site_replacements)
mock_inspect_query(image, {}, times=0)
site_config = get_site_config(repo_replacements=site_replacements)
replacer = PullspecReplacer(user_config={}, workflow=self.mock_workflow(site_config))
with pytest.raises(OsbsValidationException) as exc_info:
replacer.replace_repo(image)
assert exc_msg in str(exc_info.value)
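# Pullspec replacements as they would appear in a user-provided operator CSV modifications file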
PULLSPEC_REPLACEMENTS = [
{
"original": "myimage:v1.2.2",
"new": "myimage:v1.2.700",
"pinned": False,
},
]
class TestOperatorCSVModifications:
"""Test suite for user modifications for Operator CSV file"""
def _test_assert_error(self, *, tmpdir, docker_tasker, test_url, pull_specs, exc_msg):
manifests_dir = tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir()
mock_operator_csv(manifests_dir, 'csv.yaml', pull_specs)
user_config = get_user_config(OPERATOR_MANIFESTS_DIR)
site_config = get_site_config()
runner = mock_env(
docker_tasker, tmpdir, orchestrator=True,
user_config=user_config,
site_config=site_config,
operator_csv_modifications_url=test_url
)
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
assert exc_msg in str(exc_info.value)
@pytest.mark.parametrize('pull_specs,exc_msg', [
# case: missing definitions
(
["missing:v5.6"],
"Provided operator CSV modifications misses following pullspecs: missing:v5.6"
),
# case: extra definitions
(
["myimage:v1.2.2", "yet-another-image:123"],
("Provided operator CSV modifications misses following pullspecs: "
"yet-another-image:123")
),
])
@responses.activate
def test_pullspecs_replacements_errors(self, tmpdir, docker_tasker, pull_specs, exc_msg):
"""Plugin should fail when CSV modifications doesn't meet expectations"""
test_url = "https://example.com/modifications.json"
modification_data = {
"pullspec_replacements": PULLSPEC_REPLACEMENTS
}
responses.add(responses.GET, test_url, json=modification_data)
self._test_assert_error(
tmpdir=tmpdir,
docker_tasker=docker_tasker,
test_url=test_url,
pull_specs=pull_specs,
exc_msg=exc_msg,
)
@responses.activate
def test_fetch_modifications_http_error(self, tmpdir, docker_tasker):
"""Test if HTTP error during fetching is properly described to user"""
test_url = "https://example.com/modifications.json"
exc_msg = f"Failed to fetch the operator CSV modification JSON from {test_url}"
responses.add(responses.GET, test_url, status=404)
self._test_assert_error(
tmpdir=tmpdir,
docker_tasker=docker_tasker,
test_url=test_url,
pull_specs=['mytestimage:v5'],
exc_msg=exc_msg)
@responses.activate
def test_fetch_modifications_json_error(self, tmpdir, docker_tasker):
"""Test if JSON decoding failure properly described to user"""
test_url = "https://example.com/modifications.json"
exc_msg = f"Failed to parse operator CSV modification JSON from {test_url}"
responses.add(responses.GET, test_url, body="invalid json")
self._test_assert_error(
tmpdir=tmpdir,
docker_tasker=docker_tasker,
test_url=test_url,
pull_specs=['mytestimage:v5'],
exc_msg=exc_msg
)
@responses.activate
def test_csv_has_related_images(self, tmpdir, docker_tasker):
"""Modifications must fail if RelatedImages section exists"""
test_url = "https://example.com/modifications.json"
modification_data = {
"pullspec_replacements": PULLSPEC_REPLACEMENTS
}
responses.add(responses.GET, test_url, json=modification_data)
manifests_dir = tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir()
mock_operator_csv(
manifests_dir, 'csv.yaml', ['mytestimage:v6'],
with_related_images=True
)
user_config = get_user_config(OPERATOR_MANIFESTS_DIR)
site_config = get_site_config()
runner = mock_env(
docker_tasker, tmpdir, orchestrator=True,
user_config=user_config,
site_config=site_config,
operator_csv_modifications_url=test_url,
)
with pytest.raises(PluginFailedException) as exc_info:
runner.run()
exc_msg = (
"OSBS cannot modify operator CSV file because this operator bundle "
"is managed by owner (digest pinning explicitly disabled or "
"RelatedImages section in CSV exists)"
)
assert exc_msg in str(exc_info.value)
@responses.activate
def test_pullspecs_replacements(self, tmpdir, docker_tasker):
"""Test if pullspecs are properly replaced"""
test_url = "https://example.com/modifications.json"
modification_data = {
"pullspec_replacements": PULLSPEC_REPLACEMENTS
}
responses.add(responses.GET, test_url, json=modification_data)
manifests_dir = tmpdir.join(OPERATOR_MANIFESTS_DIR).mkdir()
mock_operator_csv(manifests_dir, 'csv.yaml', ['myimage:v1.2.2'])
user_config = get_user_config(OPERATOR_MANIFESTS_DIR)
site_config = get_site_config()
runner = mock_env(
docker_tasker, tmpdir, orchestrator=True,
user_config=user_config,
site_config=site_config,
operator_csv_modifications_url=test_url
)
result = runner.run()
expected = {
'related_images': {
'created_by_osbs': True,
'pullspecs': [
{
'new': ImageName.parse(p['new']),
'original': ImageName.parse(p['original']),
'pinned': p['pinned'],
'replaced': ImageName.parse(p['new']) != ImageName.parse(p['original']),
}
for p in PULLSPEC_REPLACEMENTS
]
}
}
assert result['pin_operator_digest'] == expected
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
from .sub_resource import SubResource
from .expression import Expression
from .secure_string import SecureString
from .linked_service_reference import LinkedServiceReference
from .azure_key_vault_secret_reference import AzureKeyVaultSecretReference
from .secret_base import SecretBase
from .factory_identity import FactoryIdentity
from .factory import Factory
from .integration_runtime import IntegrationRuntime
from .integration_runtime_resource import IntegrationRuntimeResource
from .integration_runtime_reference import IntegrationRuntimeReference
from .integration_runtime_status import IntegrationRuntimeStatus
from .integration_runtime_status_response import IntegrationRuntimeStatusResponse
from .integration_runtime_status_list_response import IntegrationRuntimeStatusListResponse
from .update_integration_runtime_request import UpdateIntegrationRuntimeRequest
from .update_integration_runtime_node_request import UpdateIntegrationRuntimeNodeRequest
from .parameter_specification import ParameterSpecification
from .linked_service import LinkedService
from .linked_service_resource import LinkedServiceResource
from .dataset import Dataset
from .dataset_resource import DatasetResource
from .activity_dependency import ActivityDependency
from .activity import Activity
from .pipeline_resource import PipelineResource
from .trigger import Trigger
from .trigger_resource import TriggerResource
from .create_run_response import CreateRunResponse
from .error_response import ErrorResponse, ErrorResponseException
from .pipeline_reference import PipelineReference
from .trigger_pipeline_reference import TriggerPipelineReference
from .factory_update_parameters import FactoryUpdateParameters
from .dataset_reference import DatasetReference
from .pipeline_run_query_filter import PipelineRunQueryFilter
from .pipeline_run_query_order_by import PipelineRunQueryOrderBy
from .pipeline_run_filter_parameters import PipelineRunFilterParameters
from .pipeline_run_invoked_by import PipelineRunInvokedBy
from .pipeline_run import PipelineRun
from .pipeline_run_query_response import PipelineRunQueryResponse
from .activity_run import ActivityRun
from .trigger_run import TriggerRun
from .operation_display import OperationDisplay
from .operation_log_specification import OperationLogSpecification
from .operation_metric_availability import OperationMetricAvailability
from .operation_metric_specification import OperationMetricSpecification
from .operation_service_specification import OperationServiceSpecification
from .operation import Operation
from .operation_list_response import OperationListResponse
from .azure_databricks_linked_service import AzureDatabricksLinkedService
from .azure_data_lake_analytics_linked_service import AzureDataLakeAnalyticsLinkedService
from .hd_insight_on_demand_linked_service import HDInsightOnDemandLinkedService
from .salesforce_marketing_cloud_linked_service import SalesforceMarketingCloudLinkedService
from .netezza_linked_service import NetezzaLinkedService
from .vertica_linked_service import VerticaLinkedService
from .zoho_linked_service import ZohoLinkedService
from .xero_linked_service import XeroLinkedService
from .square_linked_service import SquareLinkedService
from .spark_linked_service import SparkLinkedService
from .shopify_linked_service import ShopifyLinkedService
from .service_now_linked_service import ServiceNowLinkedService
from .quick_books_linked_service import QuickBooksLinkedService
from .presto_linked_service import PrestoLinkedService
from .phoenix_linked_service import PhoenixLinkedService
from .paypal_linked_service import PaypalLinkedService
from .marketo_linked_service import MarketoLinkedService
from .maria_db_linked_service import MariaDBLinkedService
from .magento_linked_service import MagentoLinkedService
from .jira_linked_service import JiraLinkedService
from .impala_linked_service import ImpalaLinkedService
from .hubspot_linked_service import HubspotLinkedService
from .hive_linked_service import HiveLinkedService
from .hbase_linked_service import HBaseLinkedService
from .greenplum_linked_service import GreenplumLinkedService
from .google_big_query_linked_service import GoogleBigQueryLinkedService
from .eloqua_linked_service import EloquaLinkedService
from .drill_linked_service import DrillLinkedService
from .couchbase_linked_service import CouchbaseLinkedService
from .concur_linked_service import ConcurLinkedService
from .azure_postgre_sql_linked_service import AzurePostgreSqlLinkedService
from .amazon_mws_linked_service import AmazonMWSLinkedService
from .sap_hana_linked_service import SapHanaLinkedService
from .sap_bw_linked_service import SapBWLinkedService
from .sftp_server_linked_service import SftpServerLinkedService
from .ftp_server_linked_service import FtpServerLinkedService
from .http_linked_service import HttpLinkedService
from .azure_search_linked_service import AzureSearchLinkedService
from .custom_data_source_linked_service import CustomDataSourceLinkedService
from .amazon_redshift_linked_service import AmazonRedshiftLinkedService
from .amazon_s3_linked_service import AmazonS3LinkedService
from .sap_ecc_linked_service import SapEccLinkedService
from .sap_cloud_for_customer_linked_service import SapCloudForCustomerLinkedService
from .salesforce_linked_service import SalesforceLinkedService
from .azure_data_lake_store_linked_service import AzureDataLakeStoreLinkedService
from .mongo_db_linked_service import MongoDbLinkedService
from .cassandra_linked_service import CassandraLinkedService
from .web_client_certificate_authentication import WebClientCertificateAuthentication
from .web_basic_authentication import WebBasicAuthentication
from .web_anonymous_authentication import WebAnonymousAuthentication
from .web_linked_service_type_properties import WebLinkedServiceTypeProperties
from .web_linked_service import WebLinkedService
from .odata_linked_service import ODataLinkedService
from .hdfs_linked_service import HdfsLinkedService
from .odbc_linked_service import OdbcLinkedService
from .azure_ml_linked_service import AzureMLLinkedService
from .teradata_linked_service import TeradataLinkedService
from .db2_linked_service import Db2LinkedService
from .sybase_linked_service import SybaseLinkedService
from .postgre_sql_linked_service import PostgreSqlLinkedService
from .my_sql_linked_service import MySqlLinkedService
from .azure_my_sql_linked_service import AzureMySqlLinkedService
from .oracle_linked_service import OracleLinkedService
from .file_server_linked_service import FileServerLinkedService
from .hd_insight_linked_service import HDInsightLinkedService
from .dynamics_linked_service import DynamicsLinkedService
from .cosmos_db_linked_service import CosmosDbLinkedService
from .azure_key_vault_linked_service import AzureKeyVaultLinkedService
from .azure_batch_linked_service import AzureBatchLinkedService
from .azure_sql_database_linked_service import AzureSqlDatabaseLinkedService
from .sql_server_linked_service import SqlServerLinkedService
from .azure_sql_dw_linked_service import AzureSqlDWLinkedService
from .azure_storage_linked_service import AzureStorageLinkedService
from .salesforce_marketing_cloud_object_dataset import SalesforceMarketingCloudObjectDataset
from .vertica_table_dataset import VerticaTableDataset
from .netezza_table_dataset import NetezzaTableDataset
from .zoho_object_dataset import ZohoObjectDataset
from .xero_object_dataset import XeroObjectDataset
from .square_object_dataset import SquareObjectDataset
from .spark_object_dataset import SparkObjectDataset
from .shopify_object_dataset import ShopifyObjectDataset
from .service_now_object_dataset import ServiceNowObjectDataset
from .quick_books_object_dataset import QuickBooksObjectDataset
from .presto_object_dataset import PrestoObjectDataset
from .phoenix_object_dataset import PhoenixObjectDataset
from .paypal_object_dataset import PaypalObjectDataset
from .marketo_object_dataset import MarketoObjectDataset
from .maria_db_table_dataset import MariaDBTableDataset
from .magento_object_dataset import MagentoObjectDataset
from .jira_object_dataset import JiraObjectDataset
from .impala_object_dataset import ImpalaObjectDataset
from .hubspot_object_dataset import HubspotObjectDataset
from .hive_object_dataset import HiveObjectDataset
from .hbase_object_dataset import HBaseObjectDataset
from .greenplum_table_dataset import GreenplumTableDataset
from .google_big_query_object_dataset import GoogleBigQueryObjectDataset
from .eloqua_object_dataset import EloquaObjectDataset
from .drill_table_dataset import DrillTableDataset
from .couchbase_table_dataset import CouchbaseTableDataset
from .concur_object_dataset import ConcurObjectDataset
from .azure_postgre_sql_table_dataset import AzurePostgreSqlTableDataset
from .amazon_mws_object_dataset import AmazonMWSObjectDataset
from .dataset_zip_deflate_compression import DatasetZipDeflateCompression
from .dataset_deflate_compression import DatasetDeflateCompression
from .dataset_gzip_compression import DatasetGZipCompression
from .dataset_bzip2_compression import DatasetBZip2Compression
from .dataset_compression import DatasetCompression
from .parquet_format import ParquetFormat
from .orc_format import OrcFormat
from .avro_format import AvroFormat
from .json_format import JsonFormat
from .text_format import TextFormat
from .dataset_storage_format import DatasetStorageFormat
from .http_dataset import HttpDataset
from .azure_search_index_dataset import AzureSearchIndexDataset
from .web_table_dataset import WebTableDataset
from .sql_server_table_dataset import SqlServerTableDataset
from .sap_ecc_resource_dataset import SapEccResourceDataset
from .sap_cloud_for_customer_resource_dataset import SapCloudForCustomerResourceDataset
from .salesforce_object_dataset import SalesforceObjectDataset
from .relational_table_dataset import RelationalTableDataset
from .azure_my_sql_table_dataset import AzureMySqlTableDataset
from .oracle_table_dataset import OracleTableDataset
from .odata_resource_dataset import ODataResourceDataset
from .mongo_db_collection_dataset import MongoDbCollectionDataset
from .file_share_dataset import FileShareDataset
from .azure_data_lake_store_dataset import AzureDataLakeStoreDataset
from .dynamics_entity_dataset import DynamicsEntityDataset
from .document_db_collection_dataset import DocumentDbCollectionDataset
from .custom_dataset import CustomDataset
from .cassandra_table_dataset import CassandraTableDataset
from .azure_sql_dw_table_dataset import AzureSqlDWTableDataset
from .azure_sql_table_dataset import AzureSqlTableDataset
from .azure_table_dataset import AzureTableDataset
from .azure_blob_dataset import AzureBlobDataset
from .amazon_s3_dataset import AmazonS3Dataset
from .retry_policy import RetryPolicy
from .tumbling_window_trigger import TumblingWindowTrigger
from .blob_trigger import BlobTrigger
from .recurrence_schedule_occurrence import RecurrenceScheduleOccurrence
from .recurrence_schedule import RecurrenceSchedule
from .schedule_trigger_recurrence import ScheduleTriggerRecurrence
from .schedule_trigger import ScheduleTrigger
from .multiple_pipeline_trigger import MultiplePipelineTrigger
from .activity_policy import ActivityPolicy
from .databricks_notebook_activity import DatabricksNotebookActivity
from .data_lake_analytics_usql_activity import DataLakeAnalyticsUSQLActivity
from .azure_ml_update_resource_activity import AzureMLUpdateResourceActivity
from .azure_ml_web_service_file import AzureMLWebServiceFile
from .azure_ml_batch_execution_activity import AzureMLBatchExecutionActivity
from .get_metadata_activity import GetMetadataActivity
from .web_activity_authentication import WebActivityAuthentication
from .web_activity import WebActivity
from .redshift_unload_settings import RedshiftUnloadSettings
from .amazon_redshift_source import AmazonRedshiftSource
from .salesforce_marketing_cloud_source import SalesforceMarketingCloudSource
from .vertica_source import VerticaSource
from .netezza_source import NetezzaSource
from .zoho_source import ZohoSource
from .xero_source import XeroSource
from .square_source import SquareSource
from .spark_source import SparkSource
from .shopify_source import ShopifySource
from .service_now_source import ServiceNowSource
from .quick_books_source import QuickBooksSource
from .presto_source import PrestoSource
from .phoenix_source import PhoenixSource
from .paypal_source import PaypalSource
from .marketo_source import MarketoSource
from .maria_db_source import MariaDBSource
from .magento_source import MagentoSource
from .jira_source import JiraSource
from .impala_source import ImpalaSource
from .hubspot_source import HubspotSource
from .hive_source import HiveSource
from .hbase_source import HBaseSource
from .greenplum_source import GreenplumSource
from .google_big_query_source import GoogleBigQuerySource
from .eloqua_source import EloquaSource
from .drill_source import DrillSource
from .couchbase_source import CouchbaseSource
from .concur_source import ConcurSource
from .azure_postgre_sql_source import AzurePostgreSqlSource
from .amazon_mws_source import AmazonMWSSource
from .http_source import HttpSource
from .azure_data_lake_store_source import AzureDataLakeStoreSource
from .mongo_db_source import MongoDbSource
from .cassandra_source import CassandraSource
from .web_source import WebSource
from .oracle_source import OracleSource
from .azure_my_sql_source import AzureMySqlSource
from .distcp_settings import DistcpSettings
from .hdfs_source import HdfsSource
from .file_system_source import FileSystemSource
from .sql_dw_source import SqlDWSource
from .stored_procedure_parameter import StoredProcedureParameter
from .sql_source import SqlSource
from .sap_ecc_source import SapEccSource
from .sap_cloud_for_customer_source import SapCloudForCustomerSource
from .salesforce_source import SalesforceSource
from .relational_source import RelationalSource
from .dynamics_source import DynamicsSource
from .document_db_collection_source import DocumentDbCollectionSource
from .blob_source import BlobSource
from .azure_table_source import AzureTableSource
from .copy_source import CopySource
from .lookup_activity import LookupActivity
from .sql_server_stored_procedure_activity import SqlServerStoredProcedureActivity
from .custom_activity_reference_object import CustomActivityReferenceObject
from .custom_activity import CustomActivity
from .ssis_package_location import SSISPackageLocation
from .execute_ssis_package_activity import ExecuteSSISPackageActivity
from .hd_insight_spark_activity import HDInsightSparkActivity
from .hd_insight_streaming_activity import HDInsightStreamingActivity
from .hd_insight_map_reduce_activity import HDInsightMapReduceActivity
from .hd_insight_pig_activity import HDInsightPigActivity
from .hd_insight_hive_activity import HDInsightHiveActivity
from .redirect_incompatible_row_settings import RedirectIncompatibleRowSettings
from .staging_settings import StagingSettings
from .tabular_translator import TabularTranslator
from .copy_translator import CopyTranslator
from .salesforce_sink import SalesforceSink
from .dynamics_sink import DynamicsSink
from .odbc_sink import OdbcSink
from .azure_search_index_sink import AzureSearchIndexSink
from .azure_data_lake_store_sink import AzureDataLakeStoreSink
from .oracle_sink import OracleSink
from .polybase_settings import PolybaseSettings
from .sql_dw_sink import SqlDWSink
from .sql_sink import SqlSink
from .document_db_collection_sink import DocumentDbCollectionSink
from .file_system_sink import FileSystemSink
from .blob_sink import BlobSink
from .azure_table_sink import AzureTableSink
from .azure_queue_sink import AzureQueueSink
from .sap_cloud_for_customer_sink import SapCloudForCustomerSink
from .copy_sink import CopySink
from .copy_activity import CopyActivity
from .execution_activity import ExecutionActivity
from .filter_activity import FilterActivity
from .until_activity import UntilActivity
from .wait_activity import WaitActivity
from .for_each_activity import ForEachActivity
from .if_condition_activity import IfConditionActivity
from .execute_pipeline_activity import ExecutePipelineActivity
from .control_activity import ControlActivity
from .linked_integration_runtime import LinkedIntegrationRuntime
from .self_hosted_integration_runtime_node import SelfHostedIntegrationRuntimeNode
from .self_hosted_integration_runtime_status import SelfHostedIntegrationRuntimeStatus
from .managed_integration_runtime_operation_result import ManagedIntegrationRuntimeOperationResult
from .managed_integration_runtime_error import ManagedIntegrationRuntimeError
from .managed_integration_runtime_node import ManagedIntegrationRuntimeNode
from .managed_integration_runtime_status import ManagedIntegrationRuntimeStatus
from .linked_integration_runtime_rbac import LinkedIntegrationRuntimeRbac
from .linked_integration_runtime_key import LinkedIntegrationRuntimeKey
from .linked_integration_runtime_properties import LinkedIntegrationRuntimeProperties
from .self_hosted_integration_runtime import SelfHostedIntegrationRuntime
from .integration_runtime_custom_setup_script_properties import IntegrationRuntimeCustomSetupScriptProperties
from .integration_runtime_ssis_catalog_info import IntegrationRuntimeSsisCatalogInfo
from .integration_runtime_ssis_properties import IntegrationRuntimeSsisProperties
from .integration_runtime_vnet_properties import IntegrationRuntimeVNetProperties
from .integration_runtime_compute_properties import IntegrationRuntimeComputeProperties
from .managed_integration_runtime import ManagedIntegrationRuntime
from .integration_runtime_node_ip_address import IntegrationRuntimeNodeIpAddress
from .integration_runtime_node_monitoring_data import IntegrationRuntimeNodeMonitoringData
from .integration_runtime_monitoring_data import IntegrationRuntimeMonitoringData
from .integration_runtime_remove_node_request import IntegrationRuntimeRemoveNodeRequest
from .integration_runtime_auth_keys import IntegrationRuntimeAuthKeys
from .integration_runtime_regenerate_key_parameters import IntegrationRuntimeRegenerateKeyParameters
from .integration_runtime_connection_info import IntegrationRuntimeConnectionInfo
from .factory_paged import FactoryPaged
from .integration_runtime_resource_paged import IntegrationRuntimeResourcePaged
from .linked_service_resource_paged import LinkedServiceResourcePaged
from .dataset_resource_paged import DatasetResourcePaged
from .pipeline_resource_paged import PipelineResourcePaged
from .activity_run_paged import ActivityRunPaged
from .trigger_resource_paged import TriggerResourcePaged
from .trigger_run_paged import TriggerRunPaged
from .data_factory_management_client_enums import (
IntegrationRuntimeState,
IntegrationRuntimeAutoUpdate,
ParameterType,
DependencyCondition,
TriggerRuntimeState,
PipelineRunQueryFilterOperand,
PipelineRunQueryFilterOperator,
PipelineRunQueryOrderByField,
PipelineRunQueryOrder,
TriggerRunStatus,
SparkServerType,
SparkThriftTransportProtocol,
SparkAuthenticationType,
ServiceNowAuthenticationType,
PrestoAuthenticationType,
PhoenixAuthenticationType,
ImpalaAuthenticationType,
HiveServerType,
HiveThriftTransportProtocol,
HiveAuthenticationType,
HBaseAuthenticationType,
GoogleBigQueryAuthenticationType,
SapHanaAuthenticationType,
SftpAuthenticationType,
FtpAuthenticationType,
HttpAuthenticationType,
MongoDbAuthenticationType,
ODataAuthenticationType,
TeradataAuthenticationType,
Db2AuthenticationType,
SybaseAuthenticationType,
DatasetCompressionLevel,
JsonFormatFilePattern,
TumblingWindowFrequency,
DayOfWeek,
DaysOfWeek,
RecurrenceFrequency,
WebActivityMethod,
CassandraSourceReadConsistencyLevels,
StoredProcedureParameterType,
SalesforceSourceReadBehavior,
SSISExecutionRuntime,
HDInsightActivityDebugInfoOption,
SalesforceSinkWriteBehavior,
AzureSearchIndexWriteBehaviorType,
CopyBehaviorType,
PolybaseSettingsRejectType,
SapCloudForCustomerSinkWriteBehavior,
IntegrationRuntimeType,
SelfHostedIntegrationRuntimeNodeStatus,
IntegrationRuntimeUpdateResult,
IntegrationRuntimeInternalChannelEncryptionMode,
ManagedIntegrationRuntimeNodeStatus,
IntegrationRuntimeSsisCatalogPricingTier,
IntegrationRuntimeLicenseType,
IntegrationRuntimeEdition,
IntegrationRuntimeAuthKeyName,
)
__all__ = [
'Resource',
'SubResource',
'Expression',
'SecureString',
'LinkedServiceReference',
'AzureKeyVaultSecretReference',
'SecretBase',
'FactoryIdentity',
'Factory',
'IntegrationRuntime',
'IntegrationRuntimeResource',
'IntegrationRuntimeReference',
'IntegrationRuntimeStatus',
'IntegrationRuntimeStatusResponse',
'IntegrationRuntimeStatusListResponse',
'UpdateIntegrationRuntimeRequest',
'UpdateIntegrationRuntimeNodeRequest',
'ParameterSpecification',
'LinkedService',
'LinkedServiceResource',
'Dataset',
'DatasetResource',
'ActivityDependency',
'Activity',
'PipelineResource',
'Trigger',
'TriggerResource',
'CreateRunResponse',
'ErrorResponse',
'ErrorResponseException',
'PipelineReference',
'TriggerPipelineReference',
'FactoryUpdateParameters',
'DatasetReference',
'PipelineRunQueryFilter',
'PipelineRunQueryOrderBy',
'PipelineRunFilterParameters',
'PipelineRunInvokedBy',
'PipelineRun',
'PipelineRunQueryResponse',
'ActivityRun',
'TriggerRun',
'OperationDisplay',
'OperationLogSpecification',
'OperationMetricAvailability',
'OperationMetricSpecification',
'OperationServiceSpecification',
'Operation',
'OperationListResponse',
'AzureDatabricksLinkedService',
'AzureDataLakeAnalyticsLinkedService',
'HDInsightOnDemandLinkedService',
'SalesforceMarketingCloudLinkedService',
'NetezzaLinkedService',
'VerticaLinkedService',
'ZohoLinkedService',
'XeroLinkedService',
'SquareLinkedService',
'SparkLinkedService',
'ShopifyLinkedService',
'ServiceNowLinkedService',
'QuickBooksLinkedService',
'PrestoLinkedService',
'PhoenixLinkedService',
'PaypalLinkedService',
'MarketoLinkedService',
'MariaDBLinkedService',
'MagentoLinkedService',
'JiraLinkedService',
'ImpalaLinkedService',
'HubspotLinkedService',
'HiveLinkedService',
'HBaseLinkedService',
'GreenplumLinkedService',
'GoogleBigQueryLinkedService',
'EloquaLinkedService',
'DrillLinkedService',
'CouchbaseLinkedService',
'ConcurLinkedService',
'AzurePostgreSqlLinkedService',
'AmazonMWSLinkedService',
'SapHanaLinkedService',
'SapBWLinkedService',
'SftpServerLinkedService',
'FtpServerLinkedService',
'HttpLinkedService',
'AzureSearchLinkedService',
'CustomDataSourceLinkedService',
'AmazonRedshiftLinkedService',
'AmazonS3LinkedService',
'SapEccLinkedService',
'SapCloudForCustomerLinkedService',
'SalesforceLinkedService',
'AzureDataLakeStoreLinkedService',
'MongoDbLinkedService',
'CassandraLinkedService',
'WebClientCertificateAuthentication',
'WebBasicAuthentication',
'WebAnonymousAuthentication',
'WebLinkedServiceTypeProperties',
'WebLinkedService',
'ODataLinkedService',
'HdfsLinkedService',
'OdbcLinkedService',
'AzureMLLinkedService',
'TeradataLinkedService',
'Db2LinkedService',
'SybaseLinkedService',
'PostgreSqlLinkedService',
'MySqlLinkedService',
'AzureMySqlLinkedService',
'OracleLinkedService',
'FileServerLinkedService',
'HDInsightLinkedService',
'DynamicsLinkedService',
'CosmosDbLinkedService',
'AzureKeyVaultLinkedService',
'AzureBatchLinkedService',
'AzureSqlDatabaseLinkedService',
'SqlServerLinkedService',
'AzureSqlDWLinkedService',
'AzureStorageLinkedService',
'SalesforceMarketingCloudObjectDataset',
'VerticaTableDataset',
'NetezzaTableDataset',
'ZohoObjectDataset',
'XeroObjectDataset',
'SquareObjectDataset',
'SparkObjectDataset',
'ShopifyObjectDataset',
'ServiceNowObjectDataset',
'QuickBooksObjectDataset',
'PrestoObjectDataset',
'PhoenixObjectDataset',
'PaypalObjectDataset',
'MarketoObjectDataset',
'MariaDBTableDataset',
'MagentoObjectDataset',
'JiraObjectDataset',
'ImpalaObjectDataset',
'HubspotObjectDataset',
'HiveObjectDataset',
'HBaseObjectDataset',
'GreenplumTableDataset',
'GoogleBigQueryObjectDataset',
'EloquaObjectDataset',
'DrillTableDataset',
'CouchbaseTableDataset',
'ConcurObjectDataset',
'AzurePostgreSqlTableDataset',
'AmazonMWSObjectDataset',
'DatasetZipDeflateCompression',
'DatasetDeflateCompression',
'DatasetGZipCompression',
'DatasetBZip2Compression',
'DatasetCompression',
'ParquetFormat',
'OrcFormat',
'AvroFormat',
'JsonFormat',
'TextFormat',
'DatasetStorageFormat',
'HttpDataset',
'AzureSearchIndexDataset',
'WebTableDataset',
'SqlServerTableDataset',
'SapEccResourceDataset',
'SapCloudForCustomerResourceDataset',
'SalesforceObjectDataset',
'RelationalTableDataset',
'AzureMySqlTableDataset',
'OracleTableDataset',
'ODataResourceDataset',
'MongoDbCollectionDataset',
'FileShareDataset',
'AzureDataLakeStoreDataset',
'DynamicsEntityDataset',
'DocumentDbCollectionDataset',
'CustomDataset',
'CassandraTableDataset',
'AzureSqlDWTableDataset',
'AzureSqlTableDataset',
'AzureTableDataset',
'AzureBlobDataset',
'AmazonS3Dataset',
'RetryPolicy',
'TumblingWindowTrigger',
'BlobTrigger',
'RecurrenceScheduleOccurrence',
'RecurrenceSchedule',
'ScheduleTriggerRecurrence',
'ScheduleTrigger',
'MultiplePipelineTrigger',
'ActivityPolicy',
'DatabricksNotebookActivity',
'DataLakeAnalyticsUSQLActivity',
'AzureMLUpdateResourceActivity',
'AzureMLWebServiceFile',
'AzureMLBatchExecutionActivity',
'GetMetadataActivity',
'WebActivityAuthentication',
'WebActivity',
'RedshiftUnloadSettings',
'AmazonRedshiftSource',
'SalesforceMarketingCloudSource',
'VerticaSource',
'NetezzaSource',
'ZohoSource',
'XeroSource',
'SquareSource',
'SparkSource',
'ShopifySource',
'ServiceNowSource',
'QuickBooksSource',
'PrestoSource',
'PhoenixSource',
'PaypalSource',
'MarketoSource',
'MariaDBSource',
'MagentoSource',
'JiraSource',
'ImpalaSource',
'HubspotSource',
'HiveSource',
'HBaseSource',
'GreenplumSource',
'GoogleBigQuerySource',
'EloquaSource',
'DrillSource',
'CouchbaseSource',
'ConcurSource',
'AzurePostgreSqlSource',
'AmazonMWSSource',
'HttpSource',
'AzureDataLakeStoreSource',
'MongoDbSource',
'CassandraSource',
'WebSource',
'OracleSource',
'AzureMySqlSource',
'DistcpSettings',
'HdfsSource',
'FileSystemSource',
'SqlDWSource',
'StoredProcedureParameter',
'SqlSource',
'SapEccSource',
'SapCloudForCustomerSource',
'SalesforceSource',
'RelationalSource',
'DynamicsSource',
'DocumentDbCollectionSource',
'BlobSource',
'AzureTableSource',
'CopySource',
'LookupActivity',
'SqlServerStoredProcedureActivity',
'CustomActivityReferenceObject',
'CustomActivity',
'SSISPackageLocation',
'ExecuteSSISPackageActivity',
'HDInsightSparkActivity',
'HDInsightStreamingActivity',
'HDInsightMapReduceActivity',
'HDInsightPigActivity',
'HDInsightHiveActivity',
'RedirectIncompatibleRowSettings',
'StagingSettings',
'TabularTranslator',
'CopyTranslator',
'SalesforceSink',
'DynamicsSink',
'OdbcSink',
'AzureSearchIndexSink',
'AzureDataLakeStoreSink',
'OracleSink',
'PolybaseSettings',
'SqlDWSink',
'SqlSink',
'DocumentDbCollectionSink',
'FileSystemSink',
'BlobSink',
'AzureTableSink',
'AzureQueueSink',
'SapCloudForCustomerSink',
'CopySink',
'CopyActivity',
'ExecutionActivity',
'FilterActivity',
'UntilActivity',
'WaitActivity',
'ForEachActivity',
'IfConditionActivity',
'ExecutePipelineActivity',
'ControlActivity',
'LinkedIntegrationRuntime',
'SelfHostedIntegrationRuntimeNode',
'SelfHostedIntegrationRuntimeStatus',
'ManagedIntegrationRuntimeOperationResult',
'ManagedIntegrationRuntimeError',
'ManagedIntegrationRuntimeNode',
'ManagedIntegrationRuntimeStatus',
'LinkedIntegrationRuntimeRbac',
'LinkedIntegrationRuntimeKey',
'LinkedIntegrationRuntimeProperties',
'SelfHostedIntegrationRuntime',
'IntegrationRuntimeCustomSetupScriptProperties',
'IntegrationRuntimeSsisCatalogInfo',
'IntegrationRuntimeSsisProperties',
'IntegrationRuntimeVNetProperties',
'IntegrationRuntimeComputeProperties',
'ManagedIntegrationRuntime',
'IntegrationRuntimeNodeIpAddress',
'IntegrationRuntimeNodeMonitoringData',
'IntegrationRuntimeMonitoringData',
'IntegrationRuntimeRemoveNodeRequest',
'IntegrationRuntimeAuthKeys',
'IntegrationRuntimeRegenerateKeyParameters',
'IntegrationRuntimeConnectionInfo',
'FactoryPaged',
'IntegrationRuntimeResourcePaged',
'LinkedServiceResourcePaged',
'DatasetResourcePaged',
'PipelineResourcePaged',
'ActivityRunPaged',
'TriggerResourcePaged',
'TriggerRunPaged',
'IntegrationRuntimeState',
'IntegrationRuntimeAutoUpdate',
'ParameterType',
'DependencyCondition',
'TriggerRuntimeState',
'PipelineRunQueryFilterOperand',
'PipelineRunQueryFilterOperator',
'PipelineRunQueryOrderByField',
'PipelineRunQueryOrder',
'TriggerRunStatus',
'SparkServerType',
'SparkThriftTransportProtocol',
'SparkAuthenticationType',
'ServiceNowAuthenticationType',
'PrestoAuthenticationType',
'PhoenixAuthenticationType',
'ImpalaAuthenticationType',
'HiveServerType',
'HiveThriftTransportProtocol',
'HiveAuthenticationType',
'HBaseAuthenticationType',
'GoogleBigQueryAuthenticationType',
'SapHanaAuthenticationType',
'SftpAuthenticationType',
'FtpAuthenticationType',
'HttpAuthenticationType',
'MongoDbAuthenticationType',
'ODataAuthenticationType',
'TeradataAuthenticationType',
'Db2AuthenticationType',
'SybaseAuthenticationType',
'DatasetCompressionLevel',
'JsonFormatFilePattern',
'TumblingWindowFrequency',
'DayOfWeek',
'DaysOfWeek',
'RecurrenceFrequency',
'WebActivityMethod',
'CassandraSourceReadConsistencyLevels',
'StoredProcedureParameterType',
'SalesforceSourceReadBehavior',
'SSISExecutionRuntime',
'HDInsightActivityDebugInfoOption',
'SalesforceSinkWriteBehavior',
'AzureSearchIndexWriteBehaviorType',
'CopyBehaviorType',
'PolybaseSettingsRejectType',
'SapCloudForCustomerSinkWriteBehavior',
'IntegrationRuntimeType',
'SelfHostedIntegrationRuntimeNodeStatus',
'IntegrationRuntimeUpdateResult',
'IntegrationRuntimeInternalChannelEncryptionMode',
'ManagedIntegrationRuntimeNodeStatus',
'IntegrationRuntimeSsisCatalogPricingTier',
'IntegrationRuntimeLicenseType',
'IntegrationRuntimeEdition',
'IntegrationRuntimeAuthKeyName',
]
|
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import errno
import multiprocessing
import re
import os
import signal
import sys
import subprocess
from color import Coloring
from command import Command, MirrorSafeCommand
import platform_utils
_CAN_COLOR = [
'branch',
'diff',
'grep',
'log',
]
class ForallColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'forall')
self.project = self.printer('project', attr='bold')
class Forall(Command, MirrorSafeCommand):
common = False
helpSummary = "Run a shell command in each project"
helpUsage = """
%prog [<project>...] -c <command> [<arg>...]
%prog -r str1 [str2] ... -c <command> [<arg>...]
"""
helpDescription = """
Executes the same shell command in each project.
The -r option allows running the command only on projects matching
regex or wildcard expression.
# Output Formatting
The -p option causes '%prog' to bind pipes to the command's stdin,
stdout and stderr streams, and pipe all output into a continuous
stream that is displayed in a single pager session. Project headings
are inserted before the output of each command is displayed. If the
command produces no output in a project, no heading is displayed.
The formatting convention used by -p is well suited to some
types of searching, e.g. `repo forall -p -c git log -SFoo` will
print all commits that add or remove references to Foo.
The -v option causes '%prog' to display stderr messages if a
command produces output only on stderr. Normally the -p option
causes command output to be suppressed until the command produces
at least one byte of output on stdout.
# Environment
pwd is the project's working directory. If the current client is
a mirror client, then pwd is the Git repository.
REPO_PROJECT is set to the unique name of the project.
REPO_PATH is the path relative to the root of the client.
REPO_REMOTE is the name of the remote system from the manifest.
REPO_LREV is the name of the revision from the manifest, translated
to a local tracking branch. If you need to pass the manifest
revision to a locally executed git command, use REPO_LREV.
REPO_RREV is the name of the revision from the manifest, exactly
as written in the manifest.
REPO_COUNT is the total number of projects being iterated.
REPO_I is the current (1-based) iteration count. Can be used in
conjunction with REPO_COUNT to add a simple progress indicator to your
command.
REPO__* are any extra environment variables, specified by the
"annotation" element under any project element. This can be useful
for differentiating trees based on user-specific criteria, or simply
annotating tree details.
shell positional arguments ($1, $2, ..., $#) are set to any arguments
following <command>.
Example: to list projects:
%prog -c 'echo $REPO_PROJECT'
Notice that $REPO_PROJECT is quoted to ensure it is expanded in
the context of running <command> instead of in the calling shell.
Unless -p is used, stdin, stdout, stderr are inherited from the
terminal and are not redirected.
If -e is used, when a command exits unsuccessfully, '%prog' will abort
without iterating through the remaining projects.
"""
def _Options(self, p):
def cmd(option, opt_str, value, parser):
setattr(parser.values, option.dest, list(parser.rargs))
while parser.rargs:
del parser.rargs[0]
p.add_option('-r', '--regex',
dest='regex', action='store_true',
help="Execute the command only on projects matching regex or wildcard expression")
p.add_option('-i', '--inverse-regex',
dest='inverse_regex', action='store_true',
help="Execute the command only on projects not matching regex or wildcard expression")
p.add_option('-g', '--groups',
dest='groups',
help="Execute the command only on projects matching the specified groups")
p.add_option('-c', '--command',
help='Command (and arguments) to execute',
dest='command',
action='callback',
callback=cmd)
p.add_option('-e', '--abort-on-errors',
dest='abort_on_errors', action='store_true',
help='Abort if a command exits unsuccessfully')
g = p.add_option_group('Output')
g.add_option('-p',
dest='project_header', action='store_true',
help='Show project headers before output')
g.add_option('-v', '--verbose',
dest='verbose', action='store_true',
help='Show command error messages')
g.add_option('-j', '--jobs',
dest='jobs', action='store', type='int', default=1,
help='Number of commands to execute simultaneously')
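# Only page the output when project headers are requested and a single job
# is running, so the pager sees one continuous stream.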
def WantPager(self, opt):
return opt.project_header and opt.jobs == 1
def _SerializeProject(self, project):
""" Serialize a project._GitGetByExec instance.
project._GitGetByExec is not pickle-able. Instead of trying to pass it
around between processes, make a dict ourselves containing only the
attributes that we need.
"""
if not self.manifest.IsMirror:
lrev = project.GetRevisionId()
else:
lrev = None
return {
'name': project.name,
'relpath': project.relpath,
'remote_name': project.remote.name,
'lrev': lrev,
'rrev': project.revisionExpr,
'annotations': dict((a.name, a.value) for a in project.annotations),
'gitdir': project.gitdir,
'worktree': project.worktree,
}
def ValidateOptions(self, opt, args):
if not opt.command:
self.Usage()
def Execute(self, opt, args):
cmd = [opt.command[0]]
shell = True
if re.compile(r'^[a-z0-9A-Z_/\.-]+$').match(cmd[0]):
shell = False
if shell:
cmd.append(cmd[0])
cmd.extend(opt.command[1:])
if opt.project_header \
and not shell \
and cmd[0] == 'git':
# If this is a direct git command that can enable colorized
# output and the user prefers coloring, add --color into the
# command line because we are going to wrap the command into
# a pipe and git won't know coloring should activate.
#
for cn in cmd[1:]:
if not cn.startswith('-'):
break
else:
cn = None
if cn and cn in _CAN_COLOR:
class ColorCmd(Coloring):
def __init__(self, config, cmd):
Coloring.__init__(self, config, cmd)
if ColorCmd(self.manifest.manifestProject.config, cn).is_on:
cmd.insert(cmd.index(cn) + 1, '--color')
mirror = self.manifest.IsMirror
rc = 0
smart_sync_manifest_name = "smart_sync_override.xml"
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, smart_sync_manifest_name)
if os.path.isfile(smart_sync_manifest_path):
self.manifest.Override(smart_sync_manifest_path)
if opt.regex:
projects = self.FindProjects(args)
elif opt.inverse_regex:
projects = self.FindProjects(args, inverse=True)
else:
projects = self.GetProjects(args, groups=opt.groups)
os.environ['REPO_COUNT'] = str(len(projects))
pool = multiprocessing.Pool(opt.jobs, InitWorker)
try:
config = self.manifest.manifestProject.config
results_it = pool.imap(
DoWorkWrapper,
self.ProjectArgs(projects, mirror, opt, cmd, shell, config))
pool.close()
for r in results_it:
rc = rc or r
if r != 0 and opt.abort_on_errors:
raise Exception('Aborting due to previous error')
except (KeyboardInterrupt, WorkerKeyboardInterrupt):
# Catch KeyboardInterrupt raised inside and outside of workers
print('Interrupted - terminating the pool')
pool.terminate()
rc = rc or errno.EINTR
except Exception as e:
# Catch any other exceptions raised
print('Got an error, terminating the pool: %s: %s' %
(type(e).__name__, e),
file=sys.stderr)
pool.terminate()
rc = rc or getattr(e, 'errno', 1)
finally:
pool.join()
if rc != 0:
sys.exit(rc)
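# Yield one argument list per project for DoWorkWrapper; each project is
# reduced to a plain dict first so it can be pickled to the worker processes.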
def ProjectArgs(self, projects, mirror, opt, cmd, shell, config):
for cnt, p in enumerate(projects):
try:
project = self._SerializeProject(p)
except Exception as e:
print('Project list error on project %s: %s: %s' %
(p.name, type(e).__name__, e),
file=sys.stderr)
return
except KeyboardInterrupt:
print('Project list interrupted',
file=sys.stderr)
return
yield [mirror, opt, cmd, shell, cnt, config, project]
class WorkerKeyboardInterrupt(Exception):
""" Keyboard interrupt exception for worker processes. """
pass
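# Worker initializer: ignore SIGINT in the workers so that Ctrl-C is handled
# by the parent process, which then terminates the pool.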
def InitWorker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def DoWorkWrapper(args):
""" A wrapper around the DoWork() method.
Catch KeyboardInterrupt exceptions here and re-raise them as a different,
``Exception``-based exception, to stop them from flooding the console with
stack traces and making the parent hang indefinitely.
"""
project = args.pop()
try:
return DoWork(project, *args)
except KeyboardInterrupt:
print('%s: Worker interrupted' % project['name'])
raise WorkerKeyboardInterrupt()
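# Run the command in one project's worktree (or git dir for mirrors),
# exporting the REPO_* environment variables; with -p, output is buffered
# and printed under a project header.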
def DoWork(project, mirror, opt, cmd, shell, cnt, config):
env = os.environ.copy()
def setenv(name, val):
if val is None:
val = ''
if hasattr(val, 'encode'):
val = val.encode()
env[name] = val
setenv('REPO_PROJECT', project['name'])
setenv('REPO_PATH', project['relpath'])
setenv('REPO_REMOTE', project['remote_name'])
setenv('REPO_LREV', project['lrev'])
setenv('REPO_RREV', project['rrev'])
setenv('REPO_I', str(cnt + 1))
for name in project['annotations']:
setenv("REPO__%s" % (name), project['annotations'][name])
if mirror:
setenv('GIT_DIR', project['gitdir'])
cwd = project['gitdir']
else:
cwd = project['worktree']
if not os.path.exists(cwd):
if ((opt.project_header and opt.verbose)
or not opt.project_header):
print('skipping %s/' % project['relpath'], file=sys.stderr)
return 1
if opt.project_header:
stdin = subprocess.PIPE
stdout = subprocess.PIPE
stderr = subprocess.PIPE
else:
stdin = None
stdout = None
stderr = None
p = subprocess.Popen(cmd,
cwd=cwd,
shell=shell,
env=env,
stdin=stdin,
stdout=stdout,
stderr=stderr)
if opt.project_header:
out = ForallColoring(config)
out.redirect(sys.stdout)
empty = True
errbuf = ''
p.stdin.close()
s_in = platform_utils.FileDescriptorStreams.create()
s_in.add(p.stdout, sys.stdout, 'stdout')
s_in.add(p.stderr, sys.stderr, 'stderr')
while not s_in.is_done:
in_ready = s_in.select()
for s in in_ready:
buf = s.read()
if not buf:
s.close()
s_in.remove(s)
continue
if not opt.verbose:
if s.std_name == 'stderr':
errbuf += buf
continue
if empty and out:
if not cnt == 0:
out.nl()
if mirror:
project_header_path = project['name']
else:
project_header_path = project['relpath']
out.project('project %s/', project_header_path)
out.nl()
out.flush()
if errbuf:
sys.stderr.write(errbuf)
sys.stderr.flush()
errbuf = ''
empty = False
s.dest.write(buf)
s.dest.flush()
r = p.wait()
return r
|
|
import base64
import binascii
import functools
import os
from datetime import datetime
from urllib.parse import quote_plus
from django.conf import settings
from django.contrib.auth import login, logout
from django.contrib.auth.signals import user_logged_in
from django.core import signing
from django.db.models import Q
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
from django.utils.encoding import force_bytes, force_str
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import never_cache
import jwt
import requests
import waffle
from corsheaders.conf import conf as corsheaders_conf
from corsheaders.middleware import (
ACCESS_CONTROL_ALLOW_ORIGIN,
ACCESS_CONTROL_ALLOW_CREDENTIALS,
ACCESS_CONTROL_ALLOW_HEADERS,
ACCESS_CONTROL_ALLOW_METHODS,
ACCESS_CONTROL_MAX_AGE,
)
from django_statsd.clients import statsd
from rest_framework import exceptions
from rest_framework import serializers
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import action
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import (
DestroyModelMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
)
from rest_framework.permissions import AllowAny, BasePermission, IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_204_NO_CONTENT
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from waffle.decorators import waffle_switch
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.access.models import GroupUser
from olympia.amo.decorators import use_primary_db
from olympia.amo.reverse import get_url_prefix
from olympia.amo.utils import fetch_subscribed_newsletters, is_safe_url, use_fake_fxa
from olympia.api.authentication import (
JWTKeyAuthentication,
SessionIDAuthentication,
UnsubscribeTokenAuthentication,
WebTokenAuthentication,
)
from olympia.api.permissions import AnyOf, ByHttpMethod, GroupPermission
from olympia.users.models import UserNotification, UserProfile
from olympia.users.notifications import (
NOTIFICATIONS_COMBINED,
REMOTE_NOTIFICATIONS_BY_BASKET_ID,
)
from . import verify
from .serializers import (
AccountSuperCreateSerializer,
PublicUserProfileSerializer,
UserNotificationSerializer,
UserProfileSerializer,
)
from .tasks import clear_sessions_event, delete_user_event, primary_email_change_event
from .utils import fxa_login_url, generate_fxa_state
log = olympia.core.logger.getLogger('accounts')
ERROR_AUTHENTICATED = 'authenticated'
ERROR_NO_CODE = 'no-code'
ERROR_NO_PROFILE = 'no-profile'
ERROR_NO_USER = 'no-user'
ERROR_STATE_MISMATCH = 'state-mismatch'
ERROR_STATUSES = {
ERROR_AUTHENTICATED: 400,
ERROR_NO_CODE: 422,
ERROR_NO_PROFILE: 401,
ERROR_STATE_MISMATCH: 400,
}
LOGIN_ERROR_MESSAGES = {
ERROR_AUTHENTICATED: _('You are already logged in.'),
ERROR_NO_CODE: _('Your login attempt could not be parsed. Please try again.'),
ERROR_NO_PROFILE: _('Your Firefox Account could not be found. Please try again.'),
ERROR_STATE_MISMATCH: _('You could not be logged in. Please try again.'),
}
# Name of the cookie that contains the auth token for the API. It used to be
# "api_auth_token" but we had to change it because it wasn't set on the right
# domain, and we couldn't clear both the old and new versions at the same time,
# since sending multiple Set-Cookie headers with the same name is not allowed
# by the spec, even if they have a distinct domain attribute.
API_TOKEN_COOKIE = 'frontend_auth_token'
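# Redirect to the given URL after `action`, falling back to the home page
# when the target is not a safe URL for this request.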
def safe_redirect(request, url, action):
if not is_safe_url(url, request):
url = reverse('home')
log.info(f'Redirecting after {action} to: {url}')
return HttpResponseRedirect(url)
def find_user(identity):
"""Try to find a user for a Firefox Accounts profile. If the account
hasn't been migrated we'll need to do the lookup by email but we should
use the ID after that so check both.
If we get multiple users we're in some weird state where the accounts need
to be merged but that behaviour hasn't been defined so let it raise.
If the user is deleted but we were still able to find them using their
email or fxa_id, throw an error - they are banned, they shouldn't be able
to log in anymore.
"""
try:
user = UserProfile.objects.get(
Q(fxa_id=identity['uid']) | Q(email=identity['email'])
)
is_task_user = user.id == settings.TASK_USER_ID
if user.banned or is_task_user:
# If the user was banned raise a 403, it's not the prettiest
# but should be enough.
# Alternatively if someone tried to log in as the task user then
# prevent that because that user "owns" a number of important
# addons and collections, and its actions are special-cased.
raise exceptions.PermissionDenied()
return user
except UserProfile.DoesNotExist:
return None
except UserProfile.MultipleObjectsReturned:
# This shouldn't happen, so let it raise.
log.error(
'Found multiple users for %s and %s', identity['email'], identity['uid']
)
raise
def register_user(identity):
user = UserProfile.objects.create_user(
email=identity['email'], fxa_id=identity['uid']
)
log.info(f'Created user {user} from FxA')
statsd.incr('accounts.account_created_from_fxa')
return user
def reregister_user(user):
user.update(deleted=False)
log.info(f'Re-created deleted user {user} from FxA')
statsd.incr('accounts.account_created_from_fxa')
def update_user(user, identity):
"""Update a user's info from FxA if needed, as well as generating the id
that is used as part of the session/api token generation."""
if user.fxa_id != identity['uid'] or user.email != identity['email']:
log.info(
'Updating user info from FxA for {pk}. Old {old_email} {old_uid} '
'New {new_email} {new_uid}'.format(
pk=user.pk,
old_email=user.email,
old_uid=user.fxa_id,
new_email=identity['email'],
new_uid=identity['uid'],
)
)
user.update(fxa_id=identity['uid'], email=identity['email'])
if user.auth_id is None:
# If the user didn't have an auth id (old user account created before
# we added the field), generate one for them.
user.update(auth_id=UserProfile._meta.get_field('auth_id').default())
def login_user(sender, request, user, identity, token_data=None):
update_user(user, identity)
log.info(f'Logging in user {user} from FxA')
user_logged_in.send(sender=sender, request=request, user=user)
login(request, user)
if token_data:
request.session['fxa_access_token_expiry'] = token_data['access_token_expiry']
request.session['fxa_refresh_token'] = token_data['refresh_token']
request.session['fxa_config_name'] = token_data['config_name']
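# Append a "Need help?" link pointing at login_help_url to a login error message.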
def fxa_error_message(message, login_help_url):
return format_html(
'{error} <a href="{url}">{help_text}</a>',
url=login_help_url,
help_text=_('Need help?'),
error=message,
)
LOGIN_HELP_URL = 'https://support.mozilla.org/kb/access-your-add-ons-firefox-accounts'
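# Decode the optional base64-encoded next path carried in the second part of
# the FxA state parameter; returns None unless it decodes to a safe URL.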
def parse_next_path(state_parts, request=None):
next_path = None
if len(state_parts) == 2:
# The '=' padding will have been stripped off, so we need to add it back;
# the decoder only complains when there is too little padding, so add 4 of them.
encoded_path = state_parts[1] + '===='
try:
next_path = base64.urlsafe_b64decode(force_bytes(encoded_path)).decode(
'utf-8'
)
except (TypeError, ValueError):
log.info(f'Error decoding next_path {encoded_path}')
pass
if not is_safe_url(next_path, request):
next_path = None
return next_path
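# Decorator for FxA callback views: checks the authorization code and state,
# resolves the FxA identity to a local user (or None), enforces 2FA for
# privileged accounts, and calls the wrapped view with
# user/identity/next_path/token_data.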
def with_user(f):
@functools.wraps(f)
@use_primary_db
def inner(self, request):
fxa_config = self.get_fxa_config(request)
if request.method == 'GET':
data = request.query_params
else:
data = request.data
state_parts = data.get('state', '').split(':', 1)
state = state_parts[0]
next_path = parse_next_path(state_parts, request)
if not data.get('code'):
log.info('No code provided.')
return safe_redirect(request, next_path, ERROR_NO_CODE)
elif (
not request.session.get('fxa_state')
or request.session['fxa_state'] != state
):
log.info(
'State mismatch. URL: {url} Session: {session}'.format(
url=data.get('state'),
session=request.session.get('fxa_state'),
)
)
return safe_redirect(request, next_path, ERROR_STATE_MISMATCH)
elif request.user.is_authenticated:
response = safe_redirect(request, next_path, ERROR_AUTHENTICATED)
# If the api token cookie is missing but we're still
# authenticated using the session, add it back.
if API_TOKEN_COOKIE not in request.COOKIES:
log.info(
'User %s was already authenticated but did not '
'have an API token cookie, adding one.',
request.user.pk,
)
response = add_api_token_to_response(response, request.user)
return response
try:
if use_fake_fxa() and 'fake_fxa_email' in data:
# Bypassing real authentication, we take the email provided
# and generate a random fxa id.
identity = {
'email': data['fake_fxa_email'],
'uid': 'fake_fxa_id-%s'
% force_str(binascii.b2a_hex(os.urandom(16))),
}
id_token, token_data = identity['email'], {}
else:
identity, token_data = verify.fxa_identify(
data['code'], config=fxa_config
)
token_data['config_name'] = self.get_config_name(request)
id_token = token_data.get('id_token')
except verify.IdentificationError:
log.info('Profile not found. Code: {}'.format(data['code']))
return safe_redirect(request, next_path, ERROR_NO_PROFILE)
else:
# The following log statement is used by foxsec-pipeline.
log.info('Logging in FxA user %s', identity['email'])
user = find_user(identity)
# We can't use waffle.flag_is_active() wrapper, because
# request.user isn't populated at this point (and we don't want
# it to be).
flag = waffle.get_waffle_flag_model().get(
'2fa-enforcement-for-developers-and-special-users'
)
enforce_2fa_for_developers_and_special_users = flag.is_active(request) or (
flag.pk and flag.is_active_for_user(user)
)
if (
user
and not identity.get('twoFactorAuthentication')
and enforce_2fa_for_developers_and_special_users
and (user.is_addon_developer or user.groups_list)
):
# https://github.com/mozilla/addons/issues/732
# The user is an add-on developer (with other types of
# add-ons than just themes) or part of any group (so they
# are special in some way, may be an admin or a reviewer),
# but hasn't logged in with a second factor. Immediately
# redirect them to start the FxA flow again, this time
# requesting 2FA to be present - they should be
# automatically logged in FxA with the existing token, and
# should be prompted to create the second factor before
# coming back to AMO.
log.info('Redirecting user %s to enforce 2FA', user)
return HttpResponseRedirect(
fxa_login_url(
config=fxa_config,
state=request.session['fxa_state'],
next_path=next_path,
action='signin',
force_two_factor=True,
request=request,
id_token=id_token,
)
)
return f(
self,
request,
user=user,
identity=identity,
next_path=next_path,
token_data=token_data,
)
return inner
def generate_api_token(user):
"""Generate a new API token for a given user."""
data = {
'auth_hash': user.get_session_auth_hash(),
'user_id': user.pk,
}
return signing.dumps(data, salt=WebTokenAuthentication.salt)
def add_api_token_to_response(response, user):
"""Generate API token and add it in a session cookie named API_TOKEN_COOKIE."""
token = generate_api_token(user)
# Include the API token in a session cookie, so that it is
# available for universal frontend apps.
response.set_cookie(
API_TOKEN_COOKIE,
token,
domain=settings.SESSION_COOKIE_DOMAIN,
max_age=settings.SESSION_COOKIE_AGE,
secure=settings.SESSION_COOKIE_SECURE,
httponly=settings.SESSION_COOKIE_HTTPONLY,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
return response
class FxAConfigMixin:
DEFAULT_FXA_CONFIG_NAME = settings.DEFAULT_FXA_CONFIG_NAME
ALLOWED_FXA_CONFIGS = settings.ALLOWED_FXA_CONFIGS
def get_config_name(self, request):
config_name = request.GET.get('config', self.DEFAULT_FXA_CONFIG_NAME)
if config_name not in self.ALLOWED_FXA_CONFIGS:
log.info(f'Using default FxA config instead of {config_name}')
config_name = self.DEFAULT_FXA_CONFIG_NAME
return config_name
def get_fxa_config(self, request):
return settings.FXA_CONFIG[self.get_config_name(request)]
class LoginStartView(FxAConfigMixin, APIView):
@never_cache
def get(self, request):
request.session.setdefault('fxa_state', generate_fxa_state())
return HttpResponseRedirect(
fxa_login_url(
config=self.get_fxa_config(request),
state=request.session['fxa_state'],
next_path=request.GET.get('to'),
action=request.GET.get('action', 'signin'),
request=request,
)
)
class AuthenticateView(FxAConfigMixin, APIView):
authentication_classes = (SessionAuthentication,)
@never_cache
@with_user
def get(self, request, user, identity, next_path, token_data):
# At this point @with_user guarantees that we have a valid fxa
# identity. We are proceeding with either registering the user or
# logging them on.
if user is None or user.deleted:
action = 'register'
if user is None:
user = register_user(identity)
else:
reregister_user(user)
if not is_safe_url(next_path, request):
next_path = None
# If we just reverse() directly, we'd use a prefixer instance
# initialized from the current view, which would not contain the
# app information since it's a generic callback, the same for
# everyone. To ensure the user stays on the app/locale they were
# on, we extract that information from the next_path if present
# and set locale/app on the prefixer instance that reverse() will
# use automatically.
if next_path:
if prefixer := get_url_prefix():
splitted = prefixer.split_path(next_path)
prefixer.locale = splitted[0]
prefixer.app = splitted[1]
edit_page = reverse('users.edit')
if next_path:
next_path = f'{edit_page}?to={quote_plus(next_path)}'
else:
next_path = edit_page
else:
action = 'login'
login_user(self.__class__, request, user, identity, token_data)
response = safe_redirect(request, next_path, action)
add_api_token_to_response(response, user)
return response
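# Log the user out, invalidate all of their sessions by clearing auth_id,
# and delete the API token cookie on the given response.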
def logout_user(request, response):
if request.user and request.user.is_authenticated:
# Logging out invalidates *all* user sessions. A new auth_id will be
# generated during the next login.
request.user.update(auth_id=None)
logout(request)
response.delete_cookie(
API_TOKEN_COOKIE,
domain=settings.SESSION_COOKIE_DOMAIN,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
# This view is not covered by the CORS middleware, see:
# https://github.com/mozilla/addons-server/issues/11100
class SessionView(APIView):
permission_classes = [
ByHttpMethod(
{
'options': AllowAny, # Needed for CORS.
'delete': IsAuthenticated,
}
),
]
def options(self, request, *args, **kwargs):
response = Response()
response['Content-Length'] = '0'
origin = request.META.get('HTTP_ORIGIN')
if not origin:
return response
response[ACCESS_CONTROL_ALLOW_ORIGIN] = origin
response[ACCESS_CONTROL_ALLOW_CREDENTIALS] = 'true'
# Mimics the django-cors-headers middleware.
response[ACCESS_CONTROL_ALLOW_HEADERS] = ', '.join(
corsheaders_conf.CORS_ALLOW_HEADERS
)
response[ACCESS_CONTROL_ALLOW_METHODS] = ', '.join(
corsheaders_conf.CORS_ALLOW_METHODS
)
if corsheaders_conf.CORS_PREFLIGHT_MAX_AGE:
response[ACCESS_CONTROL_MAX_AGE] = corsheaders_conf.CORS_PREFLIGHT_MAX_AGE
return response
def delete(self, request, *args, **kwargs):
response = Response({'ok': True})
logout_user(request, response)
origin = request.META.get('HTTP_ORIGIN')
if not origin:
return response
response[ACCESS_CONTROL_ALLOW_ORIGIN] = origin
response[ACCESS_CONTROL_ALLOW_CREDENTIALS] = 'true'
return response
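# Object-level permission that only allows authenticated users to act on
# their own profile.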
class AllowSelf(BasePermission):
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
return request.user.is_authenticated and obj == request.user
class AccountViewSet(
RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet
):
permission_classes = [
ByHttpMethod(
{
'get': AllowAny,
'head': AllowAny,
'options': AllowAny, # Needed for CORS.
# To edit a profile it has to be yours, or you have to be an admin.
'patch': AnyOf(AllowSelf, GroupPermission(amo.permissions.USERS_EDIT)),
'delete': AnyOf(AllowSelf, GroupPermission(amo.permissions.USERS_EDIT)),
}
),
]
# Periods are not allowed in usernames, but we still have some in the
# database so relax the lookup regexp to allow them to load their profile.
lookup_value_regex = '[^/]+'
def get_queryset(self):
return UserProfile.objects.exclude(deleted=True).all()
def get_object(self):
if hasattr(self, 'instance'):
return self.instance
identifier = self.kwargs.get('pk')
self.lookup_field = self.get_lookup_field(identifier)
self.kwargs[self.lookup_field] = identifier
self.instance = super().get_object()
# action won't exist for other classes that are using this ViewSet.
can_view_instance = (
not getattr(self, 'action', None)
or self.self_view
or self.admin_viewing
or self.instance.is_public
)
if can_view_instance:
return self.instance
else:
raise Http404
def get_lookup_field(self, identifier):
lookup_field = 'pk'
if identifier and not identifier.isdigit():
# If the identifier contains anything other than a digit, it's
# the username.
lookup_field = 'username'
return lookup_field
@property
def self_view(self):
return (
self.request.user.is_authenticated
and self.get_object() == self.request.user
)
@property
def admin_viewing(self):
return acl.action_allowed_user(self.request.user, amo.permissions.USERS_EDIT)
def get_serializer_class(self):
if self.self_view or self.admin_viewing:
return UserProfileSerializer
else:
return PublicUserProfileSerializer
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
response = Response(status=HTTP_204_NO_CONTENT)
if instance == request.user:
logout_user(request, response)
return response
@action(
detail=True,
methods=['delete'],
permission_classes=[
AnyOf(AllowSelf, GroupPermission(amo.permissions.USERS_EDIT))
],
)
def picture(self, request, pk=None):
user = self.get_object()
user.delete_picture()
log.info('User (%s) deleted photo' % user)
return self.retrieve(request)
class ProfileView(APIView):
authentication_classes = [
JWTKeyAuthentication,
WebTokenAuthentication,
SessionIDAuthentication,
]
permission_classes = [IsAuthenticated]
def get(self, request):
account_viewset = AccountViewSet(
request=request,
permission_classes=self.permission_classes,
kwargs={'pk': str(self.request.user.pk)},
)
account_viewset.format_kwarg = self.format_kwarg
return account_viewset.retrieve(request)
class AccountSuperCreate(APIView):
authentication_classes = [JWTKeyAuthentication]
permission_classes = [
IsAuthenticated,
GroupPermission(amo.permissions.ACCOUNTS_SUPER_CREATE),
]
@waffle_switch('super-create-accounts')
def post(self, request):
serializer = AccountSuperCreateSerializer(data=request.data)
if not serializer.is_valid():
return Response({'errors': serializer.errors}, status=422)
data = serializer.data
group = serializer.validated_data.get('group', None)
user_token = force_str(binascii.b2a_hex(os.urandom(4)))
username = data.get('username', f'super-created-{user_token}')
fxa_id = data.get('fxa_id', None)
email = data.get('email', f'{username}@addons.mozilla.org')
user = UserProfile.objects.create(
username=username,
email=email,
fxa_id=fxa_id,
display_name=f'Super Created {user_token}',
notes='auto-generated from API',
)
user.save()
if group:
GroupUser.objects.create(user=user, group=group)
identity = {'email': email, 'uid': fxa_id}
login_user(self.__class__, request, user, identity)
request.session.save()
log.info(
'API user {api_user} created and logged in a user from '
'the super-create API: user_id: {user.pk}; '
'user_name: {user.username}; fxa_id: {user.fxa_id}; '
'group: {group}'.format(user=user, api_user=request.user, group=group)
)
cookie = {
'name': settings.SESSION_COOKIE_NAME,
'value': request.session.session_key,
}
cookie['encoded'] = '{name}={value}'.format(**cookie)
return Response(
{
'user_id': user.pk,
'username': user.username,
'email': user.email,
'display_name': user.display_name,
'groups': list((g.pk, g.name, g.rules) for g in user.groups.all()),
'fxa_id': user.fxa_id,
'session_cookie': cookie,
},
status=201,
)
class AccountNotificationMixin:
def get_user(self):
raise NotImplementedError
def _get_default_object(self, notification):
return UserNotification(
user=self.get_user(),
notification_id=notification.id,
enabled=notification.default_checked,
)
def get_queryset(self, dev=False):
user = self.get_user()
queryset = UserNotification.objects.filter(user=user)
# Fetch all `UserNotification` instances and then overwrite their
# values with the data from basket.
# Put them into a dict so we can easily check for existence.
set_notifications = {
user_nfn.notification.short: user_nfn
for user_nfn in queryset
if user_nfn.notification
}
out = []
newsletters = None # Lazy - fetch the first time needed.
by_basket_id = REMOTE_NOTIFICATIONS_BY_BASKET_ID
for basket_id, notification in by_basket_id.items():
if notification.group == 'dev' and not user.is_developer:
# We only return dev notifications for developers.
continue
if newsletters is None:
newsletters = fetch_subscribed_newsletters(user)
user_notification = self._get_default_object(notification)
user_notification.enabled = basket_id in newsletters
set_notifications[notification.short] = user_notification
include_dev = dev or user.is_developer
for notification in NOTIFICATIONS_COMBINED:
if notification.group == 'dev' and not include_dev:
# We only return dev notifications for developers.
continue
out.append(
set_notifications.get(
notification.short, # It's been set by the user.
self._get_default_object(notification),
)
) # Or, default.
return out
class AccountNotificationViewSet(
AccountNotificationMixin, ListModelMixin, GenericViewSet
):
"""Returns account notifications.
If not already set by the user, defaults will be returned.
"""
permission_classes = [IsAuthenticated]
# We're pushing the primary permission checking to AccountViewSet for ease.
account_permission_classes = [
AnyOf(AllowSelf, GroupPermission(amo.permissions.USERS_EDIT))
]
serializer_class = UserNotificationSerializer
paginator = None
def get_user(self):
return self.get_account_viewset().get_object()
def get_account_viewset(self):
if not hasattr(self, 'account_viewset'):
self.account_viewset = AccountViewSet(
request=self.request,
permission_classes=self.account_permission_classes,
kwargs={'pk': self.kwargs['user_pk']},
)
return self.account_viewset
def create(self, request, *args, **kwargs):
# Loop through possible notifications.
queryset = self.get_queryset()
for notification in queryset:
# Careful with ifs. `enabled` will be None|True|False.
enabled = request.data.get(notification.notification.short)
if enabled is not None:
serializer = self.get_serializer(
notification, partial=True, data={'enabled': enabled}
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(self.get_serializer(queryset, many=True).data)
class AccountNotificationUnsubscribeView(AccountNotificationMixin, GenericAPIView):
authentication_classes = (UnsubscribeTokenAuthentication,)
permission_classes = ()
serializer_class = UserNotificationSerializer
def get_user(self):
return self.request.user
def post(self, request):
notification_name = request.data.get('notification')
serializer = None
for notification in self.get_queryset(dev=True):
if notification_name == notification.notification.short:
serializer = self.get_serializer(
notification, partial=True, data={'enabled': False}
)
serializer.is_valid(raise_exception=True)
serializer.save()
if not serializer:
raise serializers.ValidationError(
_('Notification [%s] does not exist') % notification_name
)
return Response(serializer.data)
class FxaNotificationView(FxAConfigMixin, APIView):
authentication_classes = []
permission_classes = []
FXA_PROFILE_CHANGE_EVENT = (
'https://schemas.accounts.firefox.com/event/profile-change'
)
FXA_DELETE_EVENT = 'https://schemas.accounts.firefox.com/event/delete-user'
FXA_PASSWORDCHANGE_EVENT = (
'https://schemas.accounts.firefox.com/event/password-change'
)
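# Fetch and cache the JWKS published by the FxA OAuth server; these keys are
# used below to verify the webhook JWTs.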
@classmethod
def get_fxa_verifying_keys(cls):
if not hasattr(cls, 'fxa_verifying_keys'):
response = requests.get(f'{settings.FXA_OAUTH_HOST}/jwks')
cls.fxa_verifying_keys = (
response.json().get('keys') if response.status_code == 200 else []
)
if not cls.fxa_verifying_keys:
raise exceptions.AuthenticationFailed(
'FXA verifying keys are not available.'
)
return cls.fxa_verifying_keys
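# Verify the Bearer JWT from the Authorization header against the FxA
# verifying keys (RS256 only); returns the decoded payload or raises
# AuthenticationFailed.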
def get_jwt_payload(self, request):
client_id = self.get_fxa_config(request)['client_id']
authenticated_jwt = None
auth_header_split = request.headers.get('Authorization', '').split('Bearer ')
if len(auth_header_split) == 2 and auth_header_split[1]:
for verifying_key in self.get_fxa_verifying_keys():
if verifying_key.get('alg') != 'RS256':
# we only support RS256
continue
request_jwt = auth_header_split[1]
try:
algorithm = jwt.algorithms.RSAAlgorithm.from_jwk(verifying_key)
authenticated_jwt = jwt.decode(
request_jwt,
algorithm,
audience=client_id,
algorithms=[verifying_key['alg']],
)
except (ValueError, TypeError, jwt.exceptions.PyJWTError):
pass # We raise when `not authenticated_jwt` below
break
if not authenticated_jwt:
raise exceptions.AuthenticationFailed(
'Could not authenticate JWT with FXA key.'
)
return authenticated_jwt
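# Dispatch one FxA webhook event to the matching async task: profile (email)
# change, user deletion, or password change; unknown event types are logged
# and ignored.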
def process_event(self, uid, event_key, event_data):
timestamp = event_data.get('changeTime') or datetime.now().timestamp()
if event_key == self.FXA_PROFILE_CHANGE_EVENT:
log.info(f'Fxa Webhook: Got profile change event for {uid}')
new_email = event_data.get('email')
if not new_email:
log.info(
'Email property missing/empty for "%s" event; ignoring' % event_key
)
else:
primary_email_change_event.delay(uid, timestamp, new_email)
elif event_key == self.FXA_DELETE_EVENT:
log.info(f'Fxa Webhook: Got delete event for {uid}')
delete_user_event.delay(uid, timestamp)
elif event_key == self.FXA_PASSWORDCHANGE_EVENT:
log.info(f'Fxa Webhook: Got password-change event for {uid}')
clear_sessions_event.delay(uid, timestamp, 'password-change')
else:
log.info('Fxa Webhook: Ignoring unknown event type %r', event_key)
def post(self, request):
payload = self.get_jwt_payload(request)
events = payload.get('events', {})
uid = payload.get('sub')
for event_key, event_data in events.items():
self.process_event(uid, event_key, event_data)
return Response('202 Accepted', status=202)
|
|
from __future__ import absolute_import
import csv
import functools
import logging
import os
import sys
import tempfile
import warnings
from pip._vendor import pkg_resources
from pip.compat import uses_pycache, WINDOWS, cache_from_source
from pip.compat import get_stdlib
from pip.exceptions import UninstallationError
from pip.locations import (
bin_py, bin_user,
)
from pip.utils import (
rmtree, ask, dist_in_usersite, is_local,
egg_link_path, FakeFile,
renames, normalize_path, dist_is_local,
)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.logging import indent_log
logger = logging.getLogger(__name__)
def _script_names(dist, script_name, is_gui):
"""Create the fully qualified name of the files created by
{console,gui}_scripts for the given ``dist``.
Returns the list of file names
"""
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
exe_name = os.path.join(bin_dir, script_name)
paths_to_remove = [exe_name]
if WINDOWS:
paths_to_remove.append(exe_name + '.exe')
paths_to_remove.append(exe_name + '.exe.manifest')
if is_gui:
paths_to_remove.append(exe_name + '-script.pyw')
else:
paths_to_remove.append(exe_name + '-script.py')
return paths_to_remove
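# Decorator that de-duplicates a generator's output, yielding each item only once.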
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = None
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def add(self, path):
head, tail = os.path.split(path)
# we normalize the head to resolve parent directory symlinks, but not
# the tail, since we only want to uninstall symlinks, not their targets
path = os.path.join(normalize_path(head), os.path.normcase(tail))
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
# __pycache__ files can show up after 'installed-files.txt' is created,
# due to imports
if os.path.splitext(path)[1] == '.py' and uses_pycache:
self.add(cache_from_source(path))
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def compact(self, paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
short_paths = set()
for path in sorted(paths, key=len):
if not any([
(path.startswith(shortpath) and
path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
for shortpath in short_paths]):
short_paths.add(path)
return short_paths
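# Compute the temporary location under save_dir where a removed path is
# stashed so it can be restored on rollback.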
def _stash(self, path):
return os.path.join(
self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
logger.info(
'Uninstalling %s-%s:',
self.dist.project_name, self.dist.version
)
with indent_log():
paths = sorted(self.compact(self.paths))
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.info(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.info('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.info(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.info(
'Successfully uninstalled %s-%s',
self.dist.project_name, self.dist.version
)
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error(
"Can't roll back %s; was not uninstalled",
self.dist.project_name,
)
return False
logger.info('Rolling back uninstall of %s', self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.debug('Replacing %s', path)
renames(tmp_path, path)
for pth in self.pth.values():
pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
if self.save_dir is not None:
rmtree(self.save_dir)
self.save_dir = None
self._moved_paths = []
@classmethod
def from_dist(cls, dist):
dist_path = normalize_path(dist.location)
if not dist_is_local(dist):
logger.info(
"Not uninstalling %s at %s, outside environment %s",
dist.key,
dist_path,
sys.prefix,
)
return cls(dist)
if dist_path in get_stdlib():
logger.info(
"Not uninstalling %s at %s, as it is in the standard library.",
dist.key,
dist_path,
)
return cls(dist)
paths_to_remove = cls(dist)
develop_egg_link = egg_link_path(dist)
develop_egg_link_egg_info = '{0}.egg-info'.format(
pkg_resources.to_filename(dist.project_name))
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
# The order of the uninstall cases matters: in the case of 2 installs of
# the same package, pip needs to uninstall the currently detected version.
if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
not dist.egg_info.endswith(develop_egg_link_egg_info)):
# if dist.egg_info.endswith(develop_egg_link_egg_info), we
# are in fact in the develop_egg_link case
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
paths_to_remove.add(path + '.pyo')
elif distutils_egg_info:
warnings.warn(
"Uninstalling a distutils installed project ({0}) has been "
"deprecated and will be removed in a future version. This is "
"due to the fact that uninstalling a distutils project will "
"only partially uninstall the project.".format(
dist.project_name),
RemovedInPip10Warning,
)
paths_to_remove.add(distutils_egg_info)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in uninstallation_paths(dist):
paths_to_remove.add(path)
elif develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, dist.project_name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location)
# find scripts installed via the distutils scripts= argument
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
_scripts_to_remove = []
console_scripts = dist.get_entry_map(group='console_scripts')
for name in console_scripts.keys():
_scripts_to_remove.extend(_script_names(dist, name, False))
# find gui_scripts
gui_scripts = dist.get_entry_map(group='gui_scripts')
for name in gui_scripts.keys():
_scripts_to_remove.extend(_script_names(dist, name, True))
for s in _scripts_to_remove:
paths_to_remove.add(s)
return paths_to_remove
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError(
"Cannot remove entries from nonexistent file %s" % pth_file
)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if WINDOWS and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
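# Hedged illustration (not part of pip): on Windows, a drive-absolute entry
# such as 'C:\\Tools\\pkg' keeps its backslashes after normcase, while a
# relative entry such as 'mypkg\\sub' (no drive letter) is rewritten to
# 'mypkg/sub' so it matches the forward-slash form used in .pth files.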
def remove(self):
logger.debug('Removing pth entries from %s:', self.file)
with open(self.file, 'rb') as fh:
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
if any(b'\r\n' in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
for entry in self.entries:
try:
logger.debug('Removing entry: %s', entry)
lines.remove((entry + endline).encode("utf-8"))
except ValueError:
pass
with open(self.file, 'wb') as fh:
fh.writelines(lines)
def rollback(self):
if self._saved_lines is None:
logger.error(
'Cannot roll back changes to %s, none were made', self.file
)
return False
logger.debug('Rolling %s back to previous state', self.file)
with open(self.file, 'wb') as fh:
fh.writelines(self._saved_lines)
return True
|
|
'''
Functions for helping with serialization and deserialization of
Bokeh objects.
Certain NumPy array dtypes can be serialized to a binary format for
performance and efficiency. The list of supported dtypes is:
{binary_array_types}
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import base64
import datetime as dt
import math
from six import iterkeys
import numpy as np
from .string import format_docstring
from .dependencies import import_optional
pd = import_optional('pandas')
BINARY_ARRAY_TYPES = set([
np.dtype(np.float32),
np.dtype(np.float64),
np.dtype(np.uint8),
np.dtype(np.int8),
np.dtype(np.uint16),
np.dtype(np.int16),
np.dtype(np.uint32),
np.dtype(np.int32),
])
DATETIME_TYPES = set([
dt.datetime,
dt.timedelta,
dt.date,
dt.time,
np.datetime64,
np.timedelta64
])
if pd:
try:
_pd_timestamp = pd.Timestamp
except AttributeError:
_pd_timestamp = pd.tslib.Timestamp
DATETIME_TYPES.add(_pd_timestamp)
DATETIME_TYPES.add(pd.Timedelta)
NP_EPOCH = np.datetime64(0, 'ms')
NP_MS_DELTA = np.timedelta64(1, 'ms')
DT_EPOCH = dt.datetime.utcfromtimestamp(0)
__doc__ = format_docstring(__doc__, binary_array_types="\n".join("* ``np." + str(x) + "``" for x in BINARY_ARRAY_TYPES))
_simple_id = 1000
_dt_tuple = tuple(DATETIME_TYPES)
def is_datetime_type(obj):
''' Whether an object is any date, datetime, or time delta type
recognized by Bokeh.
Args:
obj (object) : the object to test
Returns:
bool : True if ``obj`` is a datetime type
'''
return isinstance(obj, _dt_tuple)
def convert_datetime_type(obj):
''' Convert any recognized date, datetime or time delta value to
floating point milliseconds
Date and Datetime values are converted to milliseconds since epoch.
Timedelta values are converted to absolute milliseconds.
Args:
obj (object) : the object to convert
Returns:
float : milliseconds
'''
# Pandas Timestamp
if pd and isinstance(obj, _pd_timestamp): return obj.value / 10**6.0
# Pandas Timedelta
elif pd and isinstance(obj, pd.Timedelta): return obj.value / 10**6.0
# Datetime (datetime is a subclass of date)
elif isinstance(obj, dt.datetime):
diff = obj.replace(tzinfo=None) - DT_EPOCH
return diff.total_seconds() * 1000. + obj.microsecond / 1000.
# Timedelta (timedelta is a class in the datetime library)
elif isinstance(obj, dt.timedelta):
return obj.total_seconds() * 1000.
# Date
elif isinstance(obj, dt.date):
return (dt.datetime(*obj.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000
# NumPy datetime64
elif isinstance(obj, np.datetime64):
epoch_delta = obj - NP_EPOCH
return (epoch_delta / NP_MS_DELTA)
# Numpy timedelta64
elif isinstance(obj, np.timedelta64):
return (obj / NP_MS_DELTA)
# Time
elif isinstance(obj, dt.time):
return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
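# Hedged usage sketch (illustrative values, not part of Bokeh's docs):
# convert_datetime_type(dt.timedelta(seconds=1.5)) -> 1500.0
# convert_datetime_type(dt.date(1970, 1, 2)) -> 86400000.0
# convert_datetime_type(np.datetime64('1970-01-01T00:00:01')) -> 1000.0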
def make_id():
''' Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
objects. This is especially important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
'''
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
def array_encoding_disabled(array):
''' Determine whether an array may be binary encoded.
The NumPy array dtypes that can be encoded are:
{binary_array_types}
Args:
array (np.ndarray) : the array to check
Returns:
bool
'''
# disable binary encoding for non-supported dtypes
return array.dtype not in BINARY_ARRAY_TYPES
array_encoding_disabled.__doc__ = format_docstring(array_encoding_disabled.__doc__,
binary_array_types="\n ".join("* ``np." + str(x) + "``"
for x in BINARY_ARRAY_TYPES))
def transform_array(array, force_list=False):
''' Transform a NumPy array into serialized format.
Converts un-serializable dtypes and returns a JSON-serializable
format.
Args:
array (np.ndarray) : a NumPy array to be transformed
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
Returns:
JSON
'''
# Check for astype failures (putative Numpy < 1.7)
try:
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
except AttributeError as e:
if e.args == ("'module' object has no attribute 'datetime64'",):
import sys
# for compatibility with PyPy that doesn't have datetime64
if 'PyPy' in sys.version:
legacy_datetime64 = False
pass
else:
raise e
else:
raise e
# not quite correct, truncates to ms..
if array.dtype.kind == 'M':
if legacy_datetime64:
if array.dtype == np.dtype('datetime64[ns]'):
array = array.astype('int64') / 10**6.0
else:
array = array.astype('datetime64[us]').astype('int64') / 1000.
elif array.dtype.kind == 'm':
array = array.astype('timedelta64[us]').astype('int64') / 1000.
return serialize_array(array, force_list)
def transform_array_to_list(array):
''' Transforms a NumPy array into a list of values
Args:
array (np.ndarray) : the NumPy array to transform
Returns:
list or dict
'''
if (array.dtype.kind in ('u', 'i', 'f') and (~np.isfinite(array)).any()):
transformed = array.astype('object')
transformed[np.isnan(array)] = 'NaN'
transformed[np.isposinf(array)] = 'Infinity'
transformed[np.isneginf(array)] = '-Infinity'
return transformed.tolist()
elif (array.dtype.kind == 'O' and pd and pd.isnull(array).any()):
transformed = array.astype('object')
transformed[pd.isnull(array)] = 'NaN'
return transformed.tolist()
return array.tolist()
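# Hedged illustration (not from Bokeh): non-finite floats are replaced with
# JSON-safe strings, e.g.
# transform_array_to_list(np.array([1.0, np.nan, np.inf, -np.inf]))
# -> [1.0, 'NaN', 'Infinity', '-Infinity']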
def transform_series(series, force_list=False):
''' Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
Returns:
list or dict
'''
vals = series.values
return transform_array(vals, force_list)
def serialize_array(array, force_list=False):
''' Transforms a NumPy array into serialized form.
Args:
array (np.ndarray) : the NumPy array to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
Returns:
list or dict
'''
if isinstance(array, np.ma.MaskedArray):
array = array.filled(np.nan) # Set masked values to nan
if (array_encoding_disabled(array) or force_list):
return transform_array_to_list(array)
if not array.flags['C_CONTIGUOUS']:
array = np.ascontiguousarray(array)
return encode_base64_dict(array)
def traverse_data(obj, use_numpy=True):
''' Recursively traverse an object until a flat list is found.
If NumPy is available, the flat list is converted to a numpy array
and passed to transform_array() to handle ``nan``, ``inf``, and
``-inf``.
Otherwise, iterate through all items, converting any values that are
not JSON-serializable along the way.
Args:
obj (list) : a list of values or lists
use_numpy (bool, optional) : toggle NumPy as a dependency for testing
This argument is only useful for testing (default: True)
'''
if use_numpy and all(isinstance(el, np.ndarray) for el in obj):
return [transform_array(el) for el in obj]
obj_copy = []
for item in obj:
# Check the base/common case first for performance reasons
# Also use type(x) is float because it's faster than isinstance
if type(item) is float:
if math.isnan(item):
item = 'NaN'
elif math.isinf(item):
if item > 0:
item = 'Infinity'
else:
item = '-Infinity'
obj_copy.append(item)
elif isinstance(item, (list, tuple)): # check less common type second
obj_copy.append(traverse_data(item, use_numpy))
else:
obj_copy.append(item)
return obj_copy
def transform_column_source_data(data):
''' Transform ColumnDataSource data to a serialized format
Args:
data (dict) : the mapping of names to data columns to transform
Returns:
JSON compatible dict
'''
data_copy = {}
for key in iterkeys(data):
if pd and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key])
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key])
else:
data_copy[key] = traverse_data(data[key])
return data_copy
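# Hedged usage sketch (column names are made up): a ColumnDataSource-style
# dict mixing array, Series, and plain-list columns is serialized column by
# column, e.g.
# transform_column_source_data({'x': np.arange(3), 'y': [1.0, float('nan')]})
# would encode 'x' via transform_array() and clean up 'y' via traverse_data().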
def encode_base64_dict(array):
''' Encode a NumPy array using base64:
The encoded format is a dict with the following structure:
.. code:: python
{
'__ndarray__' : << base64 encoded array data >>,
'shape' : << array shape >>,
'dtype' : << dtype name >>,
}
Args:
array (np.ndarray) : an array to encode
Returns:
dict
'''
return {
'__ndarray__' : base64.b64encode(array.data).decode('utf-8'),
'shape' : array.shape,
'dtype' : array.dtype.name
}
def decode_base64_dict(data):
''' Decode a base64 encoded array into a NumPy array.
Args:
data (dict) : encoded array data to decode
Data should have the format encoded by :func:`encode_base64_dict`.
Returns:
np.ndarray
'''
b64 = base64.b64decode(data['__ndarray__'])
array = np.fromstring(b64, dtype=data['dtype'])
if len(data['shape']) > 1:
array = array.reshape(data['shape'])
return array
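# Hedged round-trip sketch (not part of Bokeh's public docs):
# a = np.arange(6, dtype=np.int32).reshape(2, 3)
# d = encode_base64_dict(a)   # {'__ndarray__': '...', 'shape': (2, 3), 'dtype': 'int32'}
# b = decode_base64_dict(d)   # reshaped back to (2, 3); np.array_equal(a, b) is True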
|
|
# Natural Language Toolkit: Chunk parsing API
#
# original file: http://www.nltk.org/_modules/nltk/chunk/named_entity.html
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Named entity chunker
"""
from __future__ import print_function
import os, re, pickle, json
from xml.etree import ElementTree as ET
from pprint import pprint as pp
from nltk.tag import ClassifierBasedTagger, pos_tag
try:
from nltk.classify import MaxentClassifier
except ImportError:
pass
from nltk.tree import Tree
from nltk.tokenize import word_tokenize
from nltk.data import find
from nltk.chunk.api import ChunkParserI
from nltk.chunk.util import ChunkScore
import ner_pipeline
class NEChunkParserTagger(ClassifierBasedTagger):
"""
The IOB tagger used by the chunk parser.
"""
def __init__(self, train):
ClassifierBasedTagger.__init__(
self, train=train,
classifier_builder=self._classifier_builder)
def _classifier_builder(self, train):
return MaxentClassifier.train(train, algorithm='megam',
gaussian_prior_sigma=1,
trace=2)
def _english_wordlist(self):
try:
wl = self._en_wordlist
except AttributeError:
from nltk.corpus import words
self._en_wordlist = set(words.words('en-basic'))
wl = self._en_wordlist
return wl
def _feature_detector(self, tokens, index, history):
word = tokens[index][0]
pos = simplify_pos(tokens[index][1])
if index == 0:
prevword = prevprevword = None
prevpos = prevprevpos = None
prevshape = prevtag = prevprevtag = None
elif index == 1:
prevword = tokens[index-1][0].lower()
prevprevword = None
prevpos = simplify_pos(tokens[index-1][1])
prevprevpos = None
prevtag = history[index-1][0]
prevshape = prevprevtag = None
else:
prevword = tokens[index-1][0].lower()
prevprevword = tokens[index-2][0].lower()
prevpos = simplify_pos(tokens[index-1][1])
prevprevpos = simplify_pos(tokens[index-2][1])
prevtag = history[index-1]
prevprevtag = history[index-2]
prevshape = shape(prevword)
if index == len(tokens)-1:
nextword = nextnextword = None
nextpos = nextnextpos = None
elif index == len(tokens)-2:
nextword = tokens[index+1][0].lower()
nextpos = tokens[index+1][1].lower()
nextnextword = None
nextnextpos = None
else:
nextword = tokens[index+1][0].lower()
nextpos = tokens[index+1][1].lower()
nextnextword = tokens[index+2][0].lower()
nextnextpos = tokens[index+2][1].lower()
# 89.6
features = {
'bias': True,
'shape': shape(word),
'wordlen': len(word),
'prefix3': word[:3].lower(),
'suffix3': word[-3:].lower(),
'pos': pos,
'word': word,
'en-wordlist': (word in self._english_wordlist()),
'prevtag': prevtag,
'prevpos': prevpos,
'nextpos': nextpos,
'prevword': prevword,
'nextword': nextword,
'word+nextpos': '%s+%s' % (word.lower(), nextpos),
'pos+prevtag': '%s+%s' % (pos, prevtag),
'shape+prevtag': '%s+%s' % (prevshape, prevtag),
}
return features
class NEChunkParser(ChunkParserI):
"""
Expected input: list of pos-tagged words
"""
def __init__(self, train):
self._train(train)
def parse(self, tokens):
"""
Each token should be a pos-tagged word
"""
tagged = self._tagger.tag(tokens)
tree = self._tagged_to_parse(tagged)
return tree
def _train(self, corpus):
# Convert to tagged sequence
corpus = [self._parse_to_tagged(s) for s in corpus]
self._tagger = NEChunkParserTagger(train=corpus)
def _tagged_to_parse(self, tagged_tokens):
"""
Convert a list of tagged tokens to a chunk-parse tree.
"""
sent = Tree('S', [])
for (tok,tag) in tagged_tokens:
if tag == 'O':
sent.append(tok)
elif tag.startswith('B-'):
sent.append(Tree(tag[2:], [tok]))
elif tag.startswith('I-'):
if (sent and isinstance(sent[-1], Tree) and
sent[-1].label() == tag[2:]):
sent[-1].append(tok)
else:
sent.append(Tree(tag[2:], [tok]))
return sent
@staticmethod
def _parse_to_tagged(sent):
"""
Convert a chunk-parse tree to a list of tagged tokens.
"""
toks = []
for child in sent:
if isinstance(child, Tree):
if len(child) == 0:
print("Warning -- empty chunk in sentence")
continue
toks.append((child[0], 'B-%s' % child.label()))
for tok in child[1:]:
toks.append((tok, 'I-%s' % child.label()))
else:
toks.append((child, 'O'))
return toks
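# Hedged illustration of the round trip (tokens invented for the example):
# Tree('S', [Tree('GPE', [('New', 'NNP'), ('York', 'NNP')]), ('rocks', 'VBZ')])
# <-> [(('New', 'NNP'), 'B-GPE'), (('York', 'NNP'), 'I-GPE'), (('rocks', 'VBZ'), 'O')]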
def shape(word):
if re.match(r'[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$', word, re.UNICODE):
return 'number'
elif re.match(r'\W+$', word, re.UNICODE):
return 'punct'
elif re.match(r'\w+$', word, re.UNICODE):
if word.istitle():
return 'upcase'
elif word.islower():
return 'downcase'
else:
return 'mixedcase'
else:
return 'other'
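# Hedged examples of the shape buckets (words invented for illustration):
# shape('42') -> 'number', shape('!?') -> 'punct', shape('Paris') -> 'upcase',
# shape('nltk') -> 'downcase', shape('eBay') -> 'mixedcase'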
def simplify_pos(s):
if s.startswith('V'): return "V"
else: return s.split('-')[0]
def postag_tree(tree):
# Part-of-speech tagging.
words = tree.leaves()
tagged_words = ner_pipeline.part_of_speech_tagging(words)
tag_iter = (pos for (word, pos) in tagged_words)
newtree = Tree('S', [])
for child in tree:
if isinstance(child, Tree):
newtree.append(Tree(child.label(), []))
for subchild in child:
newtree[-1].append( (subchild, next(tag_iter)) )
else:
newtree.append( (child, next(tag_iter)) )
return newtree
# if __name__ == '__main__':
# training_corpus = ner_pipeline.load_annotated_raw_datum('raw_data.json')
# tree = nltk_tree_converter.corpus_to_tree(training_corpus)
# pos_tagged_tree = postag_tree(tree)
# pp(pos_tagged_tree)
|
|
import sys
import types
import inspect
import re
import optparse
from collections import defaultdict
from textwrap import wrap
from scriptine import misc, log
def parse_and_run_function(function, args=None, command_name=None,
add_dry_run_option=True, add_verbosity_option=True):
#TODO refactor me, I'm too long
if args is None:
args = sys.argv
required_args, optional_args = inspect_args(function)
func_doc = function.__doc__ or ''
params_doc = parse_rst_params(func_doc)
usage = 'usage: %prog '
if command_name:
usage += command_name.replace('_', '-') + ' '
usage += '[options] ' + ' '.join(required_args)
if func_doc:
first_paragraph = re.findall('(.*?)((\n[ \t]*\n)|$)', func_doc,
re.DOTALL)[0][0]
first_paragraph = ' '.join(l.strip() for l in
first_paragraph.split('\n'))
usage += '\n\n' + '\n'.join(wrap(first_paragraph, 60))
if set(required_args).intersection(list(params_doc.keys())):
usage += '\n\nRequired arguments:'
for arg in required_args:
usage += '\n%s' % arg
if arg in params_doc:
usage += ': %s' % params_doc[arg]
add_help_option = True
if getattr(function, 'no_help', False):
add_help_option = False
fetch_all = None
if hasattr(function, 'fetch_all'):
fetch_all = function.fetch_all
optional_args = [(arg, default) for arg, default in optional_args
if arg != fetch_all]
parser = optparse.OptionParser
if getattr(function, 'non_strict', False):
parser = NonStrictOptionParser
parser = parser(usage, add_help_option=add_help_option)
for arg_name, default in optional_args:
options = {}
if isinstance(default, bool):
if default:
options = {'action': 'store_false'}
else:
options = {'action': 'store_true'}
elif isinstance(default, int):
options = {'type': 'int'}
elif isinstance(default, float):
options = {'type': 'float'}
parser.add_option('--' + arg_name.replace('_', '-'),
help=params_doc.get(arg_name, None),
dest=arg_name, default=default, metavar=default, **options)
if add_dry_run_option:
parser.add_option('--dry-run', '-n', dest='dry_run', default=False,
action='store_true', help='don\'t actually do anything')
if getattr(function, 'no_verbosity', False):
add_verbosity_option = False
if add_verbosity_option:
parser.add_option('--verbose', '-v', dest='verbose',
action='count', help='be more verbose')
parser.add_option('--quiet', '-q', dest='quiet',
action='count', help='be more silent')
(options, args) = parser.parse_args(args)
if add_verbosity_option:
verbosity = (options.verbose or 0) - (options.quiet or 0)
log.inc_log_level(verbosity)
if add_dry_run_option and options.dry_run:
misc.options.dry = True
log.inc_log_level(1)
log.warn('running in dry-mode. don\'t actually do anything')
args = args[1:]
if len(args) < len(required_args):
parser.error('number of arguments does not match')
kw = {}
for arg_name, _default in optional_args:
kw[arg_name] = getattr(options, arg_name)
if fetch_all:
kw[fetch_all] = args[len(required_args):]
return function(*args[:len(required_args)], **kw)
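# Hedged sketch of the resulting CLI (names are invented):
# def build_command(target, jobs=1, clean=False):
#     '''Build a target.
#     :param jobs: number of parallel jobs
#     '''
# would be exposed roughly as `prog build [options] target`, with an
# int-typed --jobs option (default 1, help text taken from the :param: doc),
# a --clean flag (store_true, because the default is False), and the
# automatic --dry-run/-n and verbosity options added by this function.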
def no_help(cmd):
cmd.no_help = True
return cmd
def no_verbosity(cmd):
cmd.no_verbosity = True
return cmd
def non_strict(cmd):
cmd.non_strict = True
return cmd
def fetch_all(arg_name):
def _fetch_all(cmd):
cmd.fetch_all = arg_name
return cmd
return _fetch_all
def group(name):
def _group(cmd):
cmd.group = name
return cmd
return _group
class NonStrictOptionParser(optparse.OptionParser):
def _process_args(self, largs, rargs, values):
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
try:
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return
except optparse.BadOptionError:
largs.append(arg)
def inspect_args(function):
(args, _varargs, _varkw, defaults) = inspect.getargspec(function)
optional_args = []
if defaults is not None:
for default in defaults[::-1]:
optional_args.append((args.pop(), default))
optional_args.reverse()
return args, optional_args
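# Hedged illustration (function is invented): for
#   def deploy(host, port=8080, dry=False): ...
# inspect_args returns (['host'], [('port', 8080), ('dry', False)]).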
def autocmds(namespace=None, args=None, command_suffix='_command',
add_dry_run_option=True, add_verbosity_option=True):
"""
Parse and run commands.
Will search ``namespace`` for functions that end with ``command_suffix``.
:param namespace: the namespace/module to search for commands
:param args: the arguments for the command parser. defaults to
:data:`sys.argv`
:param command_suffix: function name suffix that indicates that a
function is a command.
"""
if namespace is None:
namespace = inspect.currentframe().f_back.f_globals
elif type(namespace) is types.ModuleType:
namespace = namespace.__dict__
if args is None:
args = sys.argv
if len(args) < 2 or args[1] in ('-h', '--help'):
print_help(namespace, command_suffix)
return
command_name = args.pop(1).replace('-', '_')
function = namespace[command_name + command_suffix]
parse_and_run_function(function, args, command_name,
add_dry_run_option=add_dry_run_option,
add_verbosity_option=add_verbosity_option)
run = autocmds
def cmd(function, args=None):
if args is None:
args = sys.argv
parse_and_run_function(function, args, '',)
def print_help(namespace, command_suffix):
group_commands = defaultdict(list)
for func_name, func in namespace.items():
if func_name.endswith(command_suffix):
func = namespace[func_name]
group = getattr(func, 'group', None)
command_name = func_name[:-len(command_suffix)].replace('_', '-')
group_commands[group].append((command_name, func.__doc__))
if not group_commands:
print('no commands found in', sys.argv[0], file=sys.stderr)
return
usage = 'usage: %prog command [options]'
parser = optparse.OptionParser(usage)
parser.print_help(sys.stderr)
default_commands = group_commands.pop(None, None)
if default_commands:
print_commands(None, default_commands)
for group_name, commands in group_commands.items():
print_commands(group_name, commands)
def print_commands(group_name, commands):
if group_name:
print('\n%s commands:' % group_name.title(), file=sys.stderr)
else:
print('\nCommands:', file=sys.stderr)
cmd_len = max(len(cmd) for cmd, _ in commands)
for cmd, doc in commands:
if doc is not None:
doc = doc.strip().split('\n')[0]
else:
doc = ''
print((' %-' + str(cmd_len) + 's %s') % (cmd, doc), file=sys.stderr)
def parse_rst_params(doc):
"""
Parse a reStructuredText docstring and return a dictionary
with parameter names and descriptions.
>>> doc = '''
... :param foo: foo parameter
... foo parameter
...
... :param bar: bar parameter
... :param baz: baz parameter
... baz parameter
... baz parameter
... Some text.
... '''
>>> params = parse_rst_params(doc)
>>> params['foo']
'foo parameter foo parameter'
>>> params['bar']
'bar parameter'
>>> params['baz']
'baz parameter baz parameter baz parameter'
"""
param_re = re.compile(r"""^([ \t]*):param\
(?P<param>\w+):\
(?P<body>.*\n(\1[ \t]+\w.*\n)*)""",
re.MULTILINE|re.VERBOSE)
params = {}
for match in param_re.finditer(doc):
parts = match.groupdict()
body_lines = parts['body'].strip().split('\n')
params[parts['param']] = ' '.join(s.strip() for s in body_lines)
return params
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests for plugins."""
import os.path
import coverage
from coverage import env
from coverage.backward import StringIO
from coverage.control import Plugins
from coverage.misc import CoverageException
import coverage.plugin
from tests.coveragetest import CoverageTest
from tests.helpers import CheckUniqueFilenames
class FakeConfig(object):
"""A fake config for use in tests."""
def __init__(self, plugin, options):
self.plugin = plugin
self.options = options
self.asked_for = []
def get_plugin_options(self, module):
"""Just return the options for `module` if this is the right module."""
self.asked_for.append(module)
if module == self.plugin:
return self.options
else:
return {}
class LoadPluginsTest(CoverageTest):
"""Test Plugins.load_plugins directly."""
def test_implicit_boolean(self):
self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
config = FakeConfig("plugin1", {})
plugins = Plugins.load_plugins([], config)
self.assertFalse(plugins)
plugins = Plugins.load_plugins(["plugin1"], config)
self.assertTrue(plugins)
def test_importing_and_configuring(self):
self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
self.this_is = "me"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""")
config = FakeConfig("plugin1", {'a': 'hello'})
plugins = list(Plugins.load_plugins(["plugin1"], config))
self.assertEqual(len(plugins), 1)
self.assertEqual(plugins[0].this_is, "me")
self.assertEqual(plugins[0].options, {'a': 'hello'})
self.assertEqual(config.asked_for, ['plugin1'])
def test_importing_and_configuring_more_than_one(self):
self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
self.this_is = "me"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""")
self.make_file("plugin2.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""")
config = FakeConfig("plugin1", {'a': 'hello'})
plugins = list(Plugins.load_plugins(["plugin1", "plugin2"], config))
self.assertEqual(len(plugins), 2)
self.assertEqual(plugins[0].this_is, "me")
self.assertEqual(plugins[0].options, {'a': 'hello'})
self.assertEqual(plugins[1].options, {})
self.assertEqual(config.asked_for, ['plugin1', 'plugin2'])
# The order matters...
config = FakeConfig("plugin1", {'a': 'second'})
plugins = list(Plugins.load_plugins(["plugin2", "plugin1"], config))
self.assertEqual(len(plugins), 2)
self.assertEqual(plugins[0].options, {})
self.assertEqual(plugins[1].this_is, "me")
self.assertEqual(plugins[1].options, {'a': 'second'})
def test_cant_import(self):
with self.assertRaises(ImportError):
_ = Plugins.load_plugins(["plugin_not_there"], None)
def test_plugin_must_define_coverage_init(self):
self.make_file("no_plugin.py", """\
from coverage import CoveragePlugin
Nothing = 0
""")
msg_pat = "Plugin module 'no_plugin' didn't define a coverage_init function"
with self.assertRaisesRegex(CoverageException, msg_pat):
list(Plugins.load_plugins(["no_plugin"], None))
class PluginTest(CoverageTest):
"""Test plugins through the Coverage class."""
def test_plugin_imported(self):
# Prove that a plugin will be imported.
self.make_file("my_plugin.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_noop(Plugin())
with open("evidence.out", "w") as f:
f.write("we are here!")
""")
self.assert_doesnt_exist("evidence.out")
cov = coverage.Coverage()
cov.set_option("run:plugins", ["my_plugin"])
cov.start()
cov.stop() # pragma: nested
with open("evidence.out") as f:
self.assertEqual(f.read(), "we are here!")
def test_missing_plugin_raises_import_error(self):
# Prove that a missing plugin will raise an ImportError.
with self.assertRaises(ImportError):
cov = coverage.Coverage()
cov.set_option("run:plugins", ["does_not_exist_woijwoicweo"])
cov.start()
cov.stop()
def test_bad_plugin_isnt_hidden(self):
# Prove that a plugin with an error in it will raise the error.
self.make_file("plugin_over_zero.py", """\
1/0
""")
with self.assertRaises(ZeroDivisionError):
cov = coverage.Coverage()
cov.set_option("run:plugins", ["plugin_over_zero"])
cov.start()
cov.stop()
def test_plugin_sys_info(self):
self.make_file("plugin_sys_info.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
def sys_info(self):
return [("hello", "world")]
def coverage_init(reg, options):
reg.add_noop(Plugin())
""")
debug_out = StringIO()
cov = coverage.Coverage(debug=["sys"])
cov._debug_file = debug_out
cov.set_option("run:plugins", ["plugin_sys_info"])
cov.load()
out_lines = debug_out.getvalue().splitlines()
expected_end = [
"-- sys: plugin_sys_info.Plugin -------------------------------",
" hello: world",
"-- end -------------------------------------------------------",
]
self.assertEqual(expected_end, out_lines[-len(expected_end):])
def test_plugin_with_no_sys_info(self):
self.make_file("plugin_no_sys_info.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_noop(Plugin())
""")
debug_out = StringIO()
cov = coverage.Coverage(debug=["sys"])
cov._debug_file = debug_out
cov.set_option("run:plugins", ["plugin_no_sys_info"])
cov.load()
out_lines = debug_out.getvalue().splitlines()
expected_end = [
"-- sys: plugin_no_sys_info.Plugin ----------------------------",
"-- end -------------------------------------------------------",
]
self.assertEqual(expected_end, out_lines[-len(expected_end):])
def test_local_files_are_importable(self):
self.make_file("importing_plugin.py", """\
from coverage import CoveragePlugin
import local_module
class MyPlugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_noop(MyPlugin())
""")
self.make_file("local_module.py", "CONST = 1")
self.make_file(".coveragerc", """\
[run]
plugins = importing_plugin
""")
self.make_file("main_file.py", "print('MAIN')")
out = self.run_command("coverage run main_file.py")
self.assertEqual(out, "MAIN\n")
out = self.run_command("coverage html")
self.assertEqual(out, "")
class PluginWarningOnPyTracer(CoverageTest):
"""Test that we get a controlled exception with plugins on PyTracer."""
def test_exception_if_plugins_on_pytracer(self):
if env.C_TRACER:
self.skip("This test is only about PyTracer.")
self.make_file("simple.py", """a = 1""")
cov = coverage.Coverage()
cov.set_option("run:plugins", ["tests.plugin1"])
warnings = []
def capture_warning(msg):
"""A fake implementation of Coverage._warn, to capture warnings."""
warnings.append(msg)
cov._warn = capture_warning
self.start_import_stop(cov, "simple")
self.assertIn(
"Plugin file tracers (tests.plugin1.Plugin) aren't supported with PyTracer",
warnings
)
class FileTracerTest(CoverageTest):
"""Tests of plugins that implement file_tracer."""
def setUp(self):
super(FileTracerTest, self).setUp()
if not env.C_TRACER:
self.skip("Plugins are only supported with the C tracer.")
class GoodPluginTest(FileTracerTest):
"""Tests of plugin happy paths."""
def test_plugin1(self):
self.make_file("simple.py", """\
import try_xyz
a = 1
b = 2
""")
self.make_file("try_xyz.py", """\
c = 3
d = 4
""")
cov = coverage.Coverage()
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin1"])
# Import the Python file, executing it.
self.start_import_stop(cov, "simple")
_, statements, missing, _ = cov.analysis("simple.py")
self.assertEqual(statements, [1, 2, 3])
self.assertEqual(missing, [])
zzfile = os.path.abspath(os.path.join("/src", "try_ABC.zz"))
_, statements, _, _ = cov.analysis(zzfile)
self.assertEqual(statements, [105, 106, 107, 205, 206, 207])
def make_render_and_caller(self):
"""Make the render.py and caller.py files we need."""
# plugin2 emulates a dynamic tracing plugin: the caller's locals
# are examined to determine the source file and line number.
# The plugin is in tests/plugin2.py.
self.make_file("render.py", """\
def render(filename, linenum):
# This function emulates a template renderer. The plugin
# will examine the `filename` and `linenum` locals to
# determine the source file and line number.
fiddle_around = 1 # not used, just chaff.
return "[{0} @ {1}]".format(filename, linenum)
def helper(x):
# This function is here just to show that not all code in
# this file will be part of the dynamic tracing.
return x+1
""")
self.make_file("caller.py", """\
import sys
from render import helper, render
assert render("foo_7.html", 4) == "[foo_7.html @ 4]"
# Render foo_7.html again to try the CheckUniqueFilenames asserts.
render("foo_7.html", 4)
assert helper(42) == 43
assert render("bar_4.html", 2) == "[bar_4.html @ 2]"
assert helper(76) == 77
# quux_5.html will be omitted from the results.
assert render("quux_5.html", 3) == "[quux_5.html @ 3]"
# In Python 2, either kind of string should be OK.
if sys.version_info[0] == 2:
assert render(u"uni_3.html", 2) == "[uni_3.html @ 2]"
""")
# Reporting will try to read the actual source files, so make some
# source files.
def lines(n):
"""Make a string with n lines of text."""
return "".join("line %d\n" % i for i in range(n))
self.make_file("bar_4.html", lines(4))
self.make_file("foo_7.html", lines(7))
def test_plugin2(self):
self.make_render_and_caller()
cov = coverage.Coverage(omit=["*quux*"])
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
# The way plugin2 works, a file named foo_7.html will be claimed to
# have 7 lines in it. If render() was called with line number 4,
# then the plugin will claim that lines 4 and 5 were executed.
_, statements, missing, _ = cov.analysis("foo_7.html")
self.assertEqual(statements, [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(missing, [1, 2, 3, 6, 7])
self.assertIn("foo_7.html", cov.data.line_counts())
_, statements, missing, _ = cov.analysis("bar_4.html")
self.assertEqual(statements, [1, 2, 3, 4])
self.assertEqual(missing, [1, 4])
self.assertIn("bar_4.html", cov.data.line_counts())
self.assertNotIn("quux_5.html", cov.data.line_counts())
if env.PY2:
_, statements, missing, _ = cov.analysis("uni_3.html")
self.assertEqual(statements, [1, 2, 3])
self.assertEqual(missing, [1])
self.assertIn("uni_3.html", cov.data.line_counts())
def test_plugin2_with_branch(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
# The way plugin2 works, a file named foo_7.html will be claimed to
# have 7 lines in it. If render() was called with line number 4,
# then the plugin will claim that lines 4 and 5 were executed.
analysis = cov._analyze("foo_7.html")
self.assertEqual(analysis.statements, set([1, 2, 3, 4, 5, 6, 7]))
# Plugins don't do branch coverage yet.
self.assertEqual(analysis.has_arcs(), True)
self.assertEqual(analysis.arc_possibilities(), [])
self.assertEqual(analysis.missing, set([1, 2, 3, 6, 7]))
def test_plugin2_with_text_report(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
repout = StringIO()
total = cov.report(file=repout, include=["*.html"], omit=["uni*.html"])
report = repout.getvalue().splitlines()
expected = [
'Name Stmts Miss Branch BrPart Cover Missing',
'--------------------------------------------------------',
'bar_4.html 4 2 0 0 50% 1, 4',
'foo_7.html 7 5 0 0 29% 1-3, 6-7',
'--------------------------------------------------------',
'TOTAL 11 7 0 0 36% ',
]
self.assertEqual(report, expected)
self.assertAlmostEqual(total, 36.36, places=2)
def test_plugin2_with_html_report(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
total = cov.html_report(include=["*.html"], omit=["uni*.html"])
self.assertAlmostEqual(total, 36.36, places=2)
self.assert_exists("htmlcov/index.html")
self.assert_exists("htmlcov/bar_4_html.html")
self.assert_exists("htmlcov/foo_7_html.html")
def test_plugin2_with_xml_report(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
total = cov.xml_report(include=["*.html"], omit=["uni*.html"])
self.assertAlmostEqual(total, 36.36, places=2)
with open("coverage.xml") as fxml:
xml = fxml.read()
for snip in [
'filename="bar_4.html" line-rate="0.5" name="bar_4.html"',
'filename="foo_7.html" line-rate="0.2857" name="foo_7.html"',
]:
self.assertIn(snip, xml)
def test_defer_to_python(self):
# A plugin that measures, but then wants built-in python reporting.
self.make_file("fairly_odd_plugin.py", """\
# A plugin that claims all the odd lines are executed, and none of
# the even lines, and then punts reporting off to the built-in
# Python reporting.
import coverage.plugin
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
return OddTracer(filename)
def file_reporter(self, filename):
return "python"
class OddTracer(coverage.plugin.FileTracer):
def __init__(self, filename):
self.filename = filename
def source_filename(self):
return self.filename
def line_number_range(self, frame):
lineno = frame.f_lineno
if lineno % 2:
return (lineno, lineno)
else:
return (-1, -1)
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.make_file("unsuspecting.py", """\
a = 1
b = 2
c = 3
d = 4
e = 5
f = 6
""")
cov = coverage.Coverage(include=["unsuspecting.py"])
cov.set_option("run:plugins", ["fairly_odd_plugin"])
self.start_import_stop(cov, "unsuspecting")
repout = StringIO()
total = cov.report(file=repout)
report = repout.getvalue().splitlines()
expected = [
'Name Stmts Miss Cover Missing',
'-----------------------------------------------',
'unsuspecting.py 6 3 50% 2, 4, 6',
]
self.assertEqual(report, expected)
self.assertEqual(total, 50)
class BadPluginTest(FileTracerTest):
"""Test error handling around plugins."""
def run_plugin(self, module_name):
"""Run a plugin with the given module_name.
Uses a few fixed Python files.
Returns the Coverage object.
"""
self.make_file("simple.py", """\
import other, another
a = other.f(2)
b = other.f(3)
c = another.g(4)
d = another.g(5)
""")
# The names of these files are important: some plugins apply themselves
# to "*other.py".
self.make_file("other.py", """\
def f(x):
return x+1
""")
self.make_file("another.py", """\
def g(x):
return x-1
""")
cov = coverage.Coverage()
cov.set_option("run:plugins", [module_name])
self.start_import_stop(cov, "simple")
return cov
def run_bad_plugin(self, module_name, plugin_name, our_error=True, excmsg=None):
"""Run a file, and see that the plugin failed.
`module_name` and `plugin_name` are the module and the name of the plugin
to use.
`our_error` is True if the error reported to the user will be an
explicit error in our test code, marked with an '# Oh noes!' comment.
`excmsg`, if provided, is text that should appear in the stderr output.
The plugin will be disabled, and we check that a warning is output
explaining why.
"""
self.run_plugin(module_name)
stderr = self.stderr()
print(stderr) # for diagnosing test failures.
if our_error:
errors = stderr.count("# Oh noes!")
# The exception we're causing should only appear once.
self.assertEqual(errors, 1)
# There should be a warning explaining what's happening, but only one.
# The message can be in two forms:
# Disabling plugin '...' due to previous exception
# or:
# Disabling plugin '...' due to an exception:
msg = "Disabling plugin '%s.%s' due to " % (module_name, plugin_name)
warnings = stderr.count(msg)
self.assertEqual(warnings, 1)
if excmsg:
self.assertIn(excmsg, stderr)
def test_file_tracer_has_no_file_tracer_method(self):
self.make_file("bad_plugin.py", """\
class Plugin(object):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_file_tracer_has_inherited_sourcefilename_method(self):
self.make_file("bad_plugin.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
# Just grab everything.
return FileTracer()
class FileTracer(coverage.FileTracer):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin(
"bad_plugin", "Plugin", our_error=False,
excmsg="Class 'bad_plugin.FileTracer' needs to implement source_filename()",
)
def test_plugin_has_inherited_filereporter_method(self):
self.make_file("bad_plugin.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
# Just grab everything.
return FileTracer()
class FileTracer(coverage.FileTracer):
def source_filename(self):
return "foo.xxx"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
cov = self.run_plugin("bad_plugin")
expected_msg = "Plugin 'bad_plugin.Plugin' needs to implement file_reporter()"
with self.assertRaisesRegex(NotImplementedError, expected_msg):
cov.report()
def test_file_tracer_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
17/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_file_tracer_returns_wrong(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return 3.14159
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_has_dynamic_source_filename_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def has_dynamic_source_filename(self):
23/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_source_filename_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
42/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_source_filename_returns_wrong(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return 17.3
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_dynamic_source_filename_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def has_dynamic_source_filename(self):
return True
def dynamic_source_filename(self, filename, frame):
101/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_line_number_range_returns_non_tuple(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return 42.23
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_line_number_range_returns_triple(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return (1, 2, 3)
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_line_number_range_returns_pair_of_strings(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return ("5", "7")
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import mock
import time
import unittest
import logging
import functools
from nose.tools import * # noqa: F403
import pytest
from framework.auth.core import Auth
from website import settings
import website.search.search as search
from website.search import elastic_search
from website.search.util import build_query
from website.search_migration.migrate import migrate
from osf.models import (
Retraction,
NodeLicense,
OSFGroup,
Tag,
Preprint,
QuickFilesNode,
)
from addons.wiki.models import WikiPage
from addons.osfstorage.models import OsfStorageFile
from scripts.populate_institutions import main as populate_institutions
from osf_tests import factories
from tests.base import OsfTestCase
from tests.test_features import requires_search
from tests.utils import run_celery_tasks
TEST_INDEX = 'test'
def query(term, raw=False):
results = search.search(build_query(term), index=elastic_search.INDEX, raw=raw)
return results
def query_collections(name):
term = 'category:collectionSubmission AND "{}"'.format(name)
return query(term, raw=True)
def query_user(name):
term = 'category:user AND "{}"'.format(name)
return query(term)
def query_file(name):
term = 'category:file AND "{}"'.format(name)
return query(term)
def query_tag_file(name):
term = 'category:file AND (tags:u"{}")'.format(name)
return query(term)
def retry_assertion(interval=0.3, retries=3):
def test_wrapper(func):
t_interval = interval
t_retries = retries
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
func(*args, **kwargs)
except AssertionError as e:
if retries:
time.sleep(t_interval)
retry_assertion(interval=t_interval, retries=t_retries - 1)(func)(*args, **kwargs)
else:
raise e
return wrapped
return test_wrapper
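# Hedged usage sketch: the decorator factory must be called, even when using
# the defaults, e.g.
# @retry_assertion(interval=0.5, retries=5)
# def test_eventually_indexed(self):
#     assert_equal(len(query_user('Some Name')['results']), 1)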
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestCollectionsSearch(OsfTestCase):
def setUp(self):
super(TestCollectionsSearch, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='Salif Keita')
self.node_private = factories.NodeFactory(creator=self.user, title='Salif Keita: Madan', is_public=False)
self.node_public = factories.NodeFactory(creator=self.user, title='Salif Keita: Yamore', is_public=True)
self.node_one = factories.NodeFactory(creator=self.user, title='Salif Keita: Mandjou', is_public=True)
self.node_two = factories.NodeFactory(creator=self.user, title='Salif Keita: Tekere', is_public=True)
self.reg_private = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=False, archive=True)
self.reg_public = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True, archive=True)
self.reg_one = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True, archive=True)
self.provider = factories.CollectionProviderFactory()
self.reg_provider = factories.RegistrationProviderFactory()
self.collection_one = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider)
self.collection_public = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider)
self.collection_private = factories.CollectionFactory(creator=self.user, is_public=False, provider=self.provider)
self.reg_collection = factories.CollectionFactory(creator=self.user, provider=self.reg_provider, is_public=True)
self.reg_collection_private = factories.CollectionFactory(creator=self.user, provider=self.reg_provider, is_public=False)
def test_only_public_collections_submissions_are_searchable(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_private, self.user)
self.reg_collection.collect_object(self.reg_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
assert_false(self.node_one.is_collected)
assert_false(self.node_public.is_collected)
self.collection_one.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_public, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_public.is_collected)
assert_true(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
self.collection_private.collect_object(self.node_two, self.user)
self.reg_collection_private.collect_object(self.reg_one, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 3)
def test_index_on_submission_privacy_changes(self):
# test_submissions_turned_private_are_deleted_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_one.collect_object(self.node_one, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
with run_celery_tasks():
self.node_one.is_public = False
self.node_one.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_turned_public_are_added_to_index
self.collection_public.collect_object(self.node_private, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.node_private.is_public = True
self.node_private.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 1)
def test_index_on_collection_privacy_changes(self):
# test_submissions_of_collection_turned_private_are_removed_from_index
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 4)
with run_celery_tasks():
self.collection_public.is_public = False
self.collection_public.save()
self.reg_collection.is_public = False
self.reg_collection.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
# test_submissions_of_collection_turned_public_are_added_to_index
self.collection_private.collect_object(self.node_one, self.user)
self.collection_private.collect_object(self.node_two, self.user)
self.collection_private.collect_object(self.node_public, self.user)
self.reg_collection_private.collect_object(self.reg_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.node_two.is_collected)
assert_true(self.node_public.is_collected)
assert_true(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.collection_private.is_public = True
self.collection_private.save()
self.reg_collection.is_public = True
self.reg_collection.save()
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 4)
def test_collection_submissions_are_removed_from_index_on_delete(self):
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
self.collection_public.collect_object(self.node_one, self.user)
self.collection_public.collect_object(self.node_two, self.user)
self.collection_public.collect_object(self.node_public, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 4)
self.collection_public.delete()
self.reg_collection.delete()
assert_true(self.collection_public.deleted)
assert_true(self.reg_collection.deleted)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
def test_removed_submission_are_removed_from_index(self):
self.collection_public.collect_object(self.node_one, self.user)
self.reg_collection.collect_object(self.reg_public, self.user)
assert_true(self.node_one.is_collected)
assert_true(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 2)
self.collection_public.remove_object(self.node_one)
self.reg_collection.remove_object(self.reg_public)
assert_false(self.node_one.is_collected)
assert_false(self.reg_public.is_collected)
docs = query_collections('Salif Keita')['results']
assert_equal(len(docs), 0)
def test_collection_submission_doc_structure(self):
self.collection_public.collect_object(self.node_one, self.user)
docs = query_collections('Keita')['results']
assert_equal(docs[0]['_source']['title'], self.node_one.title)
with run_celery_tasks():
self.node_one.title = 'Keita Royal Family of Mali'
self.node_one.save()
docs = query_collections('Keita')['results']
assert_equal(docs[0]['_source']['title'], self.node_one.title)
assert_equal(docs[0]['_source']['abstract'], self.node_one.description)
assert_equal(docs[0]['_source']['contributors'][0]['url'], self.user.url)
assert_equal(docs[0]['_source']['contributors'][0]['fullname'], self.user.fullname)
assert_equal(docs[0]['_source']['url'], self.node_one.url)
assert_equal(docs[0]['_source']['id'], '{}-{}'.format(self.node_one._id,
self.node_one.collecting_metadata_list[0].collection._id))
assert_equal(docs[0]['_source']['category'], 'collectionSubmission')
def test_search_updated_after_id_change(self):
self.provider.primary_collection.collect_object(self.node_one, self.node_one.creator)
with run_celery_tasks():
self.node_one.save()
term = f'provider:{self.provider._id}'
docs = search.search(build_query(term), index=elastic_search.INDEX, raw=True)
assert_equal(len(docs['results']), 1)
self.provider._id = 'new_id'
self.provider.save()
docs = query(f'provider:new_id', raw=True)['results']
assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestUserUpdate(OsfTestCase):
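# Tests that user account changes (creation, confirmation, renaming, disabling, merging, jobs, education) are reflected in the user search index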
def setUp(self):
super(TestUserUpdate, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='David Bowie')
def test_new_user(self):
# Verify that user has been added to Elastic Search
docs = query_user(self.user.fullname)['results']
assert_equal(len(docs), 1)
def test_new_user_unconfirmed(self):
user = factories.UnconfirmedUserFactory()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 0)
token = user.get_confirmation_token(user.username)
user.confirm_email(token)
user.save()
docs = query_user(user.fullname)['results']
assert_equal(len(docs), 1)
@retry_assertion
def test_change_name(self):
# Add a user, change her name, and verify that only the new name is
# found in search.
user = factories.UserFactory(fullname='Barry Mitchell')
fullname_original = user.fullname
user.fullname = user.fullname[::-1]
user.save()
docs_original = query_user(fullname_original)['results']
assert_equal(len(docs_original), 0)
docs_current = query_user(user.fullname)['results']
assert_equal(len(docs_current), 1)
def test_disabled_user(self):
# Test that disabled users are not in search index
user = factories.UserFactory(fullname='Bettie Page')
user.save()
# Ensure user is in search index
assert_equal(len(query_user(user.fullname)['results']), 1)
# Disable the user
user.is_disabled = True
user.save()
# Ensure user is not in search index
assert_equal(len(query_user(user.fullname)['results']), 0)
@pytest.mark.enable_quickfiles_creation
def test_merged_user(self):
user = factories.UserFactory(fullname='Annie Lennox')
merged_user = factories.UserFactory(fullname='Lisa Stansfield')
user.save()
merged_user.save()
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 1)
user.merge_user(merged_user)
assert_equal(len(query_user(user.fullname)['results']), 1)
assert_equal(len(query_user(merged_user.fullname)['results']), 0)
def test_employment(self):
user = factories.UserFactory(fullname='Helga Finn')
user.save()
institution = 'Finn\'s Fine Filers'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.jobs.append({
'institution': institution,
'title': 'The Big Finn',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_education(self):
user = factories.UserFactory(fullname='Henry Johnson')
user.save()
institution = 'Henry\'s Amazing School!!!'
docs = query_user(institution)['results']
assert_equal(len(docs), 0)
user.schools.append({
'institution': institution,
'degree': 'failed all classes',
})
user.save()
docs = query_user(institution)['results']
assert_equal(len(docs), 1)
def test_name_fields(self):
names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great']
user = factories.UserFactory(fullname=names[0])
user.given_name = names[1]
user.middle_names = names[2]
user.family_name = names[3]
user.suffix = names[4]
user.save()
docs = [query_user(name)['results'] for name in names]
assert_equal(sum(map(len, docs)), len(docs)) # 1 result each
assert_true(all([user._id == doc[0]['id'] for doc in docs]))
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestProject(OsfTestCase):
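# Tests that a project's privacy setting controls whether it appears in search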
def setUp(self):
super(TestProject, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.project = factories.ProjectFactory(title='Red Special', creator=self.user)
def test_new_project_private(self):
# Verify that a private project is not present in Elastic Search.
docs = query(self.project.title)['results']
assert_equal(len(docs), 0)
def test_make_public(self):
# Make project public, and verify that it is present in Elastic
# Search.
with run_celery_tasks():
self.project.set_privacy('public')
docs = query(self.project.title)['results']
assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestOSFGroup(OsfTestCase):
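# Tests that OSF group creation, renaming, membership changes, and node connections are reflected in search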
def setUp(self):
with run_celery_tasks():
super(TestOSFGroup, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.user_two = factories.UserFactory(fullname='Grapes McGee')
self.group = OSFGroup(
name='Cornbread',
creator=self.user,
)
self.group.save()
self.project = factories.ProjectFactory(is_public=True, creator=self.user, title='Biscuits')
self.project.save()
def test_create_osf_group(self):
title = 'Butter'
group = OSFGroup(name=title, creator=self.user)
group.save()
docs = query(title)['results']
assert_equal(len(docs), 1)
def test_set_group_name(self):
title = 'Eggs'
self.group.set_group_name(title)
self.group.save()
docs = query(title)['results']
assert_equal(len(docs), 1)
docs = query('Cornbread')['results']
assert_equal(len(docs), 0)
def test_add_member(self):
self.group.make_member(self.user_two)
docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
assert_equal(len(docs), 1)
self.group.make_manager(self.user_two)
docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
assert_equal(len(docs), 1)
self.group.remove_member(self.user_two)
docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results']
assert_equal(len(docs), 0)
def test_connect_to_node(self):
self.project.add_osf_group(self.group)
docs = query('category:project AND "{}"'.format(self.group.name))['results']
assert_equal(len(docs), 1)
self.project.remove_osf_group(self.group)
docs = query('category:project AND "{}"'.format(self.group.name))['results']
assert_equal(len(docs), 0)
def test_remove_group(self):
group_name = self.group.name
self.project.add_osf_group(self.group)
docs = query('category:project AND "{}"'.format(group_name))['results']
assert_equal(len(docs), 1)
self.group.remove_group()
docs = query('category:project AND "{}"'.format(group_name))['results']
assert_equal(len(docs), 0)
docs = query(group_name)['results']
assert_equal(len(docs), 0)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestPreprint(OsfTestCase):
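# Tests that preprint publication state, metadata, primary files, tags, and contributors are kept in sync with search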
def setUp(self):
with run_celery_tasks():
super(TestPreprint, self).setUp()
search.delete_index(elastic_search.INDEX)
search.create_index(elastic_search.INDEX)
self.user = factories.UserFactory(fullname='John Deacon')
self.preprint = Preprint(
title='Red Special',
description='We are the champions',
creator=self.user,
provider=factories.PreprintProviderFactory()
)
self.preprint.save()
self.file = OsfStorageFile.create(
target=self.preprint,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
self.file.save()
self.published_preprint = factories.PreprintFactory(
creator=self.user,
title='My Fairy King',
description='Under pressure',
)
def test_new_preprint_unsubmitted(self):
# Verify that an unsubmitted preprint is not present in Elastic Search.
title = 'Apple'
self.preprint.title = title
self.preprint.save()
docs = query(title)['results']
assert_equal(len(docs), 0)
def test_new_preprint_unpublished(self):
# Verify that an unpublished preprint is not present in Elastic Search.
title = 'Banana'
self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title)
assert self.preprint.title == title
docs = query(title)['results']
assert_equal(len(docs), 0)
def test_unsubmitted_preprint_primary_file(self):
# An unsubmitted preprint's primary_file should not show up in Elastic Search
title = 'Cantaloupe'
self.preprint.title = title
self.preprint.set_primary_file(self.file, auth=Auth(self.user), save=True)
assert self.preprint.title == title
docs = query(title)['results']
assert_equal(len(docs), 0)
def test_publish_preprint(self):
title = 'Date'
self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title)
self.preprint.set_published(True, auth=Auth(self.preprint.creator), save=True)
assert self.preprint.title == title
docs = query(title)['results']
# Both preprint and primary_file showing up in Elastic
assert_equal(len(docs), 2)
def test_preprint_title_change(self):
title_original = self.published_preprint.title
new_title = 'New preprint title'
self.published_preprint.set_title(new_title, auth=Auth(self.user), save=True)
docs = query('category:preprint AND ' + title_original)['results']
assert_equal(len(docs), 0)
docs = query('category:preprint AND ' + new_title)['results']
assert_equal(len(docs), 1)
def test_preprint_description_change(self):
description_original = self.published_preprint.description
new_abstract = 'My preprint abstract'
self.published_preprint.set_description(new_abstract, auth=Auth(self.user), save=True)
docs = query(self.published_preprint.title)['results']
docs = query('category:preprint AND ' + description_original)['results']
assert_equal(len(docs), 0)
docs = query('category:preprint AND ' + new_abstract)['results']
assert_equal(len(docs), 1)
def test_set_preprint_private(self):
# Not currently an option for users, but can be used for spam
self.published_preprint.set_privacy('private', auth=Auth(self.user), save=True)
docs = query(self.published_preprint.title)['results']
# Both preprint and primary_file showing up in Elastic
assert_equal(len(docs), 0)
def test_set_primary_file(self):
# Only the primary_file should be in the index; if the primary_file is changed, other files are removed from the index.
self.file = OsfStorageFile.create(
target=self.published_preprint,
path='/panda.txt',
name='panda.txt',
materialized_path='/panda.txt')
self.file.save()
self.published_preprint.set_primary_file(self.file, auth=Auth(self.user), save=True)
docs = query(self.published_preprint.title)['results']
assert_equal(len(docs), 2)
assert_equal(docs[1]['name'], self.file.name)
def test_set_license(self):
license_details = {
'id': 'NONE',
'year': '2015',
'copyrightHolders': ['Iron Man']
}
title = 'Elderberry'
self.published_preprint.title = title
self.published_preprint.set_preprint_license(license_details, Auth(self.user), save=True)
assert self.published_preprint.title == title
docs = query(title)['results']
assert_equal(len(docs), 2)
assert_equal(docs[0]['license']['copyright_holders'][0], 'Iron Man')
assert_equal(docs[0]['license']['name'], 'No license')
def test_add_tags(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
self.published_preprint.add_tag(tag, Auth(self.user), save=True)
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 1)
def test_remove_tag(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.published_preprint.add_tag(tag, Auth(self.user), save=True)
self.published_preprint.remove_tag(tag, Auth(self.user), save=True)
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
def test_add_contributor(self):
# Add a contributor, then verify that project is found when searching
# for contributor.
user2 = factories.UserFactory(fullname='Adam Lambert')
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
self.published_preprint.add_contributor(user2, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_remove_contributor(self):
# Add and remove a contributor, then verify that project is not found
# when searching for contributor.
user2 = factories.UserFactory(fullname='Brian May')
self.published_preprint.add_contributor(user2, save=True)
self.published_preprint.remove_contributor(user2, Auth(self.user))
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
def test_hide_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.published_preprint.add_contributor(user2)
self.published_preprint.set_visible(user2, False, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
self.published_preprint.set_visible(user2, True, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_move_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.published_preprint.add_contributor(user2, save=True)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
assert_equal(docs[0]['contributors'][0]['fullname'], self.user.fullname)
assert_equal(docs[0]['contributors'][1]['fullname'], user2.fullname)
self.published_preprint.move_contributor(user2, Auth(self.user), 0)
docs = query('category:preprint AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
assert_equal(docs[0]['contributors'][0]['fullname'], user2.fullname)
assert_equal(docs[0]['contributors'][1]['fullname'], self.user.fullname)
def test_tag_aggregation(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.published_preprint.add_tag(tag, Auth(self.user), save=True)
docs = query(self.published_preprint.title)['tags']
assert len(docs) == 3
for doc in docs:
assert doc['key'] in tags
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestNodeSearch(OsfTestCase):
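# Tests that node license information is indexed and propagated to child nodes in search results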
def setUp(self):
super(TestNodeSearch, self).setUp()
with run_celery_tasks():
self.node = factories.ProjectFactory(is_public=True, title='node')
self.public_child = factories.ProjectFactory(parent=self.node, is_public=True, title='public_child')
self.private_child = factories.ProjectFactory(parent=self.node, title='private_child')
self.public_subchild = factories.ProjectFactory(parent=self.private_child, is_public=True)
self.node.node_license = factories.NodeLicenseRecordFactory()
self.node.save()
self.query = 'category:project & category:component'
@retry_assertion()
def test_node_license_added_to_search(self):
docs = query(self.query)['results']
node = [d for d in docs if d['title'] == self.node.title][0]
assert_in('license', node)
assert_equal(node['license']['id'], self.node.node_license.license_id)
@unittest.skip('Elasticsearch latency seems to be causing these tests to fail randomly.')
@retry_assertion(retries=10)
def test_node_license_propagates_to_children(self):
docs = query(self.query)['results']
child = [d for d in docs if d['title'] == self.public_child.title][0]
assert_in('license', child)
assert_equal(child['license'].get('id'), self.node.node_license.license_id)
child = [d for d in docs if d['title'] == self.public_subchild.title][0]
assert_in('license', child)
assert_equal(child['license'].get('id'), self.node.node_license.license_id)
@unittest.skip('Elasticsearch latency seems to be causing these tests to fail randomly.')
@retry_assertion(retries=10)
def test_node_license_updates_correctly(self):
other_license = NodeLicense.objects.get(name='MIT License')
new_license = factories.NodeLicenseRecordFactory(node_license=other_license)
self.node.node_license = new_license
self.node.save()
docs = query(self.query)['results']
for doc in docs:
assert_equal(doc['license'].get('id'), new_license.license_id)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestRegistrationRetractions(OsfTestCase):
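# Tests how pending and approved retractions affect the searchability of registrations and their wiki content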
def setUp(self):
super(TestRegistrationRetractions, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.registration = factories.RegistrationFactory(project=self.project, is_public=True)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_is_searchable(self):
self.registration.retract_registration(self.user)
self.registration.retraction.state = Retraction.APPROVED
self.registration.retraction.save()
self.registration.save()
self.registration.retraction._on_complete(self.user)
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_pending_retraction_wiki_content_is_searchable(self):
# Add unique string to wiki
wiki_content = {'home': 'public retraction test'}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth)
# Query and ensure unique string shows up
docs = query(value)['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
# Retract registration
self.registration.retract_registration(self.user, '')
with run_celery_tasks():
self.registration.save()
self.registration.reload()
# Query and ensure unique string in wiki still shows up while the retraction is pending
docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False))
def test_retraction_wiki_content_is_not_searchable(self):
# Add unique string to wiki
wiki_content = {'home': 'public retraction test'}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth)
# Query and ensure unique string shows up
docs = query(value)['results']
assert_equal(len(docs), 1)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
# Retract registration
self.registration.retract_registration(self.user, '')
self.registration.retraction.state = Retraction.APPROVED
with run_celery_tasks():
self.registration.retraction.save()
self.registration.save()
self.registration.update_search()
# Query and ensure unique string in wiki doesn't show up
docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results']
assert_equal(len(docs), 0)
# Query and ensure registration does show up
docs = query('category:registration AND ' + self.title)['results']
assert_equal(len(docs), 1)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestPublicNodes(OsfTestCase):
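# Tests that public projects, components, and registrations stay in sync with search as privacy, titles, tags, wikis, and contributors change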
def setUp(self):
with run_celery_tasks():
super(TestPublicNodes, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.title = 'Red Special'
self.consolidate_auth = Auth(user=self.user)
self.project = factories.ProjectFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.component = factories.NodeFactory(
parent=self.project,
description='',
title=self.title,
creator=self.user,
is_public=True
)
self.registration = factories.RegistrationFactory(
title=self.title,
description='',
creator=self.user,
is_public=True,
)
self.registration.archive_job.target_addons.clear()
self.registration.archive_job.status = 'SUCCESS'
self.registration.archive_job.save()
def test_make_private(self):
# Make project public, then private, and verify that it is not present
# in search.
with run_celery_tasks():
self.project.set_privacy('private')
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.component.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_search_node_partial(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Blue')['results']
assert_equal(len(find), 1)
def test_search_node_partial_with_sep(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Express')['results']
assert_equal(len(find), 1)
def test_search_node_not_name(self):
self.project.set_title('Blue Rider-Express', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
find = query('Green Flyer-Slow')['results']
assert_equal(len(find), 0)
def test_public_parent_title(self):
self.project.set_title('hello & world', self.consolidate_auth)
with run_celery_tasks():
self.project.save()
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_equal(docs[0]['parent_title'], 'hello & world')
assert_true(docs[0]['parent_url'])
def test_make_parent_private(self):
# Make the parent of the component public, then private, and verify that
# the component still appears but doesn't link to the parent in search.
with run_celery_tasks():
self.project.set_privacy('private')
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 1)
assert_false(docs[0]['parent_title'])
assert_false(docs[0]['parent_url'])
def test_delete_project(self):
with run_celery_tasks():
self.component.remove_node(self.consolidate_auth)
docs = query('category:component AND ' + self.title)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.remove_node(self.consolidate_auth)
docs = query('category:project AND ' + self.title)['results']
assert_equal(len(docs), 0)
def test_change_title(self):
title_original = self.project.title
with run_celery_tasks():
self.project.set_title(
'Blue Ordinary', self.consolidate_auth, save=True
)
docs = query('category:project AND ' + title_original)['results']
assert_equal(len(docs), 0)
docs = query('category:project AND ' + self.project.title)['results']
assert_equal(len(docs), 1)
def test_add_tags(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
with run_celery_tasks():
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
self.project.add_tag(tag, self.consolidate_auth, save=True)
for tag in tags:
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 1)
def test_remove_tag(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
self.project.remove_tag(tag, self.consolidate_auth, save=True)
docs = query('tags:"{}"'.format(tag))['results']
assert_equal(len(docs), 0)
def test_update_wiki(self):
"""Add text to a wiki page, then verify that project is found when
searching for wiki text.
"""
wiki_content = {
'home': 'Hammer to fall',
'swag': '#YOLO'
}
for key, value in wiki_content.items():
docs = query(value)['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
WikiPage.objects.create_for_node(self.project, key, value, self.consolidate_auth)
docs = query(value)['results']
assert_equal(len(docs), 1)
def test_clear_wiki(self):
# Add wiki text to page, then delete, then verify that project is not
# found when searching for wiki text.
wiki_content = 'Hammer to fall'
wp = WikiPage.objects.create_for_node(self.project, 'home', wiki_content, self.consolidate_auth)
with run_celery_tasks():
wp.update(self.user, '')
docs = query(wiki_content)['results']
assert_equal(len(docs), 0)
def test_add_contributor(self):
# Add a contributor, then verify that project is found when searching
# for contributor.
user2 = factories.UserFactory(fullname='Adam Lambert')
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.add_contributor(user2, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_remove_contributor(self):
# Add and remove a contributor, then verify that project is not found
# when searching for contributor.
user2 = factories.UserFactory(fullname='Brian May')
self.project.add_contributor(user2, save=True)
self.project.remove_contributor(user2, self.consolidate_auth)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
def test_hide_contributor(self):
user2 = factories.UserFactory(fullname='Brian May')
self.project.add_contributor(user2)
with run_celery_tasks():
self.project.set_visible(user2, False, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 0)
with run_celery_tasks():
self.project.set_visible(user2, True, save=True)
docs = query('category:project AND "{}"'.format(user2.fullname))['results']
assert_equal(len(docs), 1)
def test_wrong_order_search(self):
title_parts = self.title.split(' ')
title_parts.reverse()
title_search = ' '.join(title_parts)
docs = query(title_search)['results']
assert_equal(len(docs), 3)
def test_tag_aggregation(self):
tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family']
with run_celery_tasks():
for tag in tags:
self.project.add_tag(tag, self.consolidate_auth, save=True)
docs = query(self.title)['tags']
assert len(docs) == 3
for doc in docs:
assert doc['key'] in tags
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestAddContributor(OsfTestCase):
# Tests of the search.search_contributor method
def setUp(self):
self.name1 = 'Roger1 Taylor1'
self.name2 = 'John2 Deacon2'
self.name3 = u'j\xc3\xb3ebert3 Smith3'
self.name4 = u'B\xc3\xb3bbert4 Jones4'
with run_celery_tasks():
super(TestAddContributor, self).setUp()
self.user = factories.UserFactory(fullname=self.name1)
self.user3 = factories.UserFactory(fullname=self.name3)
def test_unreg_users_dont_show_in_search(self):
unreg = factories.UnregUserFactory()
contribs = search.search_contributor(unreg.fullname)
assert_equal(len(contribs['users']), 0)
def test_unreg_users_do_show_on_projects(self):
with run_celery_tasks():
unreg = factories.UnregUserFactory(fullname='Robert Paulson')
self.project = factories.ProjectFactory(
title='Glamour Rock',
creator=unreg,
is_public=True,
)
results = query(unreg.fullname)['results']
assert_equal(len(results), 1)
def test_search_fullname(self):
# Searching for full name yields exactly one result.
contribs = search.search_contributor(self.name1)
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2)
assert_equal(len(contribs['users']), 0)
def test_search_firstname(self):
# Searching for first name yields exactly one result.
contribs = search.search_contributor(self.name1.split(' ')[0])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0])
assert_equal(len(contribs['users']), 0)
def test_search_partial(self):
# Searching for part of first name yields exactly one
# result.
contribs = search.search_contributor(self.name1.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name2.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 0)
def test_search_fullname_special_character(self):
# Searching for a fullname with a special character yields
# exactly one result.
contribs = search.search_contributor(self.name3)
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4)
assert_equal(len(contribs['users']), 0)
def test_search_firstname_special_character(self):
# Searching for a first name with a special character yields
# exactly one result.
contribs = search.search_contributor(self.name3.split(' ')[0])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4.split(' ')[0])
assert_equal(len(contribs['users']), 0)
def test_search_partial_special_character(self):
# Searching for a partial name with a special character yields
# exactly one result.
contribs = search.search_contributor(self.name3.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 1)
contribs = search.search_contributor(self.name4.split(' ')[0][:-1])
assert_equal(len(contribs['users']), 0)
def test_search_profile(self):
orcid = '123456'
user = factories.UserFactory()
user.social['orcid'] = orcid
user.save()
contribs = search.search_contributor(orcid)
assert_equal(len(contribs['users']), 1)
assert_equal(len(contribs['users'][0]['social']), 1)
assert_equal(contribs['users'][0]['social']['orcid'], user.social_links['orcid'])
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestProjectSearchResults(OsfTestCase):
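# Tests that singular, plural, and possessive variants of a project title are all matched by search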
def setUp(self):
self.singular = 'Spanish Inquisition'
self.plural = 'Spanish Inquisitions'
self.possessive = 'Spanish\'s Inquisition'
with run_celery_tasks():
super(TestProjectSearchResults, self).setUp()
self.user = factories.UserFactory(fullname='Doug Bogie')
self.project_singular = factories.ProjectFactory(
title=self.singular,
creator=self.user,
is_public=True,
)
self.project_plural = factories.ProjectFactory(
title=self.plural,
creator=self.user,
is_public=True,
)
self.project_possessive = factories.ProjectFactory(
title=self.possessive,
creator=self.user,
is_public=True,
)
self.project_unrelated = factories.ProjectFactory(
title='Cardinal Richelieu',
creator=self.user,
is_public=True,
)
def test_singular_query(self):
# Verify searching for singular term includes singular,
# possessive and plural versions in results.
time.sleep(1)
results = query(self.singular)['results']
assert_equal(len(results), 3)
def test_plural_query(self):
# Verify searching for plural term includes singular,
# possessive and plural versions in results.
results = query(self.plural)['results']
assert_equal(len(results), 3)
def test_possessive_query(self):
# Verify searching for possessive term includes singular,
# possessive and plural versions in results.
results = query(self.possessive)['results']
assert_equal(len(results), 3)
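# Helper that builds an employment/job dict for UserFactory: month/year fields default to 'December'/'2000',
# all other fields default to 'test_<key>', and any field can be overridden via kwargs.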
def job(**kwargs):
keys = [
'title',
'institution',
'department',
'location',
'startMonth',
'startYear',
'endMonth',
'endYear',
'ongoing',
]
job = {}
for key in keys:
if key[-5:] == 'Month':
job[key] = kwargs.get(key, 'December')
elif key[-4:] == 'Year':
job[key] = kwargs.get(key, '2000')
else:
job[key] = kwargs.get(key, 'test_{}'.format(key))
return job
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestUserSearchResults(OsfTestCase):
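# Tests that users are findable by the institutions in their current and past employment history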
def setUp(self):
with run_celery_tasks():
super(TestUserSearchResults, self).setUp()
self.user_one = factories.UserFactory(jobs=[job(institution='Oxford'),
job(institution='Star Fleet')],
fullname='Date Soong')
self.user_two = factories.UserFactory(jobs=[job(institution='Grapes la Picard'),
job(institution='Star Fleet')],
fullname='Jean-Luc Picard')
self.user_three = factories.UserFactory(jobs=[job(institution='Star Fleet'),
job(institution='Federation Medical')],
fullname='Beverly Crusher')
self.user_four = factories.UserFactory(jobs=[job(institution='Star Fleet')],
fullname='William Riker')
self.user_five = factories.UserFactory(jobs=[job(institution='Traveler intern'),
job(institution='Star Fleet Academy'),
job(institution='Star Fleet Intern')],
fullname='Wesley Crusher')
for i in range(25):
factories.UserFactory(jobs=[job()])
self.current_starfleet = [
self.user_three,
self.user_four,
]
self.were_starfleet = [
self.user_one,
self.user_two,
self.user_three,
self.user_four,
self.user_five
]
@unittest.skip('Cannot guarantee this always passes')
def test_current_job_first_in_results(self):
results = query_user('Star Fleet')['results']
result_names = [r['names']['fullname'] for r in results]
current_starfleet_names = [u.fullname for u in self.current_starfleet]
for name in result_names[:2]:
assert_in(name, current_starfleet_names)
def test_had_job_in_results(self):
results = query_user('Star Fleet')['results']
result_names = [r['names']['fullname'] for r in results]
were_starfleet_names = [u.fullname for u in self.were_starfleet]
for name in result_names:
assert_in(name, were_starfleet_names)
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchExceptions(OsfTestCase):
# Verify that search failures are handled gracefully when the search connection is lost
@classmethod
def setUpClass(cls):
logging.getLogger('website.project.model').setLevel(logging.CRITICAL)
super(TestSearchExceptions, cls).setUpClass()
if settings.SEARCH_ENGINE == 'elastic':
cls._client = search.search_engine.CLIENT
search.search_engine.CLIENT = None
@classmethod
def tearDownClass(cls):
super(TestSearchExceptions, cls).tearDownClass()
if settings.SEARCH_ENGINE == 'elastic':
search.search_engine.CLIENT = cls._client
@requires_search
def test_connection_error(self):
# Ensures that saving projects/users doesn't break as a result of connection errors
self.user = factories.UserFactory(fullname='Doug Bogie')
self.project = factories.ProjectFactory(
title='Tom Sawyer',
creator=self.user,
is_public=True,
)
self.user.save()
self.project.save()
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchMigration(OsfTestCase):
# Verify that the correct indices are created/deleted during migration
@classmethod
def tearDownClass(cls):
super(TestSearchMigration, cls).tearDownClass()
search.create_index(settings.ELASTIC_INDEX)
def setUp(self):
super(TestSearchMigration, self).setUp()
populate_institutions(default_args=True)
self.es = search.search_engine.CLIENT
search.delete_index(settings.ELASTIC_INDEX)
search.create_index(settings.ELASTIC_INDEX)
self.user = factories.UserFactory(fullname='David Bowie')
self.project = factories.ProjectFactory(
title=settings.ELASTIC_INDEX,
creator=self.user,
is_public=True
)
self.preprint = factories.PreprintFactory(
creator=self.user
)
def test_first_migration_no_remove(self):
migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_no_remove(self):
for n in range(1, 21):
migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
def test_first_migration_with_remove(self):
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX)
def test_multiple_migrations_with_remove(self):
for n in range(1, 21, 2):
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app)
var = self.es.indices.get_aliases()
assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys())[0], settings.ELASTIC_INDEX)
assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n))
def test_migration_institutions(self):
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
count_query = {}
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
institution_bucket_found = False
res = self.es.search(index=settings.ELASTIC_INDEX, doc_type=None, search_type='count', body=count_query)
for bucket in res['aggregations']['counts']['buckets']:
if bucket['key'] == u'institution':
institution_bucket_found = True
assert_equal(institution_bucket_found, True)
def test_migration_collections(self):
provider = factories.CollectionProviderFactory()
collection_one = factories.CollectionFactory(is_public=True, provider=provider)
collection_two = factories.CollectionFactory(is_public=True, provider=provider)
node = factories.NodeFactory(creator=self.user, title='Ali Bomaye', is_public=True)
collection_one.collect_object(node, self.user)
collection_two.collect_object(node, self.user)
assert node.is_collected
docs = query_collections('*')['results']
assert len(docs) == 2
docs = query_collections('Bomaye')['results']
assert len(docs) == 2
count_query = {}
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app)
docs = query_collections('*')['results']
assert len(docs) == 2
docs = query_collections('Bomaye')['results']
assert len(docs) == 2
res = self.es.search(index=settings.ELASTIC_INDEX, doc_type='collectionSubmission', search_type='count', body=count_query)
assert res['hits']['total'] == 2
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchFiles(OsfTestCase):
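# Tests that files appear in and disappear from search as they are added, tagged, deleted, or their containing node changes visibility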
def setUp(self):
super(TestSearchFiles, self).setUp()
self.node = factories.ProjectFactory(is_public=True, title='Otis')
self.osf_storage = self.node.get_addon('osfstorage')
self.root = self.osf_storage.get_root()
def test_search_file(self):
self.root.append_file('Shake.wav')
find = query_file('Shake.wav')['results']
assert_equal(len(find), 1)
def test_search_file_name_without_separator(self):
self.root.append_file('Shake.wav')
find = query_file('Shake')['results']
assert_equal(len(find), 1)
def test_delete_file(self):
file_ = self.root.append_file('I\'ve Got Dreams To Remember.wav')
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 1)
file_.delete()
find = query_file('I\'ve Got Dreams To Remember.wav')['results']
assert_equal(len(find), 0)
def test_add_tag(self):
file_ = self.root.append_file('That\'s How Strong My Love Is.mp3')
tag = Tag(name='Redding')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Redding')['results']
assert_equal(len(find), 1)
def test_remove_tag(self):
file_ = self.root.append_file('I\'ve Been Loving You Too Long.mp3')
tag = Tag(name='Blue')
tag.save()
file_.tags.add(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 1)
file_.tags.remove(tag)
file_.save()
find = query_tag_file('Blue')['results']
assert_equal(len(find), 0)
def test_make_node_private(self):
self.root.append_file('Change_Gonna_Come.wav')
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 1)
self.node.is_public = False
with run_celery_tasks():
self.node.save()
find = query_file('Change_Gonna_Come.wav')['results']
assert_equal(len(find), 0)
def test_make_private_node_public(self):
self.node.is_public = False
self.node.save()
self.root.append_file('Try a Little Tenderness.flac')
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 0)
self.node.is_public = True
with run_celery_tasks():
self.node.save()
find = query_file('Try a Little Tenderness.flac')['results']
assert_equal(len(find), 1)
def test_delete_node(self):
node = factories.ProjectFactory(is_public=True, title='The Soul Album')
osf_storage = node.get_addon('osfstorage')
root = osf_storage.get_root()
root.append_file('The Dock of the Bay.mp3')
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 1)
node.is_deleted = True
with run_celery_tasks():
node.save()
find = query_file('The Dock of the Bay.mp3')['results']
assert_equal(len(find), 0)
def test_file_download_url_guid(self):
file_ = self.root.append_file('Timber.mp3')
file_guid = file_.get_guid(create=True)
file_.save()
find = query_file('Timber.mp3')['results']
assert_equal(find[0]['guid_url'], '/' + file_guid._id + '/')
def test_file_download_url_no_guid(self):
file_ = self.root.append_file('Timber.mp3')
path = file_.path
deep_url = '/' + file_.target._id + '/files/osfstorage' + path + '/'
find = query_file('Timber.mp3')['results']
assert_not_equal(file_.path, '')
assert_equal(file_.path, path)
assert_equal(find[0]['guid_url'], None)
assert_equal(find[0]['deep_url'], deep_url)
@pytest.mark.enable_quickfiles_creation
def test_quickfiles_files_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 1)
assert find[0]['node_url'] == '/{}/quickfiles/'.format(quickfiles.creator._id)
@pytest.mark.enable_quickfiles_creation
def test_qatest_quickfiles_files_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
file = quickfiles_root.append_file('GreenLight.mp3')
tag = Tag(name='qatest')
tag.save()
file.tags.add(tag)
file.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
@pytest.mark.enable_quickfiles_creation
def test_quickfiles_spam_user_files_do_not_appear_in_search(self):
quickfiles = QuickFilesNode.objects.get(creator=self.node.creator)
quickfiles_osf_storage = quickfiles.get_addon('osfstorage')
quickfiles_root = quickfiles_osf_storage.get_root()
quickfiles_root.append_file('GreenLight.mp3')
self.node.creator.disable_account()
self.node.creator.confirm_spam()
self.node.creator.save()
find = query_file('GreenLight.mp3')['results']
assert_equal(len(find), 0)
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from ext import *
class Main(QtGui.QMainWindow):
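# Main window of the Writer application: a rich-text editor with a toolbar, format bar, menu bar, and table/image support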
def __init__(self,parent=None):
QtGui.QMainWindow.__init__(self,parent)
self.filename = ""
self.changesSaved = True
self.initUI()
def initToolbar(self):
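# Create the document actions (file, edit, insert) and assemble the main toolbar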
self.newAction = QtGui.QAction(QtGui.QIcon("icons/new.png"),"New",self)
self.newAction.setShortcut("Ctrl+N")
self.newAction.setStatusTip("Create a new document from scratch.")
self.newAction.triggered.connect(self.new)
self.openAction = QtGui.QAction(QtGui.QIcon("icons/open.png"),"Open file",self)
self.openAction.setStatusTip("Open existing document")
self.openAction.setShortcut("Ctrl+O")
self.openAction.triggered.connect(self.open)
self.saveAction = QtGui.QAction(QtGui.QIcon("icons/save.png"),"Save",self)
self.saveAction.setStatusTip("Save document")
self.saveAction.setShortcut("Ctrl+S")
self.saveAction.triggered.connect(self.save)
self.printAction = QtGui.QAction(QtGui.QIcon("icons/print.png"),"Print document",self)
self.printAction.setStatusTip("Print document")
self.printAction.setShortcut("Ctrl+P")
self.printAction.triggered.connect(self.printHandler)
self.previewAction = QtGui.QAction(QtGui.QIcon("icons/preview.png"),"Page view",self)
self.previewAction.setStatusTip("Preview page before printing")
self.previewAction.setShortcut("Ctrl+Shift+P")
self.previewAction.triggered.connect(self.preview)
self.findAction = QtGui.QAction(QtGui.QIcon("icons/find.png"),"Find and replace",self)
self.findAction.setStatusTip("Find and replace words in your document")
self.findAction.setShortcut("Ctrl+F")
self.findAction.triggered.connect(find.Find(self).show)
self.cutAction = QtGui.QAction(QtGui.QIcon("icons/cut.png"),"Cut to clipboard",self)
self.cutAction.setStatusTip("Delete and copy text to clipboard")
self.cutAction.setShortcut("Ctrl+X")
self.cutAction.triggered.connect(self.text.cut)
self.copyAction = QtGui.QAction(QtGui.QIcon("icons/copy.png"),"Copy to clipboard",self)
self.copyAction.setStatusTip("Copy text to clipboard")
self.copyAction.setShortcut("Ctrl+C")
self.copyAction.triggered.connect(self.text.copy)
self.pasteAction = QtGui.QAction(QtGui.QIcon("icons/paste.png"),"Paste from clipboard",self)
self.pasteAction.setStatusTip("Paste text from clipboard")
self.pasteAction.setShortcut("Ctrl+V")
self.pasteAction.triggered.connect(self.text.paste)
self.undoAction = QtGui.QAction(QtGui.QIcon("icons/undo.png"),"Undo last action",self)
self.undoAction.setStatusTip("Undo last action")
self.undoAction.setShortcut("Ctrl+Z")
self.undoAction.triggered.connect(self.text.undo)
self.redoAction = QtGui.QAction(QtGui.QIcon("icons/redo.png"),"Redo last undone thing",self)
self.redoAction.setStatusTip("Redo last undone thing")
self.redoAction.setShortcut("Ctrl+Y")
self.redoAction.triggered.connect(self.text.redo)
dateTimeAction = QtGui.QAction(QtGui.QIcon("icons/calender.png"),"Insert current date/time",self)
dateTimeAction.setStatusTip("Insert current date/time")
dateTimeAction.setShortcut("Ctrl+D")
dateTimeAction.triggered.connect(datetime.DateTime(self).show)
wordCountAction = QtGui.QAction(QtGui.QIcon("icons/count.png"),"See word/symbol count",self)
wordCountAction.setStatusTip("See word/symbol count")
wordCountAction.setShortcut("Ctrl+W")
wordCountAction.triggered.connect(self.wordCount)
tableAction = QtGui.QAction(QtGui.QIcon("icons/table.png"),"Insert table",self)
tableAction.setStatusTip("Insert table")
tableAction.setShortcut("Ctrl+T")
tableAction.triggered.connect(table.Table(self).show)
imageAction = QtGui.QAction(QtGui.QIcon("icons/image.png"),"Insert image",self)
imageAction.setStatusTip("Insert image")
imageAction.setShortcut("Ctrl+Shift+I")
imageAction.triggered.connect(self.insertImage)
bulletAction = QtGui.QAction(QtGui.QIcon("icons/bullet.png"),"Insert bullet List",self)
bulletAction.setStatusTip("Insert bullet list")
bulletAction.setShortcut("Ctrl+Shift+B")
bulletAction.triggered.connect(self.bulletList)
numberedAction = QtGui.QAction(QtGui.QIcon("icons/number.png"),"Insert numbered List",self)
numberedAction.setStatusTip("Insert numbered list")
numberedAction.setShortcut("Ctrl+Shift+L")
numberedAction.triggered.connect(self.numberList)
self.toolbar = self.addToolBar("Options")
self.toolbar.addAction(self.newAction)
self.toolbar.addAction(self.openAction)
self.toolbar.addAction(self.saveAction)
self.toolbar.addSeparator()
self.toolbar.addAction(self.printAction)
self.toolbar.addAction(self.previewAction)
self.toolbar.addSeparator()
self.toolbar.addAction(self.cutAction)
self.toolbar.addAction(self.copyAction)
self.toolbar.addAction(self.pasteAction)
self.toolbar.addAction(self.undoAction)
self.toolbar.addAction(self.redoAction)
self.toolbar.addSeparator()
self.toolbar.addAction(self.findAction)
self.toolbar.addAction(dateTimeAction)
self.toolbar.addAction(wordCountAction)
self.toolbar.addAction(tableAction)
self.toolbar.addAction(imageAction)
self.toolbar.addSeparator()
self.toolbar.addAction(bulletAction)
self.toolbar.addAction(numberedAction)
self.addToolBarBreak()
def initFormatbar(self):
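# Create the formatting controls (font, size, color, style, alignment, indentation) and assemble the format bar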
fontBox = QtGui.QFontComboBox(self)
fontBox.currentFontChanged.connect(lambda font: self.text.setCurrentFont(font))
fontSize = QtGui.QSpinBox(self)
# Will display " pt" after each value
fontSize.setSuffix(" pt")
fontSize.valueChanged.connect(lambda size: self.text.setFontPointSize(size))
fontSize.setValue(14)
fontColor = QtGui.QAction(QtGui.QIcon("icons/font-color.png"),"Change font color",self)
fontColor.triggered.connect(self.fontColorChanged)
boldAction = QtGui.QAction(QtGui.QIcon("icons/bold.png"),"Bold",self)
boldAction.triggered.connect(self.bold)
italicAction = QtGui.QAction(QtGui.QIcon("icons/italic.png"),"Italic",self)
italicAction.triggered.connect(self.italic)
underlAction = QtGui.QAction(QtGui.QIcon("icons/underline.png"),"Underline",self)
underlAction.triggered.connect(self.underline)
strikeAction = QtGui.QAction(QtGui.QIcon("icons/strike.png"),"Strike-out",self)
strikeAction.triggered.connect(self.strike)
superAction = QtGui.QAction(QtGui.QIcon("icons/superscript.png"),"Superscript",self)
superAction.triggered.connect(self.superScript)
subAction = QtGui.QAction(QtGui.QIcon("icons/subscript.png"),"Subscript",self)
subAction.triggered.connect(self.subScript)
alignLeft = QtGui.QAction(QtGui.QIcon("icons/align-left.png"),"Align left",self)
alignLeft.triggered.connect(self.alignLeft)
alignCenter = QtGui.QAction(QtGui.QIcon("icons/align-center.png"),"Align center",self)
alignCenter.triggered.connect(self.alignCenter)
alignRight = QtGui.QAction(QtGui.QIcon("icons/align-right.png"),"Align right",self)
alignRight.triggered.connect(self.alignRight)
alignJustify = QtGui.QAction(QtGui.QIcon("icons/align-justify.png"),"Align justify",self)
alignJustify.triggered.connect(self.alignJustify)
indentAction = QtGui.QAction(QtGui.QIcon("icons/indent.png"),"Indent Area",self)
indentAction.setShortcut("Ctrl+Tab")
indentAction.triggered.connect(self.indent)
dedentAction = QtGui.QAction(QtGui.QIcon("icons/dedent.png"),"Dedent Area",self)
dedentAction.setShortcut("Shift+Tab")
dedentAction.triggered.connect(self.dedent)
backColor = QtGui.QAction(QtGui.QIcon("icons/highlight.png"),"Change background color",self)
backColor.triggered.connect(self.highlight)
self.formatbar = self.addToolBar("Format")
self.formatbar.addWidget(fontBox)
self.formatbar.addWidget(fontSize)
self.formatbar.addSeparator()
self.formatbar.addAction(fontColor)
self.formatbar.addAction(backColor)
self.formatbar.addSeparator()
self.formatbar.addAction(boldAction)
self.formatbar.addAction(italicAction)
self.formatbar.addAction(underlAction)
self.formatbar.addAction(strikeAction)
self.formatbar.addAction(superAction)
self.formatbar.addAction(subAction)
self.formatbar.addSeparator()
self.formatbar.addAction(alignLeft)
self.formatbar.addAction(alignCenter)
self.formatbar.addAction(alignRight)
self.formatbar.addAction(alignJustify)
self.formatbar.addSeparator()
self.formatbar.addAction(indentAction)
self.formatbar.addAction(dedentAction)
def initMenubar(self):
menubar = self.menuBar()
file = menubar.addMenu("File")
edit = menubar.addMenu("Edit")
view = menubar.addMenu("View")
# Add the most important actions to the menubar
file.addAction(self.newAction)
file.addAction(self.openAction)
file.addAction(self.saveAction)
file.addAction(self.printAction)
file.addAction(self.previewAction)
edit.addAction(self.undoAction)
edit.addAction(self.redoAction)
edit.addAction(self.cutAction)
edit.addAction(self.copyAction)
edit.addAction(self.pasteAction)
edit.addAction(self.findAction)
# Toggling actions for the various bars
toolbarAction = QtGui.QAction("Toggle Toolbar",self)
toolbarAction.triggered.connect(self.toggleToolbar)
formatbarAction = QtGui.QAction("Toggle Formatbar",self)
formatbarAction.triggered.connect(self.toggleFormatbar)
statusbarAction = QtGui.QAction("Toggle Statusbar",self)
statusbarAction.triggered.connect(self.toggleStatusbar)
view.addAction(toolbarAction)
view.addAction(formatbarAction)
view.addAction(statusbarAction)
def initUI(self):
self.text = QtGui.QTextEdit(self)
# Set the tab stop width to around 33 pixels which is
# more or less 8 spaces
self.text.setTabStopWidth(33)
self.initToolbar()
self.initFormatbar()
self.initMenubar()
self.setCentralWidget(self.text)
# Initialize a statusbar for the window
self.statusbar = self.statusBar()
# If the cursor position changes, call the function that displays
# the line and column number
self.text.cursorPositionChanged.connect(self.cursorPosition)
# We need our own context menu for tables
self.text.setContextMenuPolicy(Qt.CustomContextMenu)
self.text.customContextMenuRequested.connect(self.context)
self.text.textChanged.connect(self.changed)
self.setGeometry(100,100,1030,800)
self.setWindowTitle("Writer")
self.setWindowIcon(QtGui.QIcon("icons/icon.png"))
def changed(self):
self.changesSaved = False
def closeEvent(self,event):
if self.changesSaved:
event.accept()
else:
popup = QtGui.QMessageBox(self)
popup.setIcon(QtGui.QMessageBox.Warning)
popup.setText("The document has been modified")
popup.setInformativeText("Do you want to save your changes?")
popup.setStandardButtons(QtGui.QMessageBox.Save |
QtGui.QMessageBox.Cancel |
QtGui.QMessageBox.Discard)
popup.setDefaultButton(QtGui.QMessageBox.Save)
answer = popup.exec_()
if answer == QtGui.QMessageBox.Save:
self.save()
elif answer == QtGui.QMessageBox.Discard:
event.accept()
else:
event.ignore()
def context(self,pos):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table, if there is one
table = cursor.currentTable()
# Above will return 0 if there is no current table, in which case
# we call the normal context menu. If there is a table, we create
# our own context menu specific to table interaction
if table:
menu = QtGui.QMenu(self)
appendRowAction = QtGui.QAction("Append row",self)
appendRowAction.triggered.connect(lambda: table.appendRows(1))
appendColAction = QtGui.QAction("Append column",self)
appendColAction.triggered.connect(lambda: table.appendColumns(1))
removeRowAction = QtGui.QAction("Remove row",self)
removeRowAction.triggered.connect(self.removeRow)
removeColAction = QtGui.QAction("Remove column",self)
removeColAction.triggered.connect(self.removeCol)
insertRowAction = QtGui.QAction("Insert row",self)
insertRowAction.triggered.connect(self.insertRow)
insertColAction = QtGui.QAction("Insert column",self)
insertColAction.triggered.connect(self.insertCol)
mergeAction = QtGui.QAction("Merge cells",self)
mergeAction.triggered.connect(lambda: table.mergeCells(cursor))
# Only allow merging if there is a selection
if not cursor.hasSelection():
mergeAction.setEnabled(False)
splitAction = QtGui.QAction("Split cells",self)
cell = table.cellAt(cursor)
# Only allow splitting if the current cell is larger
# than a normal cell
if cell.rowSpan() > 1 or cell.columnSpan() > 1:
splitAction.triggered.connect(lambda: table.splitCell(cell.row(),cell.column(),1,1))
else:
splitAction.setEnabled(False)
menu.addAction(appendRowAction)
menu.addAction(appendColAction)
menu.addSeparator()
menu.addAction(removeRowAction)
menu.addAction(removeColAction)
menu.addSeparator()
menu.addAction(insertRowAction)
menu.addAction(insertColAction)
menu.addSeparator()
menu.addAction(mergeAction)
menu.addAction(splitAction)
# Convert the widget coordinates into global coordinates
pos = self.mapToGlobal(pos)
# Add pixels for the tool and formatbars, which are not included
# in mapToGlobal(), but only if the two are currently visible and
# not toggled by the user
if self.toolbar.isVisible():
pos.setY(pos.y() + 45)
if self.formatbar.isVisible():
pos.setY(pos.y() + 45)
# Move the menu to the new position
menu.move(pos)
menu.show()
else:
event = QtGui.QContextMenuEvent(QtGui.QContextMenuEvent.Mouse,QtCore.QPoint())
self.text.contextMenuEvent(event)
def removeRow(self):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table (we assume there is one, since
# this is checked before calling)
table = cursor.currentTable()
# Get the current cell
cell = table.cellAt(cursor)
# Delete the cell's row
table.removeRows(cell.row(),1)
def removeCol(self):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table (we assume there is one, since
# this is checked before calling)
table = cursor.currentTable()
# Get the current cell
cell = table.cellAt(cursor)
# Delete the cell's column
table.removeColumns(cell.column(),1)
def insertRow(self):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table (we assume there is one, since
# this is checked before calling)
table = cursor.currentTable()
# Get the current cell
cell = table.cellAt(cursor)
# Insert a new row at the cell's position
table.insertRows(cell.row(),1)
def insertCol(self):
# Grab the cursor
cursor = self.text.textCursor()
# Grab the current table (we assume there is one, since
# this is checked before calling)
table = cursor.currentTable()
# Get the current cell
cell = table.cellAt(cursor)
# Insert a new column at the cell's position
table.insertColumns(cell.column(),1)
def toggleToolbar(self):
state = self.toolbar.isVisible()
# Set the visibility to its inverse
self.toolbar.setVisible(not state)
def toggleFormatbar(self):
state = self.formatbar.isVisible()
# Set the visibility to its inverse
self.formatbar.setVisible(not state)
def toggleStatusbar(self):
state = self.statusbar.isVisible()
# Set the visibility to its inverse
self.statusbar.setVisible(not state)
def new(self):
spawn = Main()
spawn.show()
def open(self):
# Get filename and show only .writer files
self.filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File',".","(*.writer)")
if self.filename:
with open(self.filename,"rt") as file:
self.text.setText(file.read())
def save(self):
# Only open dialog if there is no filename yet
if not self.filename:
self.filename = QtGui.QFileDialog.getSaveFileName(self, 'Save File')
if self.filename:
# Append extension if not there yet
if not self.filename.endswith(".writer"):
self.filename += ".writer"
# We store the editor's contents, formatting included, as HTML,
# which Qt generates for us
with open(self.filename,"wt") as file:
file.write(self.text.toHtml())
self.changesSaved = True
def preview(self):
# Open preview dialog
preview = QtGui.QPrintPreviewDialog()
# If a print is requested, open print dialog
preview.paintRequested.connect(lambda p: self.text.print_(p))
preview.exec_()
def printHandler(self):
# Open printing dialog
dialog = QtGui.QPrintDialog()
if dialog.exec_() == QtGui.QDialog.Accepted:
self.text.document().print_(dialog.printer())
def cursorPosition(self):
cursor = self.text.textCursor()
# Mortals like 1-indexed things
line = cursor.blockNumber() + 1
col = cursor.columnNumber()
self.statusbar.showMessage("Line: {} | Column: {}".format(line,col))
def wordCount(self):
wc = wordcount.WordCount(self)
wc.getText()
wc.show()
def insertImage(self):
# Get image file name
filename = QtGui.QFileDialog.getOpenFileName(self, 'Insert image',".","Images (*.png *.xpm *.jpg *.bmp *.gif)")
if filename:
# Create image object
image = QtGui.QImage(filename)
# Error if unloadable
if image.isNull():
popup = QtGui.QMessageBox(QtGui.QMessageBox.Critical,
"Image load error",
"Could not load image file!",
QtGui.QMessageBox.Ok,
self)
popup.show()
else:
cursor = self.text.textCursor()
cursor.insertImage(image,filename)
def fontColorChanged(self):
# Get a color from the text dialog
color = QtGui.QColorDialog.getColor()
# Set it as the new text color
self.text.setTextColor(color)
def highlight(self):
color = QtGui.QColorDialog.getColor()
self.text.setTextBackgroundColor(color)
def bold(self):
if self.text.fontWeight() == QtGui.QFont.Bold:
self.text.setFontWeight(QtGui.QFont.Normal)
else:
self.text.setFontWeight(QtGui.QFont.Bold)
def italic(self):
state = self.text.fontItalic()
self.text.setFontItalic(not state)
def underline(self):
state = self.text.fontUnderline()
self.text.setFontUnderline(not state)
def strike(self):
# Grab the text's format
fmt = self.text.currentCharFormat()
# Set the fontStrikeOut property to its opposite
fmt.setFontStrikeOut(not fmt.fontStrikeOut())
# And set the next char format
self.text.setCurrentCharFormat(fmt)
def superScript(self):
# Grab the current format
fmt = self.text.currentCharFormat()
# And get the vertical alignment property
align = fmt.verticalAlignment()
# Toggle the state
if align == QtGui.QTextCharFormat.AlignNormal:
fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSuperScript)
else:
fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal)
# Set the new format
self.text.setCurrentCharFormat(fmt)
def subScript(self):
# Grab the current format
fmt = self.text.currentCharFormat()
# And get the vertical alignment property
align = fmt.verticalAlignment()
# Toggle the state
if align == QtGui.QTextCharFormat.AlignNormal:
fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSubScript)
else:
fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal)
# Set the new format
self.text.setCurrentCharFormat(fmt)
def alignLeft(self):
self.text.setAlignment(Qt.AlignLeft)
def alignRight(self):
self.text.setAlignment(Qt.AlignRight)
def alignCenter(self):
self.text.setAlignment(Qt.AlignCenter)
def alignJustify(self):
self.text.setAlignment(Qt.AlignJustify)
def indent(self):
# Grab the cursor
cursor = self.text.textCursor()
if cursor.hasSelection():
# Store the current line/block number
temp = cursor.blockNumber()
# Move to the other end of the selection (the anchor)
cursor.setPosition(cursor.anchor())
# Calculate range of selection
diff = cursor.blockNumber() - temp
direction = QtGui.QTextCursor.Up if diff > 0 else QtGui.QTextCursor.Down
# Iterate over lines (diff absolute value)
for n in range(abs(diff) + 1):
# Move to start of each line
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
# Insert tabbing
cursor.insertText("\t")
# And move on to the next selected line
cursor.movePosition(direction)
# If there is no selection, just insert a tab
else:
cursor.insertText("\t")
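# Illustrative sketch (not part of the original tutorial): the selection-spanning
# trick used by indent() above and dedent() below, shown in isolation. Given a
# QTextCursor with an active selection, the number of selected lines and the
# direction to walk through them follow from the block-number difference:
#
#     start_block = cursor.blockNumber()          # block where the cursor sits
#     cursor.setPosition(cursor.anchor())         # jump to the other end
#     diff = cursor.blockNumber() - start_block   # signed span in blocks
#     lines_to_visit = abs(diff) + 1
#     direction = QtGui.QTextCursor.Up if diff > 0 else QtGui.QTextCursor.Down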
def handleDedent(self,cursor):
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
# Grab the current line
line = cursor.block().text()
# If the line starts with a tab character, delete it
if line.startswith("\t"):
# Delete next character
cursor.deleteChar()
# Otherwise, delete leading spaces (at most 8) until a non-space character is met
else:
for char in line[:8]:
if char != " ":
break
cursor.deleteChar()
def dedent(self):
cursor = self.text.textCursor()
if cursor.hasSelection():
# Store the current line/block number
temp = cursor.blockNumber()
# Move to the other end of the selection (the anchor)
cursor.setPosition(cursor.anchor())
# Calculate range of selection
diff = cursor.blockNumber() - temp
direction = QtGui.QTextCursor.Up if diff > 0 else QtGui.QTextCursor.Down
# Iterate over lines
for n in range(abs(diff) + 1):
self.handleDedent(cursor)
# Move to the next selected line
cursor.movePosition(direction)
else:
self.handleDedent(cursor)
def bulletList(self):
cursor = self.text.textCursor()
# Insert bulleted list
cursor.insertList(QtGui.QTextListFormat.ListDisc)
def numberList(self):
cursor = self.text.textCursor()
# Insert list with numbers
cursor.insertList(QtGui.QTextListFormat.ListDecimal)
def main():
app = QtGui.QApplication(sys.argv)
main = Main()
main.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
|
import os
import sys
import pytest
from os.path import join
from sqlalchemy.exc import InvalidRequestError
from textwrap import dedent
from ...api import Gradebook
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderAssign(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["assign", "--help-all"])
def test_no_args(self):
"""Is there an error if no arguments are given?"""
run_nbgrader(["assign"], retcode=1)
def test_conflicting_args(self):
"""Is there an error if assignment is specified both in config and as an argument?"""
run_nbgrader(["assign", "--assignment", "foo", "foo"], retcode=1)
def test_multiple_args(self):
"""Is there an error if multiple arguments are given?"""
run_nbgrader(["assign", "foo", "bar"], retcode=1)
def test_no_assignment(self, course_dir):
"""Is an error thrown if the assignment doesn't exist?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
run_nbgrader(["assign", "ps1"], retcode=1)
# check that the --create flag is properly deprecated
run_nbgrader(["assign", "ps1", "--create"], retcode=1)
def test_single_file(self, course_dir, temp_cwd):
"""Can a single file be assigned?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb"))
def test_multiple_files(self, course_dir):
"""Can multiple files be assigned?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
self._empty_notebook(join(course_dir, 'source', 'ps1', 'bar.ipynb'))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.ipynb'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'bar.ipynb'))
def test_dependent_files(self, course_dir):
"""Are dependent files properly linked?"""
self._make_file(join(course_dir, 'source', 'ps1', 'data', 'foo.csv'), 'foo')
self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.csv'), 'bar')
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
self._empty_notebook(join(course_dir, 'source', 'ps1', 'bar.ipynb'))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.ipynb'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'bar.ipynb'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'foo.csv'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'bar.csv'))
with open(join(course_dir, 'release', 'ps1', 'data', 'foo.csv'), 'r') as fh:
assert fh.read() == 'foo'
with open(join(course_dir, 'release', 'ps1', 'data', 'bar.csv'), 'r') as fh:
assert fh.read() == 'bar'
def test_save_cells(self, db, course_dir):
"""Ensure cells are saved into the database"""
self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb'))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
notebook = gb.find_notebook("test", "ps1")
assert len(notebook.grade_cells) == 6
gb.close()
def test_force(self, course_dir):
"""Ensure the force option works properly"""
self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb'))
self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), "foo")
self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.txt'), "bar")
self._make_file(join(course_dir, 'source', 'ps1', 'blah.pyc'), "asdf")
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'test.ipynb'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'bar.txt'))
assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'blah.pyc'))
# check that it skips the existing directory
os.remove(join(course_dir, 'release', 'ps1', 'foo.txt'))
run_nbgrader(["assign", "ps1"])
assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
# force overwrite the supplemental files
run_nbgrader(["assign", "ps1", "--force"])
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
# after removing the source file, force overwrite should drop it from the release directory
os.remove(join(course_dir, 'source', 'ps1', 'foo.txt'))
run_nbgrader(["assign", "ps1", "--force"])
assert os.path.isfile(join(course_dir, "release", "ps1", "test.ipynb"))
assert os.path.isfile(join(course_dir, "release", "ps1", "data", "bar.txt"))
assert not os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
assert not os.path.isfile(join(course_dir, "release", "ps1", "blah.pyc"))
def test_permissions(self, course_dir):
"""Are permissions properly set?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), 'foo')
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
if sys.platform == 'win32':
perms = '666'
else:
perms = '644'
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb"))
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
assert self._get_permissions(join(course_dir, "release", "ps1", "foo.ipynb")) == perms
assert self._get_permissions(join(course_dir, "release", "ps1", "foo.txt")) == perms
def test_custom_permissions(self, course_dir):
"""Are custom permissions properly set?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), 'foo')
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--AssignApp.permissions=444"])
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb"))
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
assert self._get_permissions(join(course_dir, "release", "ps1", "foo.ipynb")) == "444"
assert self._get_permissions(join(course_dir, "release", "ps1", "foo.txt")) == "444"
def test_add_remove_extra_notebooks(self, db, course_dir):
"""Are extra notebooks added and removed?"""
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
assignment = gb.find_assignment("ps1")
assert len(assignment.notebooks) == 1
notebook1 = gb.find_notebook("test", "ps1")
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test2.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db, "--force"])
gb.db.refresh(assignment)
assert len(assignment.notebooks) == 2
gb.db.refresh(notebook1)
notebook2 = gb.find_notebook("test2", "ps1")
os.remove(join(course_dir, "source", "ps1", "test2.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db, "--force"])
gb.db.refresh(assignment)
assert len(assignment.notebooks) == 1
gb.db.refresh(notebook1)
with pytest.raises(InvalidRequestError):
gb.db.refresh(notebook2)
gb.close()
def test_add_extra_notebooks_with_submissions(self, db, course_dir):
"""Is an error thrown when new notebooks are added and there are existing submissions?"""
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
assignment = gb.find_assignment("ps1")
assert len(assignment.notebooks) == 1
gb.add_student("hacker123")
gb.add_submission("ps1", "hacker123")
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test2.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db, "--force"], retcode=1)
gb.close()
def test_remove_extra_notebooks_with_submissions(self, db, course_dir):
"""Is an error thrown when notebooks are removed and there are existing submissions?"""
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test2.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
assignment = gb.find_assignment("ps1")
assert len(assignment.notebooks) == 2
gb.add_student("hacker123")
gb.add_submission("ps1", "hacker123")
os.remove(join(course_dir, "source", "ps1", "test2.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db, "--force"], retcode=1)
gb.close()
def test_same_notebooks_with_submissions(self, db, course_dir):
"""Is it ok to run nbgrader assign with the same notebooks and existing submissions?"""
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
assignment = gb.find_assignment("ps1")
assert len(assignment.notebooks) == 1
notebook = assignment.notebooks[0]
gb.add_student("hacker123")
submission = gb.add_submission("ps1", "hacker123")
submission_notebook = submission.notebooks[0]
run_nbgrader(["assign", "ps1", "--db", db, "--force"])
gb.db.refresh(assignment)
assert len(assignment.notebooks) == 1
gb.db.refresh(notebook)
gb.db.refresh(submission)
gb.db.refresh(submission_notebook)
gb.close()
def test_force_single_notebook(self, course_dir):
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
assert os.path.exists(join(course_dir, "release", "ps1", "p2.ipynb"))
p1 = self._file_contents(join(course_dir, "release", "ps1", "p1.ipynb"))
p2 = self._file_contents(join(course_dir, "release", "ps1", "p2.ipynb"))
assert p1 == p2
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
run_nbgrader(["assign", "ps1", "--notebook", "p1", "--force"])
assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
assert os.path.exists(join(course_dir, "release", "ps1", "p2.ipynb"))
assert p1 != self._file_contents(join(course_dir, "release", "ps1", "p1.ipynb"))
assert p2 == self._file_contents(join(course_dir, "release", "ps1", "p2.ipynb"))
def test_fail_no_notebooks(self):
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"], retcode=1)
def test_no_metadata(self, course_dir):
self._copy_file(join("files", "test-no-metadata.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
# it should fail because of the solution regions
run_nbgrader(["assign", "ps1", "--no-db"], retcode=1)
# it should pass now that we're not enforcing metadata
run_nbgrader(["assign", "ps1", "--no-db", "--no-metadata"])
assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Only the (de)serialization utilities have been kept here, in order to keep
# the number of requirements down.
"""Utility methods for working with WSGI servers."""
import datetime
from xml.dom import minidom
from xml.parsers import expat
from sahara.openstack.common import exception
from sahara.openstack.common.gettextutils import _
from sahara.openstack.common import jsonutils
from sahara.openstack.common import log as logging
from sahara.openstack.common import xmlutils
LOG = logging.getLogger(__name__)
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class DictSerializer(ActionDispatcher):
"""Default request body serialization"""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization"""
def default(self, data):
def sanitizer(obj):
if isinstance(obj, datetime.datetime):
_dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
return _dtime.isoformat()
return unicode(obj)
return jsonutils.dumps(data, default=sanitizer)
class XMLDictSerializer(DictSerializer):
def __init__(self, metadata=None, xmlns=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
:param xmlns: XML namespace to include with serialized xml
"""
super(XMLDictSerializer, self).__init__()
self.metadata = metadata or {}
self.xmlns = xmlns
def default(self, data):
# We expect data to contain a single key which is the XML root.
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
return self.to_xml_string(node)
def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toprettyxml(indent=' ', encoding='UTF-8')
#NOTE (ameade): the has_atom flag should be removed once all of the XML
# serializers and view builders have been updated to the current spec,
# which requires every response to include xmlns:atom; until then the
# flag keeps existing tests from breaking
def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
if has_atom:
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)
#TODO(bcwaldon): accomplish this without a type-check
if type(data) is list:
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
#TODO(bcwaldon): accomplish this without a type-check
elif type(data) is dict:
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result
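# Illustrative example (not part of the original module): with empty
# metadata, a call such as
#
#     XMLDictSerializer().default({'server': {'name': 'vm1', 'id': 42}})
#
# produces roughly
#
#     <?xml version="1.0" encoding="UTF-8"?>
#     <server>
#       <name>vm1</name>
#       <id>42</id>
#     </server>
#
# with lists rendered as repeated singular child nodes by the code above.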
def _create_link_nodes(self, xml_doc, links):
link_nodes = []
for link in links:
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
if 'type' in link:
link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization"""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
try:
node = xmlutils.safe_minidom_parse_string(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:  # 3 == TEXT_NODE
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result
def find_first_child_named(self, parent, name):
"""Search a nodes children for the first child with a given name"""
for node in parent.childNodes:
if node.nodeName == name:
return node
return None
def find_children_named(self, parent, name):
"""Return all of a nodes children who have the given name"""
for node in parent.childNodes:
if node.nodeName == name:
yield node
def extract_text(self, node):
"""Get the text field contained by the given node"""
if len(node.childNodes) == 1:
child = node.childNodes[0]
if child.nodeType == child.TEXT_NODE:
return child.nodeValue
return ""
def default(self, datastring):
return {'body': self._from_xml(datastring)}
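# Minimal usage sketch (illustrative only, not part of the original module):
# the serializers above are normally driven by the WSGI layer, but they can
# be exercised directly, e.g. for debugging.
if __name__ == '__main__':
    json_body = JSONDictSerializer().serialize({'cluster': {'name': 'demo'}})
    parsed = JSONDeserializer().deserialize(json_body)
    xml_body = XMLDictSerializer().serialize({'cluster': {'name': 'demo'}})
    LOG.info('json=%s parsed=%s xml=%s', json_body, parsed, xml_body)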
|
|
#!/usr/bin/python
# convert LLVM GenSystemRegister.inc for Capstone disassembler.
# by Nguyen Anh Quynh, 2019
import sys
if len(sys.argv) == 1:
print("Syntax: %s <GenSystemRegister.inc>" %sys.argv[0])
sys.exit(1)
f = open(sys.argv[1])
lines = f.readlines()
f.close()
#arch = sys.argv[2].upper()
print("""
/* Capstone Disassembly Engine, http://www.capstone-engine.org */
/* By Nguyen Anh Quynh <[email protected]>, 2013-2019 */
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|* *|
|* GenSystemRegister Source Fragment *|
|* *|
|* Automatically generated file, do not edit! *|
|* *|
\*===----------------------------------------------------------------------===*/
""")
# extract BankedRegValues enum
count = 0
for line in lines:
line = line.rstrip()
if len(line.strip()) == 0:
continue
if line.strip() == 'enum BankedRegValues {':
count += 1
print(line.strip())
continue
line = line.strip()
if count == 1:
if line == '};':
# done with first enum
break
else:
# enum items
print("\t%s" %(line))
print('};\n')
# extract MClassSysRegsList
count = 0
for line in lines:
line = line.rstrip()
if len(line.strip()) == 0:
continue
if 'MClassSysRegsList[]' in line:
count += 1
print('static const MClassSysReg MClassSysRegsList[] = {')
continue
if count == 1:
if line.strip() == '};':
# done with the register list
break
else:
# enum items
# { "apsr_g", 0x400, 0x0, 0x400, {ARM::FeatureDSP} }, // 0
line2 = line.replace('::', '_')
sysreg = line2[line2.index('"') + 1 : line2.index('",')]
tmp = line2.split(',')
print("%s, ARM_SYSREG_%s%s" %(line2[:line2.index('",') + 1], sysreg.upper(), line2[line2.index('",') + 1 :]))
print('};\n')
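# Illustrative note (not in the original script): the rewrite above turns an
# LLVM-generated entry such as
#   { "apsr_g", 0x400, 0x0, 0x400, {ARM::FeatureDSP} }, // 0
# into a Capstone-style entry with the register enum spliced in after the name:
#   { "apsr_g", ARM_SYSREG_APSR_G, 0x400, 0x0, 0x400, {ARM_FeatureDSP} }, // 0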
# extract BankedRegsList
count = 0
for line in lines:
line = line.rstrip()
if len(line.strip()) == 0:
continue
if 'BankedRegsList[]' in line:
count += 1
print('static const BankedReg BankedRegsList[] = {')
continue
if count == 1:
if line.strip() == '};':
# done with the register list
break
else:
# enum items
line2 = line.replace('::', '_')
sysreg = line2[line2.index('"') + 1 : line2.index('",')]
tmp = line2.split(',')
print("%s, ARM_SYSREG_%s%s" %(line2[:line2.index('",') + 1], sysreg.upper(), line2[line2.index('",') + 1 :]))
print('};\n')
# lookupMClassSysRegByM2M3Encoding8
count = 0
for line in lines:
line = line.rstrip()
if len(line.strip()) == 0:
continue
if 'lookupMClassSysRegByM2M3Encoding8' in line and '{' in line:
count += 1
print('const MClassSysReg *lookupMClassSysRegByM2M3Encoding8(uint16_t encoding)\n{')
print(' unsigned int i;')
continue
if count == 1 and 'IndexType Index[] = {' in line:
count += 1
if count == 2:
if line.strip() == '};':
# done with the Index[] array; the rest of the function is emitted below
print(line)
break
else:
# enum items
print(line)
print("""
i = binsearch_IndexTypeEncoding(Index, ARR_SIZE(Index), encoding);
if (i == -1)
return NULL;
else
return &MClassSysRegsList[Index[i].index];
}
""")
# lookupMClassSysRegByM1Encoding12
count = 0
for line in lines:
line = line.rstrip()
if len(line.strip()) == 0:
continue
if 'lookupMClassSysRegByM1Encoding12' in line and '{' in line:
count += 1
print('const MClassSysReg *lookupMClassSysRegByM1Encoding12(uint16_t encoding)\n{')
print(' unsigned int i;')
continue
if count == 1 and 'IndexType Index[] = {' in line:
count += 1
if count == 2:
if line.strip() == '};':
# done with the Index[] array; the rest of the function is emitted below
print(line)
break
else:
# enum items
print(line)
print("""
i = binsearch_IndexTypeEncoding(Index, ARR_SIZE(Index), encoding);
if (i == -1)
return NULL;
else
return &MClassSysRegsList[Index[i].index];
}
""")
# lookupBankedRegByEncoding
count = 0
for line in lines:
line = line.rstrip()
if len(line.strip()) == 0:
continue
if 'lookupBankedRegByEncoding' in line and '{' in line:
count += 1
print('const BankedReg *lookupBankedRegByEncoding(uint8_t encoding)\n{')
print(' unsigned int i;')
continue
if count == 1 and 'IndexType Index[] = {' in line:
count += 1
if count == 2:
if line.strip() == '};':
# done with the Index[] array; the rest of the function is emitted below
print(line)
break
else:
# enum items
print(line)
print("""
i = binsearch_IndexTypeEncoding(Index, ARR_SIZE(Index), encoding);
if (i == -1)
return NULL;
else
return &BankedRegsList[Index[i].index];
}
""")
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2015 Ralph Hempel <[email protected]>
# Copyright (c) 2015 Anton Vanhoucke <[email protected]>
# Copyright (c) 2015 Denis Demidov <[email protected]>
# Copyright (c) 2015 Eric Pascual <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
import sys
import os
import io
import fnmatch
import re
import stat
import errno
from os.path import abspath
try:
# if we are in a released build, there will be an auto-generated "version"
# module
from .version import __version__
except ImportError:
__version__ = "<unknown>"
if sys.version_info < (3, 4):
raise SystemError('Must be using Python 3.4 or higher')
def is_micropython():
return sys.implementation.name == "micropython"
def chain_exception(exception, cause):
if is_micropython():
raise exception
else:
raise exception from cause
def get_current_platform():
"""
Look in /sys/class/board-info/ to determine the platform type.
This can return 'ev3', 'evb', 'pistorms', 'brickpi', 'brickpi3' or 'fake'.
"""
board_info_dir = '/sys/class/board-info/'
if not os.path.exists(board_info_dir) or os.environ.get("FAKE_SYS"):
return 'fake'
for board in os.listdir(board_info_dir):
uevent_filename = os.path.join(board_info_dir, board, 'uevent')
if os.path.exists(uevent_filename):
with open(uevent_filename, 'r') as fh:
for line in fh.readlines():
(key, value) = line.strip().split('=')
if key == 'BOARD_INFO_MODEL':
if value == 'LEGO MINDSTORMS EV3':
return 'ev3'
elif value in ('FatcatLab EVB', 'QuestCape'):
return 'evb'
elif value == 'PiStorms':
return 'pistorms'
# This is the same for both BrickPi and BrickPi+.
# There is no way to tell the difference.
elif value == 'Dexter Industries BrickPi':
return 'brickpi'
elif value == 'Dexter Industries BrickPi3':
return 'brickpi3'
elif value == 'FAKE-SYS':
return 'fake'
return None
# -----------------------------------------------------------------------------
def list_device_names(class_path, name_pattern, **kwargs):
"""
This is a generator function that lists names of all devices matching the
provided parameters.
Parameters:
class_path: class path of the device, a subdirectory of /sys/class.
For example, '/sys/class/tacho-motor'.
name_pattern: pattern that device name should match.
For example, 'sensor*' or 'motor*'. Default value: '*'.
keyword arguments: used for matching the corresponding device
attributes. For example, address='outA', or
driver_name=['lego-ev3-us', 'lego-nxt-us']. When argument value
is a list, then a match against any entry of the list is
enough.
"""
if not os.path.isdir(class_path):
return
def matches(attribute, pattern):
try:
with io.FileIO(attribute) as f:
value = f.read().strip().decode()
except Exception:
return False
if isinstance(pattern, list):
return any([value.find(p) >= 0 for p in pattern])
else:
return value.find(pattern) >= 0
for f in os.listdir(class_path):
if fnmatch.fnmatch(f, name_pattern):
path = class_path + '/' + f
if all([matches(path + '/' + k, kwargs[k]) for k in kwargs]):
yield f
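# Illustrative usage sketch (not part of the original module): enumerate the
# names of large motors plugged into output port A. The attribute values used
# here ('outA', 'lego-ev3-l-motor') are examples only.
#
#     for name in list_device_names('/sys/class/tacho-motor', 'motor*',
#                                   address='outA',
#                                   driver_name=['lego-ev3-l-motor']):
#         print(name)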
def library_load_warning_message(library_name, dependent_class):
return 'Import warning: Failed to import "{}". {} will be unusable!'.format(library_name, dependent_class)
class DeviceNotFound(Exception):
pass
class DeviceNotDefined(Exception):
pass
class ThreadNotRunning(Exception):
pass
# -----------------------------------------------------------------------------
# Define the base class from which all other ev3dev classes are defined.
class Device(object):
"""The ev3dev device base class"""
__slots__ = [
'_path',
'_device_index',
'_attr_cache',
'kwargs',
]
DEVICE_ROOT_PATH = '/sys/class'
_DEVICE_INDEX = re.compile(r'^.*(\d+)$')
def __init__(self, class_name, name_pattern='*', name_exact=False, **kwargs):
"""Spin through the Linux sysfs class for the device type and find
a device that matches the provided name pattern and attributes (if any).
Parameters:
class_name: class name of the device, a subdirectory of /sys/class.
For example, 'tacho-motor'.
name_pattern: pattern that device name should match.
For example, 'sensor*' or 'motor*'. Default value: '*'.
name_exact: when True, assume that the name_pattern provided is the
exact device name and use it directly.
keyword arguments: used for matching the corresponding device
attributes. For example, address='outA', or
driver_name=['lego-ev3-us', 'lego-nxt-us']. When argument value
is a list, then a match against any entry of the list is
enough.
Example::
d = ev3dev.Device('tacho-motor', address='outA')
s = ev3dev.Device('lego-sensor', driver_name=['lego-ev3-us', 'lego-nxt-us'])
If there was no valid connected device, an error is thrown.
"""
classpath = abspath(Device.DEVICE_ROOT_PATH + '/' + class_name)
self.kwargs = kwargs
self._attr_cache = {}
def get_index(file):
match = Device._DEVICE_INDEX.match(file)
if match:
return int(match.group(1))
else:
return None
if name_exact:
self._path = classpath + '/' + name_pattern
self._device_index = get_index(name_pattern)
else:
try:
name = next(list_device_names(classpath, name_pattern, **kwargs))
self._path = classpath + '/' + name
self._device_index = get_index(name)
except StopIteration:
self._path = None
self._device_index = None
chain_exception(DeviceNotFound("%s is not connected." % self), None)
def __str__(self):
if 'address' in self.kwargs:
return "%s(%s)" % (self.__class__.__name__, self.kwargs.get('address'))
else:
return self.__class__.__name__
def __repr__(self):
return self.__str__()
# This allows us to sort lists of Device objects
def __lt__(self, other):
return str(self) < str(other)
def _attribute_file_open(self, name):
path = os.path.join(self._path, name)
mode = stat.S_IMODE(os.stat(path)[stat.ST_MODE])
r_ok = mode & stat.S_IRGRP
w_ok = mode & stat.S_IWGRP
if r_ok and w_ok:
mode_str = 'r+'
elif w_ok:
mode_str = 'w'
else:
mode_str = 'r'
return io.FileIO(path, mode_str)
def _get_attribute(self, attribute, name):
"""Device attribute getter"""
try:
if attribute is None:
attribute = self._attribute_file_open(name)
else:
attribute.seek(0)
return attribute, attribute.read().strip().decode()
except Exception as ex:
self._raise_friendly_access_error(ex, name, None)
def _set_attribute(self, attribute, name, value):
"""Device attribute setter"""
try:
if attribute is None:
attribute = self._attribute_file_open(name)
else:
attribute.seek(0)
if isinstance(value, str):
value = value.encode()
attribute.write(value)
attribute.flush()
except Exception as ex:
self._raise_friendly_access_error(ex, name, value)
return attribute
def _raise_friendly_access_error(self, driver_error, attribute, value):
if not isinstance(driver_error, OSError):
raise driver_error
driver_errorno = driver_error.args[0] if is_micropython() else driver_error.errno
if driver_errorno == errno.EINVAL:
if attribute == "speed_sp":
try:
max_speed = self.max_speed
except (AttributeError, Exception):
chain_exception(ValueError("The given speed value {} was out of range".format(value)), driver_error)
else:
chain_exception(
ValueError("The given speed value {} was out of range. Max speed: +/-{}".format(
value, max_speed)), driver_error)
chain_exception(ValueError("One or more arguments were out of range or invalid, value {}".format(value)),
driver_error)
elif driver_errorno == errno.ENODEV or driver_errorno == errno.ENOENT:
# We will assume that a file-not-found error is the result of a disconnected device
# rather than a library error. If that isn't the case, at a minimum the underlying
# error info will be printed for debugging.
chain_exception(DeviceNotFound("%s is no longer connected" % self), driver_error)
raise driver_error
def get_attr_int(self, attribute, name):
attribute, value = self._get_attribute(attribute, name)
return attribute, int(value)
def get_cached_attr_int(self, filehandle, keyword):
value = self._attr_cache.get(keyword)
if value is None:
(filehandle, value) = self.get_attr_int(filehandle, keyword)
self._attr_cache[keyword] = value
return (filehandle, value)
def set_attr_int(self, attribute, name, value):
return self._set_attribute(attribute, name, str(int(value)))
def set_attr_raw(self, attribute, name, value):
return self._set_attribute(attribute, name, value)
def get_attr_string(self, attribute, name):
return self._get_attribute(attribute, name)
def get_cached_attr_string(self, filehandle, keyword):
value = self._attr_cache.get(keyword)
if value is None:
(filehandle, value) = self.get_attr_string(filehandle, keyword)
self._attr_cache[keyword] = value
return (filehandle, value)
def set_attr_string(self, attribute, name, value):
return self._set_attribute(attribute, name, value)
def get_attr_line(self, attribute, name):
return self._get_attribute(attribute, name)
def get_attr_set(self, attribute, name):
attribute, value = self.get_attr_line(attribute, name)
return attribute, [v.strip('[]') for v in value.split()]
def get_cached_attr_set(self, filehandle, keyword):
value = self._attr_cache.get(keyword)
if value is None:
(filehandle, value) = self.get_attr_set(filehandle, keyword)
self._attr_cache[keyword] = value
return (filehandle, value)
def get_attr_from_set(self, attribute, name):
attribute, value = self.get_attr_line(attribute, name)
for a in value.split():
v = a.strip('[]')
if v != a:
return v
return ""
@property
def device_index(self):
return self._device_index
def list_devices(class_name, name_pattern, **kwargs):
"""
This is a generator function that takes same arguments as `Device` class
and enumerates all devices present in the system that match the provided
arguments.
Parameters:
class_name: class name of the device, a subdirectory of /sys/class.
For example, 'tacho-motor'.
name_pattern: pattern that device name should match.
For example, 'sensor*' or 'motor*'. Default value: '*'.
keyword arguments: used for matching the corresponding device
attributes. For example, address='outA', or
driver_name=['lego-ev3-us', 'lego-nxt-us']. When argument value
is a list, then a match against any entry of the list is
enough.
"""
classpath = abspath(Device.DEVICE_ROOT_PATH + '/' + class_name)
return (Device(class_name, name, name_exact=True) for name in list_device_names(classpath, name_pattern, **kwargs))
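# Minimal usage sketch (illustrative only, not part of the original module):
# open the first connected tacho motor, if any, and report its sysfs index.
if __name__ == '__main__':
    try:
        motor = Device('tacho-motor', name_pattern='motor*')
        print(motor, 'device_index =', motor.device_index)
    except DeviceNotFound as error:
        print(error)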
|
|
"""
This module contains objects related to experimental design abstractions.
Public objects are imported in ``__init__.py``.
"""
from collections.abc import Iterable
import itertools
import collections
from copy import copy
import numpy as np
from schema import Schema, Or, Optional, And, Use
import experimentator.order as order
Level = collections.namedtuple('Level', ('name', 'design'))
class Design:
"""
|Design| instances specify the experimental design at one level of the experimental hierarchy.
They guide the creation of |ExperimentSection| instances
by parsing design matrices or crossing independent variables (IVs).
Parameters
----------
ivs : dict or list of tuple, optional
Independent variables can be specified as a dictionary mapping names to possible values,
or as a list of ``(name, values)`` tuples.
If an IV takes continuous values, use ``None`` for its levels.
This only works when specifying values using `design_matrix`.
See the |IV docs| for more information.
design_matrix : array-like, optional
A |numpy array| (or convertible, e.g. a list-of-lists)
representing a design matrix specifying how IV values should be grouped to form conditions.
When no `design_matrix` is passed, IVs are fully crossed.
See the |design matrix docs| for more details.
Note that a design matrix may also specify the order of the conditions.
For this reason, the default `ordering` changes from |Shuffle| to |Ordering|,
preserving the order of the conditions.
ordering : |Ordering|, optional
An instance of |Ordering| or one of its subclasses defining the behavior
for duplicating and ordering the conditions of the |Design|.
The default is |Shuffle| unless a `design_matrix` is passed.
extra_data : dict, optional
Items from this dictionary will be included in the |data| attribute
of any |ExperimentSection| instances created with this |Design|.
Attributes
----------
iv_names : list of str
iv_values : list of tuple
design_matrix : array-like
extra_data : dict
ordering : |Ordering|
heterogeneous_design_iv_name : str
The IV name that triggers a heterogeneous (i.e., branching) tree structure when it is encountered.
``'design'`` by default.
is_heterogeneous : bool
True if this |Design| is the lowest level before the tree structure diverges.
branches : dict
The IV values corresponding to named heterogeneous branches in the tree structure following this |Design|.
See Also
--------
experimentator.order
experimentator.DesignTree
Examples
--------
>>> from experimentator.order import Shuffle
>>> design = Design(ivs={'side': ['left', 'right'], 'difficulty': ['easy', 'hard']}, ordering=Shuffle(2))
>>> design.first_pass()
IndependentVariable(name=(), values=())
>>> design.get_order()
[{'difficulty': 'easy', 'side': 'left'},
{'difficulty': 'hard', 'side': 'left'},
{'difficulty': 'easy', 'side': 'left'},
{'difficulty': 'hard', 'side': 'right'},
{'difficulty': 'easy', 'side': 'right'},
{'difficulty': 'easy', 'side': 'right'},
{'difficulty': 'hard', 'side': 'left'},
{'difficulty': 'hard', 'side': 'right'}]
"""
heterogeneous_design_iv_name = 'design'
def __init__(self, ivs=None, design_matrix=None, ordering=None, extra_data=None):
if isinstance(ivs, dict):
ivs = list(ivs.items())
if ivs:
iv_names, iv_values = zip(*ivs)
self.iv_names = list(iv_names)
self.iv_values = list(iv_values)
else:
self.iv_names = []
self.iv_values = []
self.design_matrix = design_matrix
self.extra_data = extra_data or {}
if ordering:
self.ordering = ordering
elif design_matrix is None:
self.ordering = order.Shuffle()
else:
self.ordering = order.Ordering()
if self.design_matrix is None and any(iv_values is None for iv_values in self.iv_values):
raise TypeError('Must specify a design matrix if using continuous IVs (values=None)')
@classmethod
def from_dict(cls, spec):
"""Construct a |Design| instance from a specification based on dictionaries (e.g., parsed from a YAML file).
Parameters
----------
spec : dict
A dictionary containing some of the following keys (all optional):
``'name'``, the name of the level;
``'ivs'``, ``'design_matrix'``, ``'extra_data'``, keyword arguments to the |Design| constructor;
``'order'`` or ``'ordering'``, a string, dictionary, or list determining the ordering method; and
``'n'`` or ``'number'``, the ``number`` argument to the specified ordering.
A dictionary containing any fields not otherwise used
is passed to the |Design| constructor as the ``extra_data`` argument.
See the |description in the docs| for more information.
Returns
-------
name : str
Only returned if `spec` contains a field ``'name'``.
design : |Design|
See Also
--------
experimentator.DesignTree.from_spec
Examples
--------
>>> design_spec = {
...'name': 'block',
...'ivs': {'speed': [1, 2, 3], 'size': [15, 30]},
...'ordering': 'Shuffle',
...'n': 3}
>>> Design.from_dict(design_spec)
Level(name='block', design=Design(ivs=[('speed', [1, 2, 3]), ('size', [15, 30])], design_matrix=None, ordering=Shuffle(number=3, avoid_repeats=False), extra_data={}))
"""
inputs = Schema({
Optional('name'): And(str, len),
Optional('ivs'): And(Use(dict), {Optional(And(str, len)): Iterable}),
Optional('design_matrix'): Use(np.asarray),
Optional(Or('order', 'ordering')): Use(order.OrderSchema.from_any),
Optional(Or('n', 'number')): int,
Optional(
lambda x: x not in {'name', 'ivs', 'design_matrix', 'order', 'ordering', 'n', 'number'}
# Necessary due to https://github.com/keleshev/schema/issues/57
): object,
}).validate(spec)
if 'n' in inputs:
inputs['number'] = inputs.pop('n')
if 'order' in inputs:
inputs['ordering'] = inputs.pop('order')
if 'ordering' not in inputs:
inputs['ordering'] = order.Ordering() if 'design_matrix' in inputs else order.Shuffle()
if 'number' in inputs:
inputs['ordering'].number = inputs.pop('number')
name = inputs.pop('name', None)
extra_keys = set(inputs) - {'ivs', 'design_matrix', 'ordering'}
if extra_keys:
inputs['extra_data'] = {key: inputs.pop(key) for key in extra_keys}
self = cls(**inputs)
return Level(name, self) if name else self
def __repr__(self):
return 'Design(ivs={}, design_matrix={}, ordering={}, extra_data={})'.format(
list(zip(self.iv_names, self.iv_values)), self.design_matrix, self.ordering, self.extra_data)
def __eq__(self, other):
if isinstance(other, type(self)):
return self.__dict__ == other.__dict__
return False
def get_order(self, data=None):
"""Order the conditions.
Returns
-------
list of dict
A list of dictionaries, each specifying a condition (a mapping from IV names to values).
"""
condition_order = self.ordering.get_order(data)
for condition in condition_order:
condition.update(self.extra_data)
return condition_order
def first_pass(self):
"""Initialize design.
Initializes the design by parsing the design matrix or crossing the IVs.
If a |NonAtomicOrdering| is used, an additional IV will be returned
which should be incorporated into the design one level up in the experimental hierarchy.
For this reason, the |first_pass| methods in a hierarchy of |Design| instances
should be called in reverse order, from bottom up.
Use a |DesignTree| to ensure this occurs properly.
Returns
-------
iv_name : str or tuple
The name of the IV, for |non-atomic orderings|.
Otherwise, an empty tuple.
iv_values : tuple
The possible values of the IV.
Empty for atomic orderings.
"""
if self.design_matrix is not None:
if not np.shape(self.design_matrix)[1] == len(self.iv_names):
raise TypeError("Size of design matrix doesn't match number of IVs")
all_conditions = self._parse_design_matrix(self.design_matrix)
else:
all_conditions = self.full_cross(self.iv_names, self.iv_values)
return self.ordering.first_pass(all_conditions)
def update(self, names, values):
"""
Add additional independent variables to the |Design|.
This will have no effect after |Design.first_pass| has been called.
Parameters
----------
names : list of str
Names of IVs to add.
values : list of list
For each IV, a list of possible values.
"""
self.iv_names.extend(names)
self.iv_values.extend(values)
@staticmethod
def full_cross(iv_names, iv_values):
"""
Perform a full factorial cross of the independent variables.
Yields dictionaries, each describing one condition, a mapping from IV names to IV values.
One dictionary is yielded for every possible combination of IV values.
Parameters
----------
iv_names : list of str
Names of IVs.
iv_values : list of list
Each element defines the possible values of an IV.
Must be the same length as `iv_names`.
Its elements must be hashable.
"""
iv_combinations = itertools.product(*iv_values)
yield from (dict(zip(iv_names, iv_combination)) for iv_combination in iv_combinations)
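# Illustrative example (not part of the original module):
#
#     list(Design.full_cross(['side', 'difficulty'],
#                            [['left', 'right'], ['easy', 'hard']]))
#
# yields the four condition dictionaries
#
#     {'side': 'left', 'difficulty': 'easy'}, {'side': 'left', 'difficulty': 'hard'},
#     {'side': 'right', 'difficulty': 'easy'}, {'side': 'right', 'difficulty': 'hard'}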
def _parse_design_matrix(self, design_matrix):
values_per_factor = [np.unique(column) for column in np.transpose(design_matrix)]
if any(iv_values and not len(iv_values) == len(values)
for iv_values, values in zip(self.iv_values, values_per_factor)):
raise ValueError('Unique elements in design matrix do not match number of values in IV definition')
conditions = []
for row in design_matrix:
condition = self.extra_data.copy()
for iv_name, iv_values, factor_values, design_matrix_value in zip(
self.iv_names, self.iv_values, values_per_factor, row):
if iv_values:
condition.update({iv_name: np.array(iv_values)[factor_values == design_matrix_value][0]})
else:
condition.update({iv_name: design_matrix_value})
conditions.append(condition)
return conditions
@property
def is_heterogeneous(self):
return self.heterogeneous_design_iv_name in self.iv_names
@property
def branches(self):
return dict(zip(self.iv_names, self.iv_values)).get(self.heterogeneous_design_iv_name, ())
class DesignTree:
"""
A container for |Design| instances, describing the entire hierarchy of a basic |Experiment|.
|DesignTree| instances are iterators; calling ``next`` on one
will return another |DesignTree| with the top level removed.
In this way, the entire experimental hierarchy can be created by recursively calling ``next``.
Use |DesignTree.new| to create a new tree; the generic constructor is for instantiating trees
whose attributes have already been processed (i.e., reloading already-created trees).
Attributes
----------
levels_and_designs : list of tuple
other_designs : dict
branches : dict
Only those items from `other_designs` that follow directly from this tree.
Notes
-----
Calling ``next`` on the last level of a heterogeneous |DesignTree|
will return a dictionary of named |DesignTree| instances
(rather than a single |DesignTree| instance).
The keys are the possible values of the IV ``'design'``
and the values are the corresponding |DesignTree| instances.
"""
def __init__(self, levels_and_designs=None, other_designs=None, branches=None):
self.levels_and_designs = levels_and_designs or []
self.other_designs = other_designs or {}
self.branches = branches or {}
@classmethod
def new(cls, levels_and_designs, **other_designs):
"""Create a new |DesignTree|.
Parameters
----------
levels_and_designs : |OrderedDict| or list of tuple
This input defines the structure of the tree, and is either an |OrderedDict| or a list of 2-tuples.
Keys (or first element of each tuple) are level names.
Values (or second element of each tuple) are design specifications,
in the form of either a |Design| instance, or a list of |Design| instances to occur in sequence.
**other_designs
Named design trees, can be other |DesignTree| instances or suitable `levels_and_designs` inputs
(i.e., |OrderedDict| or list of tuples).
These designs allow for heterogeneous design structures
(i.e. not every section at the same level has the same |Design|).
To make a heterogeneous |DesignTree|,
use an IV named ``'design'`` at the level where the heterogeneity should occur.
Values of this IV should be strings,
each corresponding to the name of a |DesignTree| from `other_designs`.
The value of the IV ``'design'`` at each section
determines which |DesignTree| is used for children of that section.
"""
if isinstance(levels_and_designs, collections.OrderedDict):
levels_and_designs = list(levels_and_designs.items())
# Check for singleton Designs.
for i, (level, design) in enumerate(levels_and_designs):
if isinstance(design, Design):
levels_and_designs[i] = (level, [design])
# Convert to namedtuples.
levels_and_designs = [Level(*level) for level in levels_and_designs]
# Handle heterogeneous trees.
bottom_level_design = levels_and_designs[-1].design[0]
if bottom_level_design.is_heterogeneous:
branches = {name: branch for name, branch in other_designs.items()
if name in bottom_level_design.branches and isinstance(branch, DesignTree)}
for branch_name in bottom_level_design.branches:
if branch_name not in branches:
designs_to_pass = other_designs.copy()
del designs_to_pass[branch_name]
tree = DesignTree.new(other_designs[branch_name], **designs_to_pass)
branches[branch_name] = tree
else:
branches = {}
self = cls(levels_and_designs, other_designs, branches)
self.first_pass(self.levels_and_designs)
return self
@classmethod
def from_spec(cls, spec):
"""
Constructs a |DesignTree| instance from a specification (e.g., parsed from a YAML file).
Parameters
----------
spec : dict or list of dict
The |DesignTree| specification.
A dictionary with keys as tree names and values as lists of dictionaries.
Each sub-dictionary should specify a |Design| according to |Design.from_dict|.
The main tree should be named ``'main'``.
Other names are used for generating heterogeneous trees
(see |DesignTree| docs).
A homogeneous tree can be specified as a dictionary with only a single key ``'main'``,
or directly as a list of dictionaries.
Returns
-------
|DesignTree|
"""
if isinstance(spec, dict):
# The normal case.
main_tree = list(cls._design_specs_to_designs(spec.pop('main')))
other_trees = {name: list(cls._design_specs_to_designs(specs)) for name, specs in spec.items()}
else:
# Only a main design.
main_tree = list(cls._design_specs_to_designs(spec))
other_trees = {}
return cls.new(main_tree, **other_trees)
@staticmethod
def _design_specs_to_designs(specs):
for spec in specs:
if isinstance(spec, dict):
name_and_design = Design.from_dict(spec)
if isinstance(name_and_design, Design):
yield None, name_and_design
else:
yield name_and_design
else:
name = None
designs = []
for design_spec in spec:
name_and_design = Design.from_dict(design_spec)
if isinstance(name_and_design, Design):
designs.append(name_and_design)
else:
if name and name_and_design[0] != name:
raise ValueError('Designs at the same level must have the same name')
name = name_and_design[0]
designs.append(name_and_design[1])
yield name, designs
def __next__(self):
if len(self) == 1:
raise StopIteration
if len(self.levels_and_designs) == 1:
return self.branches
next_design = copy(self)
next_design.levels_and_designs = next_design.levels_and_designs[1:]
return next_design
def __len__(self):
length = len(self.levels_and_designs)
if self.branches:
length += len(list(self.branches.values())[0])
return length
def __getitem__(self, item):
return self.levels_and_designs[item]
def __eq__(self, other):
if isinstance(other, type(self)):
return self.__dict__ == other.__dict__
return False
@staticmethod
def first_pass(levels_and_designs):
"""
Make a first pass of all designs in a |DesignTree|, from bottom to top.
This calls |Design.first_pass| on every |Design| instance in the tree in the proper order,
updating designs when a new IV is returned.
This is necessary for |non-atomic orderings| because they modify the parent |Design|.
"""
for (level, designs), (level_above, designs_above) in \
zip(reversed(levels_and_designs[1:]), reversed(levels_and_designs[:-1])):
# Call first_pass and add new IVs.
new_iv_names = []
new_iv_values = []
for design in designs:
iv_name, iv_values = design.first_pass()
if iv_name:
new_iv_names.append(iv_name)
new_iv_values.append(iv_values)
for design in designs_above:
design.update(new_iv_names, new_iv_values)
# And call first pass of the top level.
iv_names, iv_values = (), ()
for design in levels_and_designs[0].design:
iv_names, iv_values = design.first_pass()
if iv_names != () or iv_values != ():
raise ValueError('Cannot have a non-atomic ordering at the top level of a DesignTree. ')
def add_base_level(self):
"""
Adds a section to the top of the tree called ``'_base'``.
This makes the |DesignTree| suitable for constructing an |Experiment|.
Notes
-----
The |Experiment| constructor calls this automatically,
and this shouldn't be called when appending a tree to an existing |Experiment|,
so there is no use case for manually calling this method.
"""
self.levels_and_designs.insert(0, Level('_base', Design()))
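# Minimal usage sketch (illustrative only, not part of the original module):
# build a homogeneous three-level tree from plain dictionaries and inspect it.
# The level names and IVs below are made up for the example.
if __name__ == '__main__':
    tree = DesignTree.from_spec([
        {'name': 'participant'},
        {'name': 'block', 'n': 2},
        {'name': 'trial', 'ivs': {'difficulty': ['easy', 'hard']}},
    ])
    print(len(tree), [level.name for level in tree.levels_and_designs])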
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
from mock import patch
from mock import Mock
from mock import ANY
import os
import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
try:
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../')
from sanji.publish import Publish
from sanji.publish import Retry
from sanji.session import Session
from sanji.session import TimeoutError
from sanji.session import StatusError
from sanji.session import Status
from sanji.message import Message
from sanji.connection.mockup import Mockup
except ImportError:
print("Please check the python PATH for import test module. (%s)"
% __file__)
exit(1)
class TestFunctionClass(unittest.TestCase):
def test_retry(self):
# case 1: timeout
def target():
raise TimeoutError
Retry(target=target, options={"retry": False, "interval": 0})
# case 2: normal
msg = Message({})
def target2():
setattr(msg, "code", 200)
return msg
res = Retry(target=target2, options={"retry": False, "interval": 0})
self.assertEqual(res, msg)
# case 3: retry
msg = Message({})
def target2():
setattr(msg, "code", 500)
return msg
res = Retry(target=target2, options={"retry": 3, "interval": 0})
self.assertEqual(res, None)
class TestPublishClass(unittest.TestCase):
@patch("sanji.session.Thread")
def setUp(self, Thread):
self.conn = Mockup()
self.session = Session()
self.publish = Publish(self.conn, self.session)
def tearDown(self):
self.session.stop()
self.conn = None
self.session = None
self.publish = None
def test_crud(self): # noqa
self.conn.publish = Mock(return_value=1)
self.session.create = Mock(return_value={})
# CRUD: block
with patch("sanji.publish.Publish._wait_published") as _wait_published:
_wait_published.return_value = None
for method in ["get", "put", "post", "delete"]:
self.publish.__getattribute__(method)("/test/resource",
{"test": method}, False)
self.conn.publish.assert_called_once_with(topic="/controller",
qos=0,
payload=ANY)
self.conn.publish.reset_mock()
self.session.create.assert_called_once_with(ANY, mid=1, age=60)
self.session.create.reset_mock()
# CRUD: non-block
with patch("sanji.publish.Publish._wait_resolved") as _wait_resolved:
# Normal case
_wait_resolved.return_value = None
for method in ["get", "put", "post", "delete"]:
self.publish.__getattribute__(method)("/test/resource",
{"test": method}, True)
# Timeout
_wait_resolved.side_effect = TimeoutError
for method in ["get", "put", "post", "delete"]:
with self.assertRaises(TimeoutError):
self.publish.__getattribute__(method)("/test/resource",
{"test": method},
True, 0)
# StatusError
_wait_resolved.side_effect = StatusError
for method in ["get", "put", "post", "delete"]:
with self.assertRaises(StatusError):
self.publish.__getattribute__(method)("/test/resource",
{"test": method},
True)
def test_event(self):
with patch("sanji.publish.Publish._wait_published") as _wait_published:
_wait_published.return_value = None
self.publish._conn.publish = Mock()
self.publish.event.get("/test/event2",
{"type": "notify2", "message": "hi"})
self.publish._conn.publish.assert_called_once_with(
topic="/controller", qos=0, payload=ANY)
def test_direct(self):
with patch("sanji.publish.Publish._wait_published") as _wait_published:
_wait_published.return_value = None
self.publish.direct.get("/test/direct1", {
"type": "direct1",
"message": "hi"},
block=False)
_wait_published.assert_called_once_with(ANY)
def test_create_response(self):
messages = [
Message({"test": "block"}, generate_id=True),
Message({"query": {}, "param": {}, "sign": ["controller"]},
generate_id=True)
]
def check_message(topic, qos, payload):
self.assertNotIn("query", payload)
self.assertNotIn("param", payload)
self.assertIn("sign", payload)
self.assertIn("code", payload)
self.assertIn("this is sign", payload["sign"])
self.publish._wait_published = Mock(return_value=None)
self.conn.publish = check_message
for message in messages:
resp = self.publish.create_response(message, "this is sign")
resp(500, {"ccc": "moxa best"})
self.publish._wait_published.assert_called_once_with(
ANY, no_response=True)
self.publish._wait_published.reset_mock()
def test__create_message(self):
# input dict
msg = self.publish._create_message({}, None)
self.assertIsInstance(msg, Message)
msg = self.publish._create_message(
{'method': 'get', 'sign': ['aaa', 'bbb']}, {'test': 1234}
)
self.assertEqual(msg.method, 'get')
self.assertEqual(msg.data['test'], 1234)
self.assertEqual(msg.sign, ['aaa', 'bbb'])
        # input Message
in_msg = Message({'method': 'post', 'resource': '/test'})
out_msg = self.publish._create_message(data=in_msg)
self.assertDictEqual(in_msg.__dict__, out_msg.__dict__)
def test__wait_resolved(self):
# RESPONSE_TIMEOUT
session = self.session.create(Message({}, generate_id=True))
session["is_resolved"].set()
session["status"] = Status.RESPONSE_TIMEOUT
with self.assertRaises(TimeoutError):
self.publish._wait_resolved(session)
# RESOLVED
session = self.session.create(Message({}, generate_id=True))
session["is_resolved"].set()
session["status"] = Status.RESOLVED
session["resolve_message"] = True
self.assertTrue(self.publish._wait_resolved(session))
# UNKNOWN
session = self.session.create(Message({}, generate_id=True))
session["is_resolved"].set()
session["status"] = 999
with self.assertRaises(StatusError):
self.publish._wait_resolved(session)
def test__wait_published(self):
# SEND_TIMEOUT
session = self.session.create(Message({}, generate_id=True))
session["status"] = Status.SEND_TIMEOUT
session["is_published"].set()
with self.assertRaises(TimeoutError):
self.publish._wait_published(session)
# SENT
session = self.session.create(Message({}, generate_id=True))
session["status"] = Status.SENT
session["is_published"].set()
self.assertDictEqual(self.publish._wait_published(session),
session)
# UNKNOWN
session = self.session.create(Message({}, generate_id=True))
session["status"] = 999
session["is_published"].set()
with self.assertRaises(StatusError):
self.publish._wait_published(session)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright (c) 2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the simplified plotting interface."""
from datetime import datetime
from io import BytesIO
import warnings
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib
import pytest
import xarray as xr
from metpy.cbook import get_test_data
from metpy.io import GiniFile
from metpy.plots import ContourPlot, ImagePlot, MapPanel, PanelContainer
# Fixtures to make sure we have the right backend
from metpy.testing import set_agg_backend # noqa: F401, I202
from metpy.units import units
MPL_VERSION = matplotlib.__version__[:3]
@pytest.mark.mpl_image_compare(remove_text=True,
tolerance={'2.0': 3.09}.get(MPL_VERSION, 0.005))
def test_declarative_image():
"""Test making an image plot."""
data = xr.open_dataset(GiniFile(get_test_data('NHEM-MULTICOMP_1km_IR_20151208_2100.gini')))
img = ImagePlot()
img.data = data.metpy.parse_cf('IR')
img.colormap = 'Greys_r'
panel = MapPanel()
panel.title = 'Test'
panel.plots = [img]
pc = PanelContainer()
pc.panel = panel
pc.draw()
assert panel.ax.get_title() == 'Test'
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.022)
def test_declarative_contour():
"""Test making a contour plot."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.field = 'Temperature'
contour.level = 700 * units.hPa
contour.contours = 30
contour.linewidth = 1
contour.linecolor = 'red'
panel = MapPanel()
panel.area = 'us'
panel.proj = 'lcc'
panel.layers = ['coastline', 'borders', 'usstates']
panel.plots = [contour]
pc = PanelContainer()
pc.size = (8, 8)
pc.panels = [panel]
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.016)
def test_declarative_events():
"""Test that resetting traitlets properly propagates."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.field = 'Temperature'
contour.level = 850 * units.hPa
contour.contours = 30
contour.linewidth = 1
contour.linecolor = 'red'
img = ImagePlot()
img.data = data
img.field = 'v_wind'
img.level = 700 * units.hPa
img.colormap = 'hot'
img.image_range = (3000, 5000)
panel = MapPanel()
panel.area = 'us'
panel.proj = 'lcc'
panel.layers = ['coastline', 'borders', 'states']
panel.plots = [contour, img]
pc = PanelContainer()
pc.size = (8, 8)
pc.panels = [panel]
pc.draw()
# Update some properties to make sure it regenerates the figure
contour.linewidth = 2
contour.linecolor = 'green'
contour.level = 700 * units.hPa
contour.field = 'Specific_humidity'
img.field = 'Geopotential_height'
img.colormap = 'plasma'
return pc.figure
def test_no_field_error():
"""Make sure we get a useful error when the field is not set."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.level = 700 * units.hPa
with pytest.raises(ValueError):
contour.draw()
def test_projection_object():
"""Test that we can pass a custom map projection."""
data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
contour = ContourPlot()
contour.data = data
contour.level = 700 * units.hPa
contour.field = 'Temperature'
panel = MapPanel()
panel.area = (-110, -60, 25, 55)
panel.projection = ccrs.Mercator()
panel.layers = [cfeature.LAKES]
panel.plots = [contour]
pc = PanelContainer()
pc.panel = panel
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=1.23)
def test_global():
"""Test that we can set global extent."""
data = xr.open_dataset(GiniFile(get_test_data('NHEM-MULTICOMP_1km_IR_20151208_2100.gini')))
img = ImagePlot()
img.data = data
img.field = 'IR'
panel = MapPanel()
panel.area = 'global'
panel.plots = [img]
pc = PanelContainer()
pc.panel = panel
pc.draw()
return pc.figure
@pytest.mark.mpl_image_compare(remove_text=True)
@pytest.mark.xfail(xr.__version__ < '0.11.0', reason='Does not work with older xarray.')
def test_latlon():
"""Test our handling of lat/lon information."""
data = xr.open_dataset(get_test_data('irma_gfs_example.nc', as_file_obj=False))
img = ImagePlot()
img.data = data
img.field = 'Temperature_isobaric'
img.level = 500 * units.hPa
img.time = datetime(2017, 9, 5, 15, 0, 0)
contour = ContourPlot()
contour.data = data
contour.field = 'Geopotential_height_isobaric'
contour.level = img.level
contour.time = img.time
panel = MapPanel()
panel.projection = 'lcc'
panel.area = 'us'
panel.plots = [img, contour]
pc = PanelContainer()
pc.panel = panel
pc.draw()
return pc.figure
def test_save():
"""Test that our saving function works."""
pc = PanelContainer()
fobj = BytesIO()
pc.save(fobj, format='png')
fobj.seek(0)
# Test that our file object had something written to it.
assert fobj.read()
def test_show():
"""Test that show works properly."""
pc = PanelContainer()
# Matplotlib warns when using show with Agg
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
pc.show()
def test_panel():
"""Test the functionality of the panel property."""
panel = MapPanel()
pc = PanelContainer()
pc.panels = [panel]
assert pc.panel is panel
pc.panel = panel
assert pc.panel is panel
|
|
"""
Copyright (C) 2010 David Fong and Michael Saunders
Distributed under the same license as SciPy
Testing Code for LSMR.
03 Jun 2010: First version release with lsmr.py
David Chin-lung Fong [email protected]
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders [email protected]
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
from numpy import array, arange, eye, zeros, ones, sqrt, transpose, hstack
from numpy.linalg import norm
from numpy.testing import assert_allclose
import pytest
from scipy.sparse import coo_matrix
from scipy.sparse.linalg._interface import aslinearoperator
from scipy.sparse.linalg import lsmr
from .test_lsqr import G, b
class TestLSMR:
def setup_method(self):
self.n = 10
self.m = 10
def assertCompatibleSystem(self, A, xtrue):
Afun = aslinearoperator(A)
b = Afun.matvec(xtrue)
x = lsmr(A, b)[0]
assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
def testIdentityACase1(self):
A = eye(self.n)
xtrue = zeros((self.n, 1))
self.assertCompatibleSystem(A, xtrue)
def testIdentityACase2(self):
A = eye(self.n)
xtrue = ones((self.n,1))
self.assertCompatibleSystem(A, xtrue)
def testIdentityACase3(self):
A = eye(self.n)
xtrue = transpose(arange(self.n,0,-1))
self.assertCompatibleSystem(A, xtrue)
def testBidiagonalA(self):
A = lowerBidiagonalMatrix(20,self.n)
xtrue = transpose(arange(self.n,0,-1))
self.assertCompatibleSystem(A,xtrue)
def testScalarB(self):
A = array([[1.0, 2.0]])
b = 3.0
x = lsmr(A, b)[0]
assert norm(A.dot(x) - b) == pytest.approx(0)
def testComplexX(self):
A = eye(self.n)
xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
self.assertCompatibleSystem(A, xtrue)
def testComplexX0(self):
A = 4 * eye(self.n) + ones((self.n, self.n))
xtrue = transpose(arange(self.n, 0, -1))
b = aslinearoperator(A).matvec(xtrue)
x0 = zeros(self.n, dtype=complex)
x = lsmr(A, b, x0=x0)[0]
assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
def testComplexA(self):
A = 4 * eye(self.n) + 1j * ones((self.n, self.n))
xtrue = transpose(arange(self.n, 0, -1).astype(complex))
self.assertCompatibleSystem(A, xtrue)
def testComplexB(self):
A = 4 * eye(self.n) + ones((self.n, self.n))
xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
b = aslinearoperator(A).matvec(xtrue)
x = lsmr(A, b)[0]
assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
def testColumnB(self):
A = eye(self.n)
b = ones((self.n, 1))
x = lsmr(A, b)[0]
assert norm(A.dot(x) - b.ravel()) == pytest.approx(0)
def testInitialization(self):
# Test that the default setting is not modified
x_ref, _, itn_ref, normr_ref, *_ = lsmr(G, b)
assert_allclose(norm(b - G@x_ref), normr_ref, atol=1e-6)
        # Test passing zeros yields similar result
x0 = zeros(b.shape)
x = lsmr(G, b, x0=x0)[0]
assert_allclose(x, x_ref)
# Test warm-start with single iteration
x0 = lsmr(G, b, maxiter=1)[0]
x, _, itn, normr, *_ = lsmr(G, b, x0=x0)
assert_allclose(norm(b - G@x), normr, atol=1e-6)
        # NOTE(gh-12139): This doesn't always converge to the same value as
        # x_ref because the error estimates differ slightly when calculated
        # from zeros vs x0; as a result, only compare norm and itn (not x).
        # x generally converges one iteration faster because it started at x0;
        # itn == itn_ref means that lsmr(x0) took an extra iteration (see above).
        # A difference of -1 is technically possible but rare (about 1 in 100000),
        # so it is more likely to indicate an error elsewhere.
assert itn - itn_ref in (0, 1)
# If an extra iteration is performed normr may be 0, while normr_ref
# may be much larger.
assert normr < normr_ref * (1 + 1e-6)
class TestLSMRReturns:
def setup_method(self):
self.n = 10
self.A = lowerBidiagonalMatrix(20, self.n)
self.xtrue = transpose(arange(self.n, 0, -1))
self.Afun = aslinearoperator(self.A)
self.b = self.Afun.matvec(self.xtrue)
self.x0 = ones(self.n)
self.x00 = self.x0.copy()
self.returnValues = lsmr(self.A, self.b)
self.returnValuesX0 = lsmr(self.A, self.b, x0=self.x0)
def test_unchanged_x0(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValuesX0
assert_allclose(self.x00, self.x0)
def testNormr(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert norm(self.b - self.Afun.matvec(x)) == pytest.approx(normr)
def testNormar(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert (norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x)))
== pytest.approx(normar))
def testNormx(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert norm(x) == pytest.approx(normx)
def lowerBidiagonalMatrix(m, n):
# This is a simple example for testing LSMR.
# It uses the leading m*n submatrix from
# A = [ 1
# 1 2
# 2 3
# 3 4
# ...
# n ]
# suitably padded by zeros.
#
# 04 Jun 2010: First version for distribution with lsmr.py
if m <= n:
row = hstack((arange(m, dtype=int),
arange(1, m, dtype=int)))
col = hstack((arange(m, dtype=int),
arange(m-1, dtype=int)))
data = hstack((arange(1, m+1, dtype=float),
arange(1,m, dtype=float)))
return coo_matrix((data, (row, col)), shape=(m,n))
else:
row = hstack((arange(n, dtype=int),
arange(1, n+1, dtype=int)))
col = hstack((arange(n, dtype=int),
arange(n, dtype=int)))
data = hstack((arange(1, n+1, dtype=float),
arange(1,n+1, dtype=float)))
return coo_matrix((data,(row, col)), shape=(m,n))
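# Hedged illustration (not part of the original tests): for m=5, n=3 the sparse
# matrix built above is
#
#     lowerBidiagonalMatrix(5, 3).toarray()
#     array([[1., 0., 0.],
#            [1., 2., 0.],
#            [0., 2., 3.],
#            [0., 0., 3.],
#            [0., 0., 0.]])
#
# i.e. 1..n on the diagonal and 1..n on the first subdiagonal, padded with zero rows.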
def lsmrtest(m, n, damp):
"""Verbose testing of lsmr"""
A = lowerBidiagonalMatrix(m,n)
xtrue = arange(n,0,-1, dtype=float)
Afun = aslinearoperator(A)
b = Afun.matvec(xtrue)
atol = 1.0e-7
btol = 1.0e-7
conlim = 1.0e+10
itnlim = 10*n
show = 1
x, istop, itn, normr, normar, norma, conda, normx \
= lsmr(A, b, damp, atol, btol, conlim, itnlim, show)
j1 = min(n,5)
j2 = max(n-4,1)
print(' ')
print('First elements of x:')
str = ['%10.4f' % (xi) for xi in x[0:j1]]
print(''.join(str))
print(' ')
print('Last elements of x:')
str = ['%10.4f' % (xi) for xi in x[j2-1:]]
print(''.join(str))
r = b - Afun.matvec(x)
r2 = sqrt(norm(r)**2 + (damp*norm(x))**2)
print(' ')
str = 'normr (est.) %17.10e' % (normr)
str2 = 'normr (true) %17.10e' % (r2)
print(str)
print(str2)
print(' ')
if __name__ == "__main__":
lsmrtest(20,10,0)
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "densitymapbox.colorbar"
_path_str = "densitymapbox.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
# dtickrange
# ----------
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.densitymapbox.
colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.densitymapbox.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.densitymapbox.colorbar.Tickformatstop`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
_v = dtickrange if dtickrange is not None else _v
if _v is not None:
self["dtickrange"] = _v
_v = arg.pop("enabled", None)
_v = enabled if enabled is not None else _v
if _v is not None:
self["enabled"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
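# Hedged usage sketch (not part of the generated module): a Tickformatstop is
# normally attached to a colorbar's `tickformatstops` list so that a different
# tick format applies within a given dtick range. Assuming the standard
# plotly.graph_objects entry point, something along these lines should work:
#
#     import plotly.graph_objects as go
#
#     stop = go.densitymapbox.colorbar.Tickformatstop(
#         dtickrange=[None, 1000], value=",.2f", enabled=True)
#     trace = go.Densitymapbox(
#         lat=[0, 1], lon=[0, 1], z=[1, 2],
#         colorbar=dict(tickformatstops=[stop]))
#     fig = go.Figure(trace)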
|
|
# -*- coding: utf-8 -*-
"""User views."""
from datetime import date
from dateutil.relativedelta import relativedelta
from flask import Blueprint, abort, flash, jsonify, request, url_for
from flask_login import current_user, login_required
from shop.cart.models import Sale
from shop.user.forms import AccountForm, AddressForm, ChangePasswordForm
from shop.user.models import Address, ContactMechanism
from shop.utils import render_theme_template as render_template
from werkzeug import redirect
blueprint = Blueprint(
'user', __name__,
url_prefix='/my', static_folder='../static'
)
@blueprint.route('/change-password', methods=["GET", "POST"])
@login_required
def change_password():
"""
Change user's password
"""
form = ChangePasswordForm(request.form)
if form.validate_on_submit():
current_user.set_password(form.new_password.data)
flash("Your password was successfully updated!", "success")
return redirect(url_for('public.home'))
return render_template('users/change-password.html', form=form)
@blueprint.route('/addresses')
@login_required
def addresses():
"""List Addresses."""
addresses = current_user.get_addresses()
if request.is_xhr or request.is_json:
return jsonify(addresses=[address.serialize() for address in addresses])
return render_template('users/addresses.html', addresses=addresses)
@blueprint.route("/addresses/new", methods=["GET", "POST"])
@login_required
def create_address():
"""
Create an address for the current nereid_user
GET
~~~
Return an address creation form
POST
~~~~
Creates an address and redirects to the address view
"""
address_name = "" if current_user.is_anonymous else \
current_user.name
form = AddressForm(
name=address_name
)
if form.validate_on_submit():
address = Address(party=current_user.party.id)
form.populate(address)
address.save()
if request.is_xhr or request.is_json:
return jsonify(address.serialize())
flash("The new address has been added to your address book", 'success')
return redirect(url_for('user.addresses'))
elif request.is_xhr or request.is_json:
return jsonify({
"error": form.errors,
"message": "Could not create address."
})
return render_template('users/address-form.html', form=form)
@blueprint.route("/addresses/<int:address_id>/edit", methods=["GET", "POST"])
@login_required
def edit_address(address_id):
"""
Edit an Address
POST will update an existing address.
    GET will return an existing address edit form.
:param address_id: ID of the address
"""
address = Address.query.get(address_id)
if not address.party == current_user.party:
abort(403)
form = AddressForm(request.form, obj=address)
if form.validate_on_submit():
form.populate(address)
if request.form.get('skip_validation', None):
address.validation_status = 'skipped'
else:
# Editing address will set validation status back to unknown
address.validation_status = 'unknown'
address.save()
if request.is_xhr or request.is_json:
return jsonify({
"address": address.serialize()
})
flash('Your address has been updated', 'success')
return redirect(url_for('user.addresses'))
if form.errors and (request.is_xhr or request.is_json):
return jsonify(errors=form.errors), 400
return render_template('users/address-edit.html', form=form, address=address)
@blueprint.route("/address/<int:address_id>/delete", methods=["POST"])
@login_required
def delete_address(address_id):
"""
Delete an address
POST deletes the address with the address_id
"""
address_query = Address.query.filter_by_domain(
[
('id', '=', address_id),
('party', '=', current_user.party.id)
]
)
if address_query.first():
address_query.archive()
flash("Address deleted", 'warning')
return redirect(url_for('user.addresses'))
else:
abort(404)
@blueprint.route('/orders')
@login_required
def orders():
"""Render all orders
"""
filter_by = request.args.get('filter_by', None)
page = request.args.get('page', type=int) or None
per_page = request.args.get('per_page', type=int) or 10
domain = [
('party', '=', current_user.party.id),
]
req_date = (
date.today() + relativedelta(months=-3)
)
if filter_by == 'done':
domain.append(('state', '=', 'done'))
elif filter_by == 'cancelled':
domain.append(('state', '=', 'cancel'))
elif filter_by == 'archived':
# only done and cancelled orders should be in archive
# irrespective of the date. Pre orders for example
# could be over 3 months old and still be in the
# processing state
domain.append(
('state', 'in', ('done', 'cancel'))
)
        # Add a sale_date domain so only orders older than three months
        # show up in the archive.
domain.append((
'sale_date', '<', req_date
))
elif filter_by == 'open':
# All orders which are in a state of processing
domain.append(
('state', 'in', ['confirmed', 'processing'])
)
else:
domain.append([
'OR',
('state', 'in', ('confirmed', 'processing')),
[
('state', 'in', ('done', 'cancel')),
('sale_date', '>=', req_date),
]
])
    # Build and paginate the query for the selected orders.
shop_query = Sale.get_shop_query().filter_by_domain(domain)
paginate = shop_query.paginate(page=page, per_page=per_page)
return render_template(
'users/orders.html',
sales=paginate.items,
paginate=paginate
)
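# Hedged usage note (not part of the original views): orders() above is driven by
# query-string parameters, e.g.
#     GET /my/orders?filter_by=archived&page=2&per_page=25
# where filter_by may be 'done', 'cancelled', 'archived' or 'open'; any other
# value (or no value) falls back to the default domain of open orders plus
# orders closed within the last three months.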
@blueprint.route('/account', methods=["GET", "POST"])
@login_required
def account():
"""Render account details
"""
form = AccountForm(
request.form,
name=current_user.name,
email=current_user.email,
phone=current_user.phone
)
if form.validate_on_submit():
current_user.name = form.name.data
if form.phone.data:
# Search for existing phone
contact_mechanism = ContactMechanism.query.filter_by_domain([
('party', '=', current_user.party.id),
('type', '=', 'phone'),
('value', '=', current_user.phone)
]).first()
if contact_mechanism:
contact_mechanism.value = form.phone.data
else:
contact_mechanism = ContactMechanism(
party=current_user.party.id,
type='phone', value=form.phone.data
)
contact_mechanism.save()
current_user.save()
return redirect(url_for('user.account'))
return render_template('users/account.html', form=form)
@blueprint.route('/order/<int:sale_id>')
@login_required
def order(sale_id):
"""Render given sale order
:param sale: ID of the sale Order
"""
sale = Sale.get_by_id(sale_id)
if sale.party.id != current_user.party.id:
# Order does not belong to the user
abort(403)
return render_template('users/order.html', sale=sale)
|
|
# Copyright 2018 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hamcrest import assert_that, is_
from tests.arista import enable, assert_interface_configuration, configuring_interface, with_eapi, create_vlan, \
remove_vlan, create_interface_vlan, remove_interface_vlan
from tests.util.protocol_util import ProtocolTest, SshTester, with_protocol
class TestAristaInterfaceVlans(ProtocolTest):
tester_class = SshTester
test_switch = "arista"
@with_protocol
def test_configure_trunk_port(self, t):
enable(t)
configuring_interface(t, "Et1", do="switchport mode TrunK")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport mode trunk"])
        # not really added because all vlans are allowed on the trunk by default on Arista
configuring_interface(t, "Et1", do="switchport trunk allowed vlan add 123")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport mode trunk"])
configuring_interface(t, "Et1", do="switchport trunk allowed vlan none")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport trunk allowed vlan none",
" switchport mode trunk"])
configuring_interface(t, "Et1", do="switchport trunk allowed vlan add 123")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport trunk allowed vlan 123",
" switchport mode trunk"])
configuring_interface(t, "Et1", do="switchport trunk allowed vlan add 124,126-128")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport trunk allowed vlan 123-124,126-128",
" switchport mode trunk"])
configuring_interface(t, "Et1", do="switchport trunk allowed vlan remove 123-124,127")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport trunk allowed vlan 126,128",
" switchport mode trunk"])
configuring_interface(t, "Et1", do="switchport trunk allowed vlan all")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport mode trunk"])
configuring_interface(t, "Et1", do="switchport trunk allowed vlan 123-124,127")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport trunk allowed vlan 123-124,127",
" switchport mode trunk"])
configuring_interface(t, "Et1", do="no switchport trunk allowed vlan")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport mode trunk"])
configuring_interface(t, "Et1", do="no switchport mode")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1"])
@with_protocol
def test_configure_trunk_port_by_removing_one_vlan_shows_all_others(self, t):
enable(t)
configuring_interface(t, "Et1", do="switchport trunk allowed vlan remove 100")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1",
" switchport trunk allowed vlan 1-99,101-4094"
])
configuring_interface(t, "Et1", do="no switchport trunk allowed vlan")
assert_interface_configuration(t, "Ethernet1", [
"interface Ethernet1"])
@with_protocol
def test_switchport_trunk_mode_errors(self, t):
enable(t)
t.write("configure terminal")
t.read("my_arista(config)#")
t.write("interface Ethernet 1")
t.read("my_arista(config-if-Et1)#")
t.write("switchport mode")
t.readln("% Incomplete command")
t.read("my_arista(config-if-Et1)#")
t.write("switchport mode trunk trunk")
t.readln("% Invalid input")
t.read("my_arista(config-if-Et1)#")
t.write("switchport mode waatt")
t.readln("% Invalid input")
t.read("my_arista(config-if-Et1)#")
t.write("no switchport mode whatever this is ignored past mode")
t.read("my_arista(config-if-Et1)#")
t.write("exit")
t.read("my_arista(config)#")
t.write("exit")
t.read("my_arista#")
@with_protocol
def test_switchport_trunk_allowed_vlan_add_errors(self, t):
enable(t)
t.write("configure terminal")
t.read("my_arista(config)#")
t.write("interface Ethernet 1")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan add")
t.readln("% Incomplete command")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan add 1 2")
t.readln("% Invalid input")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan add patate")
t.readln("% Invalid input")
t.read("my_arista(config-if-Et1)#")
t.write("no switchport trunk allowed vlan whatever this is ignored past mode")
t.read("my_arista(config-if-Et1)#")
t.write("exit")
t.read("my_arista(config)#")
t.write("exit")
t.read("my_arista#")
@with_protocol
def test_switchport_trunk_allowed_vlan_remove_errors(self, t):
enable(t)
t.write("configure terminal")
t.read("my_arista(config)#")
t.write("interface Ethernet 1")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan add")
t.readln("% Incomplete command")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan add 1 2")
t.readln("% Invalid input")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan add patate")
t.readln("% Invalid input")
t.read("my_arista(config-if-Et1)#")
t.write("exit")
t.read("my_arista(config)#")
t.write("exit")
t.read("my_arista#")
@with_protocol
def test_switchport_trunk_allowed_vlan_none_errors(self, t):
enable(t)
t.write("configure terminal")
t.read("my_arista(config)#")
t.write("interface Ethernet 1")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan none patate")
t.readln("% Invalid input")
t.read("my_arista(config-if-Et1)#")
t.write("exit")
t.read("my_arista(config)#")
t.write("exit")
t.read("my_arista#")
@with_protocol
def test_switchport_trunk_allowed_vlan_errors(self, t):
enable(t)
t.write("configure terminal")
t.read("my_arista(config)#")
t.write("interface Ethernet 1")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan")
t.readln("% Incomplete command")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan 1 2")
t.readln("% Invalid input")
t.read("my_arista(config-if-Et1)#")
t.write("switchport trunk allowed vlan patate")
t.readln("% Invalid input")
t.read("my_arista(config-if-Et1)#")
t.write("exit")
t.read("my_arista(config)#")
t.write("exit")
t.read("my_arista#")
@with_protocol
@with_eapi
def test_show_interfaces_switchport_doesnt_show_vlan_interfaces(self, t, api):
enable(t)
create_vlan(t, "299")
create_interface_vlan(t, "299")
t.write("show interfaces switchport")
t.readln("Default switchport mode: access")
t.readln("")
t.readln("Name: Et1")
t.readln("Switchport: Enabled")
t.readln("Administrative Mode: static access")
t.readln("Operational Mode: static access")
t.readln("MAC Address Learning: enabled")
t.readln("Dot1q ethertype/TPID: 0x8100 (active)")
t.readln("Dot1q Vlan Tag Required (Administrative/Operational): No/No")
t.readln("Access Mode VLAN: 1 (default)")
t.readln("Trunking Native Mode VLAN: 1 (default)")
t.readln("Administrative Native VLAN tagging: disabled")
t.readln("Trunking VLANs Enabled: ALL")
t.readln("Static Trunk Groups:")
t.readln("Dynamic Trunk Groups:")
t.readln("Source interface filtering: enabled")
t.readln("")
t.readln("Name: Et2")
t.readln("Switchport: Enabled")
t.readln("Administrative Mode: static access")
t.readln("Operational Mode: static access")
t.readln("MAC Address Learning: enabled")
t.readln("Dot1q ethertype/TPID: 0x8100 (active)")
t.readln("Dot1q Vlan Tag Required (Administrative/Operational): No/No")
t.readln("Access Mode VLAN: 1 (default)")
t.readln("Trunking Native Mode VLAN: 1 (default)")
t.readln("Administrative Native VLAN tagging: disabled")
t.readln("Trunking VLANs Enabled: ALL")
t.readln("Static Trunk Groups:")
t.readln("Dynamic Trunk Groups:")
t.readln("Source interface filtering: enabled")
t.readln("")
t.read("my_arista#")
result = api.enable(["show interfaces switchport"], strict=True)
expected_json_content = {
"sourceDetail": "",
"switchports": {
"Ethernet1": {
"enabled": True,
"switchportInfo": {
"accessVlanId": 1,
"accessVlanName": "default",
"dot1qVlanTagRequired": False,
"dot1qVlanTagRequiredStatus": False,
"dynamicAllowedVlans": {},
"dynamicTrunkGroups": [],
"macLearning": True,
"mode": "access",
"sourceportFilterMode": "enabled",
"staticTrunkGroups": [],
"tpid": "0x8100",
"tpidStatus": True,
"trunkAllowedVlans": "ALL",
"trunkingNativeVlanId": 1,
"trunkingNativeVlanName": "default"
}
},
"Ethernet2": {
"enabled": True,
"switchportInfo": {
"accessVlanId": 1,
"accessVlanName": "default",
"dot1qVlanTagRequired": False,
"dot1qVlanTagRequiredStatus": False,
"dynamicAllowedVlans": {},
"dynamicTrunkGroups": [],
"macLearning": True,
"mode": "access",
"sourceportFilterMode": "enabled",
"staticTrunkGroups": [],
"tpid": "0x8100",
"tpidStatus": True,
"trunkAllowedVlans": "ALL",
"trunkingNativeVlanId": 1,
"trunkingNativeVlanName": "default"
}
}
}
}
assert_that(result, is_([
{
"command": "show interfaces switchport",
"encoding": "json",
"response": expected_json_content,
"result": expected_json_content
}
]))
remove_interface_vlan(t, "299")
remove_vlan(t, "299")
@with_protocol
def test_show_interfaces_switchport_trunk_vlans(self, t):
enable(t)
create_vlan(t, "13")
create_vlan(t, "14")
configuring_interface(t, "Et1", do="switchport mode trunk")
configuring_interface(t, "Et1", do="switchport trunk allowed vlan 13-14")
t.write("show interfaces et1 switchport")
t.readln("Name: Et1")
t.readln("Switchport: Enabled")
t.readln("Administrative Mode: trunk")
t.readln("Operational Mode: trunk")
t.readln("MAC Address Learning: enabled")
t.readln("Dot1q ethertype/TPID: 0x8100 (active)")
t.readln("Dot1q Vlan Tag Required (Administrative/Operational): No/No")
t.readln("Access Mode VLAN: 1 (default)")
t.readln("Trunking Native Mode VLAN: 1 (default)")
t.readln("Administrative Native VLAN tagging: disabled")
t.readln("Trunking VLANs Enabled: 13-14")
t.readln("Static Trunk Groups:")
t.readln("Dynamic Trunk Groups:")
t.readln("Source interface filtering: enabled")
t.readln("")
t.read("my_arista#")
configuring_interface(t, "Et1", do="switchport trunk allowed vlan none")
t.write("show interfaces ethernet 1 switchport")
t.readln("Name: Et1")
t.readln("Switchport: Enabled")
t.readln("Administrative Mode: trunk")
t.readln("Operational Mode: trunk")
t.readln("MAC Address Learning: enabled")
t.readln("Dot1q ethertype/TPID: 0x8100 (active)")
t.readln("Dot1q Vlan Tag Required (Administrative/Operational): No/No")
t.readln("Access Mode VLAN: 1 (default)")
t.readln("Trunking Native Mode VLAN: 1 (default)")
t.readln("Administrative Native VLAN tagging: disabled")
t.readln("Trunking VLANs Enabled: NONE")
t.readln("Static Trunk Groups:")
t.readln("Dynamic Trunk Groups:")
t.readln("Source interface filtering: enabled")
t.readln("")
t.read("my_arista#")
remove_vlan(t, "13")
remove_vlan(t, "14")
@with_protocol
def test_show_interfaces_switchport_errors(self, t):
t.write("show interfaces patate switchport")
t.readln("% Incomplete command")
t.read("my_arista>")
t.write("show interfaces ethernet 1 2 switchport")
t.readln("% Invalid input")
t.read("my_arista>")
t.write("show interfaces et3 switchport")
t.readln("% Invalid input")
t.read("my_arista>")
|
|
from discord.ext import commands
from . import util
from .util import m
class TimerCategory (util.Cog):
@commands.group('timer', aliases=['t'], invoke_without_command=True)
async def group(self, ctx, *, input: str):
'''
        Manages character timers.
        Timers help track values that change over time, such as countdowns.
        Each timer changes by its delta when `t tick` or `endturn` is used.
'''
try:
number, name = input.split(maxsplit=1)
number = int(number)
except ValueError:
raise util.invalid_subcommand(ctx)
await ctx.invoke(self.plus, number, name=name)
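    # Hedged usage sketch (not part of the original cog): invoking the bare group,
    # e.g. "<prefix>t 5 rage", splits the input into number=5 and name='rage' and
    # dispatches to the '+' subcommand below, so it is shorthand for
    # "<prefix>t + 5 rage". The actual prefix depends on the bot's configuration.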
@group.command(aliases=['update'], ignore_extra=False)
async def add(self, ctx, name: str, initial: int, delta: int=-1):
'''
        Adds or changes a character timer
Parameters:
[name] the name of the new timer
[initial] the value to start the timer at
[delta] the change every tick. defaults to -1
'''
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
timer = util.sql_update(ctx.session, m.Timer, {
'character': character,
'name': name,
}, {
'initial': initial,
'delta': delta,
})
await util.send_embed(ctx, description='{} now has {}'.format(str(character), str(timer)))
@group.command('+')
async def plus(self, ctx, number: int, *, name: str):
'''
Increase the value of a timer
A space is not required between the + and the number argument
Parameters:
[number] the value to increase the timer by
[name*] the name of the timer
'''
name = util.strip_quotes(name)
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
timer = ctx.session.query(m.Timer)\
.filter_by(character_id=character.id, name=name).one_or_none()
if timer is None:
raise util.ItemNotFoundError(name)
if timer.value is None:
raise Exception("{}'s {} is not running".format(str(character), timer.name))
prev = timer.value
timer.value = prev + number
ctx.session.commit()
description = "{}'s {}: `{} => {}`".format(
str(character), timer.name, prev, timer.value)
await util.send_embed(ctx, description=description)
@group.command('-')
async def minus(self, ctx, number: int, *, name: str):
'''
Decrease the value of a timer
A space is not required between the - and the number argument
Parameters:
[number] the value to decrease the timer by
[name*] the name of the timer
'''
name = util.strip_quotes(name)
await ctx.invoke(self.plus, -number, name=name)
@group.command(ignore_extra=False)
async def set(self, ctx, name: str, value: int):
'''
Sets the current value of a timer
Also starts the timer if it is not running
Parameters:
        [name] the name of the timer
[value] the new value of the timer
'''
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
timer = ctx.session.query(m.Timer)\
.filter_by(character_id=character.id, name=name).one_or_none()
if timer is None:
raise util.ItemNotFoundError(name)
prev = timer.value
if prev is None:
prev = timer.initial
timer.value = value
ctx.session.commit()
description = "{}'s {}: `{} => {}`".format(
str(character), timer.name, prev, timer.value)
await util.send_embed(ctx, description=description)
@group.command(aliases=['reset'], ignore_extra=False)
async def start(self, ctx, *, name: str):
'''
Starts the specified timer with the initial value
'''
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
timer = ctx.session.query(m.Timer)\
.filter_by(character_id=character.id, name=name).one_or_none()
if timer is None:
raise util.ItemNotFoundError(name)
timer.value = timer.initial
ctx.session.commit()
description = "{}'s {} started at {}".format(str(character), timer.name, timer.value)
await util.send_embed(ctx, description=description)
@group.command(ignore_extra=False)
async def stop(self, ctx, *, name: str):
'''
Stops the specified timer, removing its current value
'''
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
timer = ctx.session.query(m.Timer)\
.filter_by(character_id=character.id, name=name).one_or_none()
if timer is None:
raise util.ItemNotFoundError(name)
timer.value = None
ctx.session.commit()
description = "{}'s {} stopped".format(str(character), timer.name)
await util.send_embed(ctx, description=description)
@group.command(ignore_extra=False)
async def stopall(self, ctx):
'''
Stops all timers for the character
'''
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
for timer in character.timers:
if timer.value is not None:
timer.value = None
ctx.session.commit()
description = "All of {}'s timers are stopped".format(str(character))
await util.send_embed(ctx, description=description)
@group.command(ignore_extra=False)
async def tick(self, ctx):
'''
Changes all running timers by their deltas
'''
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
description = ''
for timer in character.timers:
if timer.value is not None:
prev = timer.value
timer.value += timer.delta
description += "{}'s {} ({:+}): `{} => {}`\n".format(
str(character), timer.name, timer.delta, prev, timer.value)
ctx.session.commit()
description += "{}'s turn is over".format(str(character))
await util.send_embed(ctx, description=description)
@commands.command(ignore_extra=False)
async def endturn(self, ctx):
'''
Changes all running timers by their deltas
'''
await ctx.invoke(self.tick)
@group.command()
async def check(self, ctx, *, name: str):
'''
Checks the status of a timer
Parameters:
[name*] the name of the timer
'''
name = util.strip_quotes(name)
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
timer = ctx.session.query(m.Timer)\
.filter_by(character_id=character.id, name=name).one_or_none()
if timer is None:
raise util.ItemNotFoundError(name)
await util.send_embed(ctx, description=str(timer))
@group.command(ignore_extra=False)
async def list(self, ctx):
'''
Lists all of a character's timers
'''
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
await util.inspector(ctx, character, 'timers')
@group.command(aliases=['delete'])
async def remove(self, ctx, *, name: str):
'''
Deletes a timer from the character
Parameters:
[name*] the name of the timer
'''
name = util.strip_quotes(name)
character = util.get_character(ctx.session, ctx.author.id, ctx.guild.id)
timer = ctx.session.query(m.Timer)\
.filter_by(character_id=character.id, name=name).one_or_none()
if timer is None:
raise util.ItemNotFoundError(name)
ctx.session.delete(timer)
ctx.session.commit()
await util.send_embed(ctx, description='{} removed'.format(str(timer)))
@group.command()
async def inspect(self, ctx, *, name: str):
'''
Lists the timers for a specified character
Parameters:
[name*] the name of the character to inspect
'''
name = util.strip_quotes(name)
await util.inspector(ctx, name, 'timers')
def setup(bot):
bot.add_cog(TimerCategory(bot))
|
|
"""
Create a boolean mask indicating whether each pixel of a texture is constrained
(True) or free (False).
Written by Zachary Ferguson
"""
import itertools
import logging
import numpy
import scipy
import scipy.sparse
from tqdm import tqdm
from .seam_intervals import compute_edge_intervals
from .points_in_triangle import points_in_triangle
from .util import UV_to_XY, pairwise, lerp_UV, surrounding_pixels
def get_all_surrounding_pixels(edges, width, height):
"""
Get a set of all pixels surrounding the given edges.
Input:
edges - unsorted list of UV edges
width - width of the texture
height - height of the texture
Output:
Returns a set of (X, Y) coordinates for the surrounding pixels.
"""
# TODO: This could be improved for better performance
pixels = set()
for edge in edges:
interval = sorted(list(compute_edge_intervals(edge, width, height)))
# Find all pixels along the seam
for a, b in pairwise(interval):
uv_mid = lerp_UV((a + b) / 2.0, edge[0], edge[1])
pixels |= set(surrounding_pixels(
uv_mid, width, height, as_tuple=True))
return pixels
def mask_seam(mesh, seam_edges, width, height, seam_pixels=None):
"""
    Create a boolean mask indicating whether each pixel of a texture is
    constrained (True) or free (False). Pixels along the seam are not constrained.
    Steps:
    1. Find all pixels along the seam_edges using get_all_surrounding_pixels()
    2. Mark these pixels in the mask as False.
Inputs:
        mesh - an OBJ recordclass
seam_edges - an unsorted list of edges along the seam
width - width of texture/mask
height - height of texture/mask
Output:
        Constructs a height x width array of booleans. True if the pixel
is part of the mask/foreground. False if the pixel is part of the
background.
"""
# Store the surrounding pixels XY coords in pixels
seam_pixels = get_all_surrounding_pixels(seam_edges, width, height)
vals = numpy.full(len(seam_pixels), True, dtype=bool)
coords = numpy.array(list(seam_pixels))
mask = scipy.sparse.coo_matrix((vals, (coords[:, 1], coords[:, 0])),
shape=(height, width)).A.astype(bool)
return ~mask
def mask_inside_seam(mesh, seam_edges, width, height):
"""
    Create a boolean mask indicating whether each pixel of a texture is
    constrained (True) or free (False).
    Steps:
    1. Find all pixels along the seam_edges using get_all_surrounding_pixels()
    2. Test these pixels against the triangles of the mesh in UV-space.
        a. If the pixel is outside all of the triangles, mark it as free (False)
        b. Else, if the pixel is inside at least one triangle, mark it as
            constrained (True)
Inputs:
        mesh - an OBJ recordclass
seam_edges - an unsorted list of edges along the seam
width - width of texture/mask
height - height of texture/mask
Output:
        Constructs a height x width array of booleans. True if the pixel
is part of the mask/foreground. False if the pixel is part of the
background.
"""
# Store the surrounding pixels XY coords in pixels
seam_pixels = get_all_surrounding_pixels(seam_edges, width, height)
# Create a list of the UV faces in Pixel space
faces = [
numpy.array([UV_to_XY(mesh.vt[fv.vt], width, height) for fv in face])
for face in mesh.f
]
# This mask should be small enough for a dense matrix
mask = numpy.zeros((height, width), dtype=bool)
# Constrain all the pixels in seam_pixels that are inside a face
pts = numpy.array(list(seam_pixels))
disable_pbar = logging.getLogger().getEffectiveLevel() > logging.INFO
for i, face in enumerate(tqdm(faces, unit="faces", disable=disable_pbar,
desc="Building Least Squares Constraints")):
# Create a bounding box for the face
ll = numpy.array([face[:, 0].min(), face[:, 1].min()])
ur = numpy.array([face[:, 0].max(), face[:, 1].max()])
# Intersect the bounding box with the seam pixels
inidx = numpy.all(numpy.logical_and(ll <= pts, pts <= ur), axis=1)
inbox = pts[inidx]
# Only test seam pixels inside the bounding_box
if(inbox.shape[0] > 0):
mask[inbox[:, 1], inbox[:, 0]] |= points_in_triangle(face, inbox)
    # At this point `mask` is True for seam pixels that fall inside a face.
    # XOR-ing with the set of all seam pixels leaves True only for seam pixels
    # outside every face; the final inversion marks those pixels as free (False).
vals = numpy.full(len(seam_pixels), True, dtype=bool)
coords = numpy.array(list(seam_pixels))
full = scipy.sparse.coo_matrix((vals, (coords[:, 1], coords[:, 0])),
shape=mask.shape).A
mask = full ^ mask
return ~mask
def mask_inside_faces(mesh, width, height, init_mask=None):
"""
    Create a boolean mask indicating whether each pixel of a texture is
    constrained (True) or free (False). A pixel is marked free (False) if it
    falls inside any of the mesh's triangles.
Inputs:
        mesh - an OBJ recordclass
width - width of texture/mask
height - height of texture/mask
init_mask - a mask of size height x width to start from.
(Default: None -> initial mask of all False)
Output:
        Constructs a height x width array of booleans. True if the pixel
is part of the mask/foreground. False if the pixel is part of the
background.
"""
# Create a list of the UV faces in Pixel space
faces = [
numpy.array([UV_to_XY(mesh.vt[fv.vt], width, height) for fv in face])
for face in mesh.f
]
# This mask should be small enough for a dense matrix
mask = init_mask
if(mask is None):
mask = numpy.zeros((height, width), dtype=bool)
disable_pbar = logging.getLogger().getEffectiveLevel() > logging.INFO
for i, face in enumerate(tqdm(faces, unit="faces", disable=disable_pbar,
desc="Building Dirichlet Energy")):
# Bounding box for the face to get surrounding pixels.
ll = numpy.array([face[:, 0].min(), face[:, 1].min()])
ur = numpy.array([face[:, 0].max(), face[:, 1].max()])
bbox = numpy.vstack([ll, ur])
xRange = range(
max(0, int(bbox[0][0])),
min(width,
int(numpy.ceil(bbox[1][0])) + 1))
yRange = range(
max(0, int(bbox[0][1])),
min(height,
int(numpy.ceil(bbox[1][1])) + 1))
inbox = numpy.array(list(itertools.product(xRange, yRange)))
# Test inside face for all pixels in the bounding box
mask[inbox[:, 1], inbox[:, 0]] |= points_in_triangle(face, inbox)
return ~mask
if __name__ == "__main__":
import obj_reader
import texture
from find_seam import find_seam, seam_to_UV
mesh = obj_reader.quads_to_triangles(
obj_reader.load_obj('../models/cow.obj'))
texture = numpy.array(
texture.load_texture("../textures/cow/Cow_Monster_N.png"))
height, width, depth = (texture.shape + (1, ))[:3]
N = width * height
textureVec = texture.reshape(N, -1)
logging.info("\nFinding seam of model")
seam, boundary, foldovers = find_seam(mesh)
uv_seam, uv_boundary, uv_foldovers = seam_to_UV(
mesh, seam, boundary, foldovers)
logging.info("Done\n")
logging.info("Number of edges along the seam: %d" % (len(seam) * 2))
logging.info("Number of edges along the boundary: %d" % len(boundary))
logging.info("Number of foldover edges: %d\n" % len(foldovers))
# Find all of the seam loops
bag_of_edges = ([edge for edgepair in uv_seam
for edge in edgepair] + uv_boundary + uv_foldovers)
# Constrain the values
logging.info("Mask Inside Seam")
lsq_mask = mask_inside_seam(mesh, bag_of_edges, width, height)
logging.info("Mask Seam")
lsq1_mask = mask_seam(mesh, bag_of_edges, width, height)
logging.info("XOR")
lsq2_mask = lsq_mask ^ lsq1_mask
# Construct a dirichlet energy for the texture.
logging.info("Mark Inside Faces")
dirichlet_mask = mask_inside_faces(
mesh, width, height, init_mask=~lsq_mask)
|
|
#!/usr/bin/env python
"""
A program to solve the 3D incompressible magnetohydrodynamics equations using the
implicit midpoint rule.
The program is based on the Orszag-Patterson algorithm as documented on pg. 98
of C. Canuto, M.Y. Hussaini, A. Quarteroni and T.A. Zang,
"Spectral Methods: Evolution to Complex Geometries and Applications to Fluid Dynamics",
Springer (2007).
The Helmholtz decomposition is used to project the magnetic field onto a
divergence-free subspace. Initial work on this has been done with Damian San Roman Alerigi.
More information on visualization can be found on the Mayavi
website, in particular:
http://github.enthought.com/mayavi/mayavi/mlab.html
which was last checked on 6 April 2012
"""
import math
import numpy
from mayavi import mlab
import matplotlib.pyplot as plt
import time
# Grid
Lx=1.0 # Period 2*pi*Lx
Ly=1.0 # Period 2*pi*Ly
Lz=1.0 # Period 2*pi*Lz
Nx=64 # Number of harmonics
Ny=64 # Number of harmonics
Nz=64 # Number of harmonics
Nt=20 # Number of time slices
tmax=0.2 # Maximum time
dt=tmax/Nt # time step
t=0.0 # initial time
Re=1.0 # Reynolds number
Rem=1.0 # Magnetic Reynolds number
tol=0.1**(10) # tolerance for fixed point iterations
x = [i*2.0*math.pi*(Lx/Nx) for i in xrange(-Nx/2,1+Nx/2)]
y = [i*2.0*math.pi*(Ly/Ny) for i in xrange(-Ny/2,1+Ny/2)]
z = [i*2.0*math.pi*(Lz/Nz) for i in xrange(-Nz/2,1+Nz/2)]
k_x = (1.0/Lx)*numpy.array([complex(0,1)*n for n in range(0,Nx/2) \
+ [0] + range(-Nx/2+1,0)])
k_y = (1.0/Ly)*numpy.array([complex(0,1)*n for n in range(0,Ny/2) \
+ [0] + range(-Ny/2+1,0)])
k_z = (1.0/Lz)*numpy.array([complex(0,1)*n for n in range(0,Nz/2) \
+ [0] + range(-Nz/2+1,0)])
kxm=numpy.zeros((Nx,Ny,Nz), dtype=complex)
kym=numpy.zeros((Nx,Ny,Nz), dtype=complex)
kzm=numpy.zeros((Nx,Ny,Nz), dtype=complex)
k2xm=numpy.zeros((Nx,Ny,Nz), dtype=float)
k2ym=numpy.zeros((Nx,Ny,Nz), dtype=float)
k2zm=numpy.zeros((Nx,Ny,Nz), dtype=float)
xx=numpy.zeros((Nx,Ny,Nz), dtype=float)
yy=numpy.zeros((Nx,Ny,Nz), dtype=float)
zz=numpy.zeros((Nx,Ny,Nz), dtype=float)
for i in xrange(Nx):
for j in xrange(Ny):
for k in xrange(Nz):
kxm[i,j,k] = k_x[i]
kym[i,j,k] = k_y[j]
kzm[i,j,k] = k_z[k]
k2xm[i,j,k] = numpy.real(k_x[i]**2)
k2ym[i,j,k] = numpy.real(k_y[j]**2)
k2zm[i,j,k] = numpy.real(k_z[k]**2)
xx[i,j,k] = x[i]
yy[i,j,k] = y[j]
zz[i,j,k] = z[k]
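# The triple loop above can equivalently be written with numpy.meshgrid. The
# helper below is an illustrative sketch only and is never called; the
# explicit loops remain what this script actually uses.
def build_wavenumber_grids(k_x, k_y, k_z, x, y, z, Nx, Ny, Nz):
    kxm, kym, kzm = numpy.meshgrid(k_x, k_y, k_z, indexing='ij')
    k2xm = numpy.real(kxm**2)
    k2ym = numpy.real(kym**2)
    k2zm = numpy.real(kzm**2)
    xx, yy, zz = numpy.meshgrid(x[:Nx], y[:Ny], z[:Nz], indexing='ij')
    return kxm, kym, kzm, k2xm, k2ym, k2zm, xx, yy, zz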
# allocate arrays
u=numpy.zeros((Nx,Ny,Nz), dtype=float)
uold=numpy.zeros((Nx,Ny,Nz), dtype=float)
v=numpy.zeros((Nx,Ny,Nz), dtype=float)
vold=numpy.zeros((Nx,Ny,Nz), dtype=float)
w=numpy.zeros((Nx,Ny,Nz), dtype=float)
wold=numpy.zeros((Nx,Ny,Nz), dtype=float)
bx=numpy.zeros((Nx,Ny,Nz), dtype=float)
bxold=numpy.zeros((Nx,Ny,Nz), dtype=float)
by=numpy.zeros((Nx,Ny,Nz), dtype=float)
byold=numpy.zeros((Nx,Ny,Nz), dtype=float)
bz=numpy.zeros((Nx,Ny,Nz), dtype=float)
bzold=numpy.zeros((Nx,Ny,Nz), dtype=float)
utemp=numpy.zeros((Nx,Ny,Nz), dtype=float)
vtemp=numpy.zeros((Nx,Ny,Nz), dtype=float)
wtemp=numpy.zeros((Nx,Ny,Nz), dtype=float)
bxtemp=numpy.zeros((Nx,Ny,Nz), dtype=float)
bytemp=numpy.zeros((Nx,Ny,Nz), dtype=float)
bztemp=numpy.zeros((Nx,Ny,Nz), dtype=float)
omegax=numpy.zeros((Nx,Ny,Nz), dtype=float)
omegay=numpy.zeros((Nx,Ny,Nz), dtype=float)
omegaz=numpy.zeros((Nx,Ny,Nz), dtype=float)
omegatot=numpy.zeros((Nx,Ny,Nz), dtype=float)
omegabx=numpy.zeros((Nx,Ny,Nz), dtype=float)
omegaby=numpy.zeros((Nx,Ny,Nz), dtype=float)
omegabz=numpy.zeros((Nx,Ny,Nz), dtype=float)
omegabtot=numpy.zeros((Nx,Ny,Nz), dtype=float)
ux=numpy.zeros((Nx,Ny,Nz), dtype=float)
uy=numpy.zeros((Nx,Ny,Nz), dtype=float)
uz=numpy.zeros((Nx,Ny,Nz), dtype=float)
vx=numpy.zeros((Nx,Ny,Nz), dtype=float)
vy=numpy.zeros((Nx,Ny,Nz), dtype=float)
vz=numpy.zeros((Nx,Ny,Nz), dtype=float)
wx=numpy.zeros((Nx,Ny,Nz), dtype=float)
wy=numpy.zeros((Nx,Ny,Nz), dtype=float)
wz=numpy.zeros((Nx,Ny,Nz), dtype=float)
uxold=numpy.zeros((Nx,Ny,Nz), dtype=float)
uyold=numpy.zeros((Nx,Ny,Nz), dtype=float)
uzold=numpy.zeros((Nx,Ny,Nz), dtype=float)
vxold=numpy.zeros((Nx,Ny,Nz), dtype=float)
vyold=numpy.zeros((Nx,Ny,Nz), dtype=float)
vzold=numpy.zeros((Nx,Ny,Nz), dtype=float)
wxold=numpy.zeros((Nx,Ny,Nz), dtype=float)
wyold=numpy.zeros((Nx,Ny,Nz), dtype=float)
wzold=numpy.zeros((Nx,Ny,Nz), dtype=float)
bxx=numpy.zeros((Nx,Ny,Nz), dtype=float)
bxy=numpy.zeros((Nx,Ny,Nz), dtype=float)
bxz=numpy.zeros((Nx,Ny,Nz), dtype=float)
byx=numpy.zeros((Nx,Ny,Nz), dtype=float)
byy=numpy.zeros((Nx,Ny,Nz), dtype=float)
byz=numpy.zeros((Nx,Ny,Nz), dtype=float)
bzx=numpy.zeros((Nx,Ny,Nz), dtype=float)
bzy=numpy.zeros((Nx,Ny,Nz), dtype=float)
bzz=numpy.zeros((Nx,Ny,Nz), dtype=float)
bxxold=numpy.zeros((Nx,Ny,Nz), dtype=float)
bxyold=numpy.zeros((Nx,Ny,Nz), dtype=float)
bxzold=numpy.zeros((Nx,Ny,Nz), dtype=float)
byxold=numpy.zeros((Nx,Ny,Nz), dtype=float)
byyold=numpy.zeros((Nx,Ny,Nz), dtype=float)
byzold=numpy.zeros((Nx,Ny,Nz), dtype=float)
bzxold=numpy.zeros((Nx,Ny,Nz), dtype=float)
bzyold=numpy.zeros((Nx,Ny,Nz), dtype=float)
bzzold=numpy.zeros((Nx,Ny,Nz), dtype=float)
nonlinu=numpy.zeros((Nx,Ny,Nz), dtype=float)
nonlinv=numpy.zeros((Nx,Ny,Nz), dtype=float)
nonlinw=numpy.zeros((Nx,Ny,Nz), dtype=float)
uhat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
what=numpy.zeros((Nx,Ny,Nz), dtype=complex)
vhat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
bxhat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
byhat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
bzhat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
phat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
temphat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
rhsuhatfix=numpy.zeros((Nx,Ny,Nz), dtype=complex)
rhsvhatfix=numpy.zeros((Nx,Ny,Nz), dtype=complex)
rhswhatfix=numpy.zeros((Nx,Ny,Nz), dtype=complex)
rhsbxhatfix=numpy.zeros((Nx,Ny,Nz), dtype=complex)
rhsbyhatfix=numpy.zeros((Nx,Ny,Nz), dtype=complex)
rhsbzhatfix=numpy.zeros((Nx,Ny,Nz), dtype=complex)
nonlinuhat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
nonlinvhat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
nonlinwhat=numpy.zeros((Nx,Ny,Nz), dtype=complex)
tdata=numpy.zeros((Nt), dtype=float)
# initial conditions for Taylor-Green Vortex
theta=0.0
u=(2.0/(3.0**0.5))*numpy.sin(theta+2.0*math.pi/3.0)*numpy.sin(xx)*numpy.cos(yy)*numpy.cos(zz)
v=(2.0/(3.0**0.5))*numpy.sin(theta-2.0*math.pi/3.0)*numpy.cos(xx)*numpy.sin(yy)*numpy.cos(zz)
w=(2.0/(3.0**0.5))*numpy.sin(theta)*numpy.cos(xx)*numpy.cos(yy)*numpy.sin(zz)
# Exact solution
#sl=1
#sk=1
#sm=1
#lamlkm=(sl**2.0+sk**2.0+sm**2.0)**0.5
#u=-0.5*(lamlkm*sl*numpy.cos(sk*xx)*numpy.sin(sl*yy)*numpy.sin(sm*zz) \
#+sm*sk*numpy.sin(sk*xx)*numpy.cos(sl*yy)*numpy.cos(sm*zz))*numpy.exp(-t*(lamlkm**2.0)/Rey)
#v= 0.5*(lamlkm*sk*numpy.sin(sk*xx)*numpy.cos(sl*yy)*numpy.sin(sm*zz) \
#-sm*sl*numpy.cos(sk*xx)*numpy.sin(sl*yy)*numpy.cos(sm*zz))*numpy.exp(-t*(lamlkm**2.0)/Rey)
#w= numpy.cos(sk*xx)*numpy.cos(sl*yy)*numpy.sin(sm*zz)*numpy.exp(-t*(lamlkm**2.0)/Rey)
# initial fluid field terms
uhat=numpy.fft.fftn(u)
vhat=numpy.fft.fftn(v)
what=numpy.fft.fftn(w)
temphat=kxm*uhat
ux=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*uhat
uy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*uhat
uz=numpy.real(numpy.fft.ifftn(temphat))
temphat=kxm*vhat
vx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*vhat
vy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*vhat
vz=numpy.real(numpy.fft.ifftn(temphat))
temphat=kxm*what
wx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*what
wy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*what
wz=numpy.real(numpy.fft.ifftn(temphat))
# Calculate fluid vorticity for plotting
omegax=wy-vz
omegay=uz-wx
omegaz=vx-uy
omegatot=omegax**2.0 + omegay**2.0 + omegaz**2.0
# initial magnetic field terms
bxhat=numpy.fft.fftn(bx)
byhat=numpy.fft.fftn(by)
bzhat=numpy.fft.fftn(bz)
temphat=kxm*bxhat
bxx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*bxhat
bxy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*bxhat
bxz=numpy.real(numpy.fft.ifftn(temphat))
temphat=kxm*byhat
byx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*byhat
byy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*byhat
byz=numpy.real(numpy.fft.ifftn(temphat))
temphat=kxm*bzhat
bzx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*bzhat
bzy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*bzhat
bzz=numpy.real(numpy.fft.ifftn(temphat))
# Calculate magnetic vorticity for plotting
omegabx=bzy-byz
omegaby=bxz-bzx
omegabz=byx-bxy
omegabtot=omegabx**2.0 + omegaby**2.0 + omegabz**2.0
#src=mlab.contour3d(xx,yy,zz,u,colormap='jet',opacity=0.1,contours=4)
src = mlab.pipeline.scalar_field(xx,yy,zz,omegatot,colormap='YlGnBu')
mlab.pipeline.iso_surface(src, contours=[omegatot.min()+0.1*omegatot.ptp(), ], \
colormap='YlGnBu',opacity=0.85)
mlab.pipeline.iso_surface(src, contours=[omegatot.max()-0.1*omegatot.ptp(), ], \
colormap='YlGnBu',opacity=1.0)
#src = mlab.pipeline.scalar_field(xx,yy,zz,omegabtot,colormap='YlGnBu')
#mlab.pipeline.iso_surface(src, contours=[omegabtot.min()+0.1*omegatot.ptp(), ], \
# colormap='YlGnBu',opacity=0.85)
#mlab.pipeline.iso_surface(src, contours=[omegabtot.max()-0.1*omegatot.ptp(), ], \
# colormap='YlGnBu',opacity=1.0)
mlab.pipeline.image_plane_widget(src,plane_orientation='z_axes', \
slice_index=Nz/2,colormap='YlGnBu', \
opacity=0.01)
mlab.pipeline.image_plane_widget(src,plane_orientation='y_axes', \
slice_index=Ny/2,colormap='YlGnBu', \
opacity=0.01)
mlab.pipeline.image_plane_widget(src,plane_orientation='x_axes', \
slice_index=Nx/2,colormap='YlGnBu', \
opacity=0.01)
mlab.scalarbar()
mlab.xlabel('x',object=src)
mlab.ylabel('y',object=src)
mlab.zlabel('z',object=src)
t=0.0
tdata[0]=t
#solve pde and plot results
for n in xrange(Nt):
uold=u
uxold=ux
uyold=uy
uzold=uz
vold=v
vxold=vx
vyold=vy
vzold=vz
wold=w
wxold=wx
wyold=wy
wzold=wz
bxold=bx
bxxold=bxx
bxyold=bxy
bxzold=bxz
byold=by
byxold=byx
byyold=byy
byzold=byz
bzold=bz
bzxold=bzx
bzyold=bzy
bzzold=bzz
rhsuhatfix=(1.0/dt + (0.5/Re)*(k2xm+k2ym+k2zm))*uhat
rhsvhatfix=(1.0/dt + (0.5/Re)*(k2xm+k2ym+k2zm))*vhat
rhswhatfix=(1.0/dt + (0.5/Re)*(k2xm+k2ym+k2zm))*what
rhsbxhatfix=(1.0/dt + (0.5/Rem)*(k2xm+k2ym+k2zm))*bxhat
rhsbyhatfix=(1.0/dt + (0.5/Rem)*(k2xm+k2ym+k2zm))*byhat
rhsbzhatfix=(1.0/dt + (0.5/Rem)*(k2xm+k2ym+k2zm))*bzhat
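    # Implicit midpoint treatment of the step: the fixed right-hand sides above
    # apply (1/dt + 0.5*L) to the fields at time level n, where L is the
    # Fourier-space diffusion operator scaled by 1/Re or 1/Rem, and the update
    # below divides by (1/dt - 0.5*L). The nonlinear and pressure-like terms
    # are evaluated at the time midpoint, so the whole step is converged by
    # fixed point iteration until the change drops below tol.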
chg=1.0
t=t+dt
while(chg>tol):
# Fluid field
nonlinu=0.25*((u+uold)*(ux+uxold)+(v+vold)*(uy+uyold)+(w+wold)*(uz+uzold)-\
(bx+bxold)*(bxx+bxxold)-(by+byold)*(bxy+bxyold)-(bz+bzold)*(bxz+bxzold))
nonlinv=0.25*((u+uold)*(vx+vxold)+(v+vold)*(vy+vyold)+(w+wold)*(vz+vzold)-\
(bx+bxold)*(byx+byxold)-(by+byold)*(byy+byyold)-(bz+bzold)*(byz+byzold))
nonlinw=0.25*((u+uold)*(wx+wxold)+(v+vold)*(wy+wyold)+(w+wold)*(wz+wzold)-\
(bx+bxold)*(bzx+bzxold)-(by+byold)*(bzy+bzyold)-(bz+bzold)*(bzz+bzzold))
nonlinuhat=numpy.fft.fftn(nonlinu)
nonlinvhat=numpy.fft.fftn(nonlinv)
nonlinwhat=numpy.fft.fftn(nonlinw)
phat=-1.0*(kxm*nonlinuhat+kym*nonlinvhat+kzm*nonlinwhat)/(k2xm+k2ym+k2zm+0.1**13)
uhat=(rhsuhatfix-nonlinuhat-kxm*phat)/(1.0/dt - (0.5/Re)*(k2xm+k2ym+k2zm))
vhat=(rhsvhatfix-nonlinvhat-kym*phat)/(1.0/dt - (0.5/Re)*(k2xm+k2ym+k2zm))
what=(rhswhatfix-nonlinwhat-kzm*phat)/(1.0/dt - (0.5/Re)*(k2xm+k2ym+k2zm))
temphat=kxm*uhat
ux=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*uhat
uy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*uhat
uz=numpy.real(numpy.fft.ifftn(temphat))
temphat=kxm*vhat
vx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*vhat
vy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*vhat
vz=numpy.real(numpy.fft.ifftn(temphat))
temphat=kxm*what
wx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*what
wy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*what
wz=numpy.real(numpy.fft.ifftn(temphat))
utemp=u
vtemp=v
wtemp=w
u=numpy.real(numpy.fft.ifftn(uhat))
v=numpy.real(numpy.fft.ifftn(vhat))
w=numpy.real(numpy.fft.ifftn(what))
# Magnetic field
nonlinu=0.25*((u+uold)*(bxx+bxxold)+(v+vold)*(bxy+bxyold)+(w+wold)*(bxz+bxzold)-\
(bx+bxold)*(ux+uxold)-(by+byold)*(uy+uyold)-(bz+bzold)*(uz+uzold))
nonlinv=0.25*((u+uold)*(byx+byxold)+(v+vold)*(byy+byyold)+(w+wold)*(byz+byzold)-\
(bx+bxold)*(vx+vxold)-(by+byold)*(vy+vyold)-(bz+bzold)*(vz+vzold))
nonlinw=0.25*((u+uold)*(bzx+bzxold)+(v+vold)*(bzy+bzyold)+(w+wold)*(bzz+bzzold)-\
(bx+bxold)*(wx+wxold)-(by+byold)*(wy+wyold)-(bz+bzold)*(wz+wzold))
nonlinuhat=numpy.fft.fftn(nonlinu)
nonlinvhat=numpy.fft.fftn(nonlinv)
nonlinwhat=numpy.fft.fftn(nonlinw)
phat=-1.0*(kxm*nonlinuhat+kym*nonlinvhat+kzm*nonlinwhat)/(k2xm+k2ym+k2zm+0.1**13)
bxhat=(rhsbxhatfix-nonlinuhat-kxm*phat)/(1.0/dt - (0.5/Rem)*(k2xm+k2ym+k2zm))
byhat=(rhsbyhatfix-nonlinvhat-kym*phat)/(1.0/dt - (0.5/Rem)*(k2xm+k2ym+k2zm))
bzhat=(rhsbzhatfix-nonlinwhat-kzm*phat)/(1.0/dt - (0.5/Rem)*(k2xm+k2ym+k2zm))
temphat=kxm*bxhat
bxx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*bxhat
bxy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*bxhat
bxz=numpy.real(numpy.fft.ifftn(temphat))
temphat=kxm*byhat
byx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*byhat
byy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*byhat
byz=numpy.real(numpy.fft.ifftn(temphat))
temphat=kxm*bzhat
bzx=numpy.real(numpy.fft.ifftn(temphat))
temphat=kym*bzhat
bzy=numpy.real(numpy.fft.ifftn(temphat))
temphat=kzm*bzhat
bzz=numpy.real(numpy.fft.ifftn(temphat))
bxtemp=bx
bytemp=by
bztemp=bz
bx=numpy.real(numpy.fft.ifftn(bxhat))
by=numpy.real(numpy.fft.ifftn(byhat))
bz=numpy.real(numpy.fft.ifftn(bzhat))
chg=numpy.max(abs(u-utemp))+numpy.max(abs(v-vtemp))+numpy.max(abs(w-wtemp))+\
numpy.max(abs(bx-bxtemp))+numpy.max(abs(by-bytemp))+numpy.max(abs(bz-bztemp))
# calculate vorticity for plotting
omegax=wy-vz
omegay=uz-wx
omegaz=vx-uy
omegatot=omegax**2.0 + omegay**2.0 + omegaz**2.0
src.mlab_source.scalars = omegatot
tdata[n]=t
omegabx=bzy-byz
omegaby=bxz-bzx
omegabz=byx-bxy
omegabtot=omegabx**2.0 + omegaby**2.0 + omegabz**2.0
#src.mlab_source.scalars = omegatot
|
|
import sys
import numpy as np
from AnyQt.QtWidgets import QLayout
from Orange.base import SklLearner
from Orange.classification import OneClassSVMLearner, EllipticEnvelopeLearner
from Orange.data import Table, Domain, ContinuousVariable
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
from Orange.widgets.widget import Msg
from Orange.widgets.utils.sql import check_sql_input
class OWOutliers(widget.OWWidget):
name = "Outliers"
description = "Detect outliers."
icon = "icons/Outliers.svg"
priority = 3000
category = "Data"
keywords = ["data", "outlier", "inlier"]
inputs = [("Data", Table, "set_data")]
outputs = [("Inliers", Table), ("Outliers", Table)]
want_main_area = False
OneClassSVM, Covariance = range(2)
outlier_method = Setting(OneClassSVM)
nu = Setting(50)
gamma = Setting(0.01)
cont = Setting(10)
empirical_covariance = Setting(False)
support_fraction = Setting(1)
data_info_default = 'No data on input.'
in_out_info_default = ' '
class Error(widget.OWWidget.Error):
singular_cov = Msg("Singular covariance matrix.")
multiclass_error = Msg("Multiple class data is not supported")
def __init__(self):
super().__init__()
self.data = None
self.n_inliers = self.n_outliers = None
box = gui.vBox(self.controlArea, "Information")
self.data_info_label = gui.widgetLabel(box, self.data_info_default)
self.in_out_info_label = gui.widgetLabel(box,
self.in_out_info_default)
box = gui.vBox(self.controlArea, "Outlier Detection Method")
detection = gui.radioButtons(box, self, "outlier_method")
gui.appendRadioButton(detection,
"One class SVM with non-linear kernel (RBF)")
ibox = gui.indentedBox(detection)
tooltip = "An upper bound on the fraction of training errors and a " \
"lower bound of the fraction of support vectors"
gui.widgetLabel(ibox, 'Nu:', tooltip=tooltip)
self.nu_slider = gui.hSlider(
ibox, self, "nu", minValue=1, maxValue=100, ticks=10,
labelFormat="%d %%", callback=self.nu_changed, tooltip=tooltip)
self.gamma_spin = gui.spin(
ibox, self, "gamma", label="Kernel coefficient:", step=1e-2,
spinType=float, minv=0.01, maxv=10, callback=self.gamma_changed)
gui.separator(detection, 12)
self.rb_cov = gui.appendRadioButton(detection, "Covariance estimator")
ibox = gui.indentedBox(detection)
self.l_cov = gui.widgetLabel(ibox, 'Contamination:')
self.cont_slider = gui.hSlider(
ibox, self, "cont", minValue=0, maxValue=100, ticks=10,
labelFormat="%d %%", callback=self.cont_changed)
ebox = gui.hBox(ibox)
self.cb_emp_cov = gui.checkBox(
ebox, self, "empirical_covariance",
"Support fraction:", callback=self.empirical_changed)
self.support_fraction_spin = gui.spin(
ebox, self, "support_fraction", step=1e-1, spinType=float,
minv=0.1, maxv=10, callback=self.support_fraction_changed)
gui.separator(detection, 12)
gui.button(self.buttonsArea, self, "Detect Outliers",
callback=self.commit)
self.layout().setSizeConstraint(QLayout.SetFixedSize)
def nu_changed(self):
self.outlier_method = self.OneClassSVM
def gamma_changed(self):
self.outlier_method = self.OneClassSVM
def cont_changed(self):
self.outlier_method = self.Covariance
def support_fraction_changed(self):
self.outlier_method = self.Covariance
def empirical_changed(self):
self.outlier_method = self.Covariance
def disable_covariance(self):
self.outlier_method = self.OneClassSVM
self.rb_cov.setDisabled(True)
self.l_cov.setDisabled(True)
self.cont_slider.setDisabled(True)
self.cb_emp_cov.setDisabled(True)
self.support_fraction_spin.setDisabled(True)
self.warning('Too many features for covariance estimation.')
def enable_covariance(self):
self.rb_cov.setDisabled(False)
self.l_cov.setDisabled(False)
self.cont_slider.setDisabled(False)
self.cb_emp_cov.setDisabled(False)
self.support_fraction_spin.setDisabled(False)
self.warning()
@check_sql_input
def set_data(self, dataset):
self.data = dataset
if self.data is None:
self.data_info_label.setText(self.data_info_default)
self.in_out_info_label.setText(self.in_out_info_default)
else:
self.data_info_label.setText('%d instances' % len(self.data))
self.in_out_info_label.setText(' ')
self.enable_covariance()
if self.data and len(self.data.domain.attributes) > 1500:
self.disable_covariance()
self.commit()
def commit(self):
self.clear_messages()
inliers = outliers = None
self.n_inliers = self.n_outliers = None
if self.data is not None and len(self.data) > 0:
if self.data.Y.ndim > 1:
self.Error.multiclass_error()
else:
try:
y_pred = self.detect_outliers()
except ValueError:
self.Error.singular_cov()
self.in_out_info_label.setText(self.in_out_info_default)
else:
inliers_ind = np.where(y_pred == 1)[0]
outliers_ind = np.where(y_pred == -1)[0]
inliers = Table(self.new_domain, self.new_data, inliers_ind)
outliers = Table(self.new_domain,
self.new_data, outliers_ind)
self.in_out_info_label.setText(
"{} inliers, {} outliers".format(len(inliers),
len(outliers)))
self.n_inliers = len(inliers)
self.n_outliers = len(outliers)
self.send("Inliers", inliers)
self.send("Outliers", outliers)
def detect_outliers(self):
if self.outlier_method == self.OneClassSVM:
learner = OneClassSVMLearner(
gamma=self.gamma, nu=self.nu / 100,
preprocessors=SklLearner.preprocessors)
else:
learner = EllipticEnvelopeLearner(
support_fraction=self.support_fraction
if self.empirical_covariance else None,
contamination=self.cont / 100.)
model = learner(self.data)
y_pred = model(self.data)
self.add_metas(model)
return np.array(y_pred)
def add_metas(self, model):
if self.outlier_method == self.Covariance:
mahal = model.mahalanobis(self.data.X)
mahal = mahal.reshape(len(self.data), 1)
attrs = self.data.domain.attributes
classes = self.data.domain.class_vars
new_metas = list(self.data.domain.metas) + \
[ContinuousVariable(name="Mahalanobis")]
self.new_domain = Domain(attrs, classes, new_metas)
self.new_data = Table(self.new_domain, self.data)
self.new_data.metas = np.hstack((self.data.metas, mahal))
else:
self.new_domain = self.data.domain
self.new_data = self.data
def send_report(self):
if self.n_outliers is None or self.n_inliers is None:
return
self.report_items("Data",
(("Input instances", len(self.data)),
("Inliers", self.n_inliers),
("Outliers", self.n_outliers)))
        if self.outlier_method == self.OneClassSVM:
self.report_items(
"Detection",
(("Detection method",
"One class SVM with non-linear kernel (RBF)"),
("Regularization (nu)", self.nu),
("Kernel coefficient", self.gamma)))
else:
self.report_items(
"Detection",
(("Detection method", "Covariance estimator"),
("Contamination", self.cont),
("Support fraction", self.support_fraction)))
def test_main():
from AnyQt.QtWidgets import QApplication
app = QApplication([])
data = Table("iris")
w = OWOutliers()
w.set_data(data)
w.commit()
w.show()
return app.exec_()
if __name__ == "__main__":
sys.exit(test_main())
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for gclient.py.
See gclient_smoketest.py for integration tests.
"""
import Queue
import copy
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gclient
import gclient_utils
from testing_support import trial_dir
def write(filename, content):
"""Writes the content of a file and create the directories as needed."""
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(filename, 'w') as f:
f.write(content)
class SCMMock(object):
def __init__(self, unit_test, url):
self.unit_test = unit_test
self.url = url
def RunCommand(self, command, options, args, file_list):
self.unit_test.assertEquals('None', command)
self.unit_test.processed.put(self.url)
def FullUrlForRelativeUrl(self, url):
return self.url + url
# pylint: disable=R0201
def DoesRemoteURLMatch(self, _):
return True
def GetActualRemoteURL(self, _):
return self.url
class GclientTest(trial_dir.TestCase):
def setUp(self):
super(GclientTest, self).setUp()
self.processed = Queue.Queue()
self.previous_dir = os.getcwd()
os.chdir(self.root_dir)
# Manual mocks.
self._old_createscm = gclient.gclient_scm.CreateSCM
gclient.gclient_scm.CreateSCM = self._createscm
self._old_sys_stdout = sys.stdout
sys.stdout = gclient.gclient_utils.MakeFileAutoFlush(sys.stdout)
sys.stdout = gclient.gclient_utils.MakeFileAnnotated(sys.stdout)
def tearDown(self):
self.assertEquals([], self._get_processed())
gclient.gclient_scm.CreateSCM = self._old_createscm
sys.stdout = self._old_sys_stdout
os.chdir(self.previous_dir)
super(GclientTest, self).tearDown()
def _createscm(self, parsed_url, root_dir, name, out_fh=None, out_cb=None):
self.assertTrue(parsed_url.startswith('svn://example.com/'), parsed_url)
self.assertTrue(root_dir.startswith(self.root_dir), root_dir)
return SCMMock(self, parsed_url)
def testDependencies(self):
self._dependencies('1')
def testDependenciesJobs(self):
self._dependencies('1000')
def _dependencies(self, jobs):
"""Verifies that dependencies are processed in the right order.
    e.g. if there is a dependency 'src' and another 'src/third_party/bar', then
    bar isn't fetched until 'src' is done.
    Also tests that a From() dependency is not processed when it is listed
    as a requirement.
Args:
|jobs| is the number of parallel jobs simulated.
"""
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', jobs])
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "bar", "url": "svn://example.com/bar" },\n'
' { "name": "bar/empty", "url": "svn://example.com/bar_empty" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",\n'
# This one will depend on dir1/dir2 in bar.
' "foo/dir1/dir2/dir3": "/dir1/dir2/dir3",\n'
' "foo/dir1/dir2/dir3/dir4": "/dir1/dir2/dir3/dir4",\n'
' "foo/dir1/dir2/dir5/dir6":\n'
' From("foo/dir1/dir2/dir3/dir4", "foo/dir1/dir2"),\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
        # There are two foo/dir1/dir2 deps. This one is fetched as bar/dir1/dir2.
' "foo/dir1/dir2": "/dir1/dir2",\n'
'}')
write(
os.path.join('bar/empty', 'DEPS'),
'deps = {\n'
'}')
# Test From()
write(
os.path.join('foo/dir1/dir2/dir3/dir4', 'DEPS'),
'deps = {\n'
# This one should not be fetched or set as a requirement.
' "foo/dir1/dir2/dir5": "svn://example.com/x",\n'
# This foo/dir1/dir2 points to a different url than the one in bar.
' "foo/dir1/dir2": "/dir1/another",\n'
'}')
obj = gclient.GClient.LoadCurrentConfig(options)
self._check_requirements(obj.dependencies[0], {})
self._check_requirements(obj.dependencies[1], {})
obj.RunOnDeps('None', args)
actual = self._get_processed()
first_3 = [
'svn://example.com/bar',
'svn://example.com/bar_empty',
'svn://example.com/foo',
]
if jobs != 1:
      # We don't care about the ordering of these items except that bar must be
      # before bar/empty.
self.assertTrue(
actual.index('svn://example.com/bar') <
actual.index('svn://example.com/bar_empty'))
self.assertEquals(first_3, sorted(actual[0:3]))
else:
self.assertEquals(first_3, actual[0:3])
self.assertEquals(
[
'svn://example.com/foo/dir1',
'svn://example.com/bar/dir1/dir2',
'svn://example.com/foo/dir1/dir2/dir3',
'svn://example.com/foo/dir1/dir2/dir3/dir4',
'svn://example.com/foo/dir1/dir2/dir3/dir4/dir1/another',
],
actual[3:])
self.assertEquals(3, len(obj.dependencies))
self.assertEquals('foo', obj.dependencies[0].name)
self.assertEquals('bar', obj.dependencies[1].name)
self.assertEquals('bar/empty', obj.dependencies[2].name)
self._check_requirements(
obj.dependencies[0],
{
'foo/dir1': ['bar', 'bar/empty', 'foo'],
'foo/dir1/dir2/dir3':
['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2'],
'foo/dir1/dir2/dir3/dir4':
[ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
'foo/dir1/dir2/dir3'],
'foo/dir1/dir2/dir5/dir6':
[ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
'foo/dir1/dir2/dir3/dir4'],
})
self._check_requirements(
obj.dependencies[1],
{
'foo/dir1/dir2': ['bar', 'bar/empty', 'foo', 'foo/dir1'],
})
self._check_requirements(
obj.dependencies[2],
{})
self._check_requirements(
obj,
{
'foo': [],
'bar': [],
'bar/empty': ['bar'],
})
def _check_requirements(self, solution, expected):
for dependency in solution.dependencies:
e = expected.pop(dependency.name)
a = sorted(dependency.requirements)
self.assertEquals(e, a, (dependency.name, e, a))
self.assertEquals({}, expected)
def _get_processed(self):
"""Retrieves the item in the order they were processed."""
items = []
try:
while True:
items.append(self.processed.get_nowait())
except Queue.Empty:
pass
return items
def testAutofix(self):
    # Invalid urls cause pain when specifying requirements. Make sure they're
    # auto-fixed.
d = gclient.Dependency(
None, 'name', 'proto://host/path/@revision', None, None, None, None,
None, '', True)
self.assertEquals('proto://host/path@revision', d.url)
def testStr(self):
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
obj = gclient.GClient('foo', options)
obj.add_dependencies_and_close(
[
gclient.Dependency(
obj, 'foo', 'url', None, None, None, None, None, 'DEPS', True),
gclient.Dependency(
obj, 'bar', 'url', None, None, None, None, None, 'DEPS', True),
],
[])
obj.dependencies[0].add_dependencies_and_close(
[
gclient.Dependency(
obj.dependencies[0], 'foo/dir1', 'url', None, None, None, None,
None, 'DEPS', True),
gclient.Dependency(
obj.dependencies[0], 'foo/dir2',
gclient.GClientKeywords.FromImpl('bar'), None, None, None, None,
None, 'DEPS', True),
gclient.Dependency(
obj.dependencies[0], 'foo/dir3',
gclient.GClientKeywords.FileImpl('url'), None, None, None, None,
None, 'DEPS', True),
],
[])
# Make sure __str__() works fine.
# pylint: disable=W0212
obj.dependencies[0]._file_list.append('foo')
str_obj = str(obj)
self.assertEquals(471, len(str_obj), '%d\n%s' % (len(str_obj), str_obj))
def testHooks(self):
topdir = self.root_dir
gclient_fn = os.path.join(topdir, '.gclient')
fh = open(gclient_fn, 'w')
print >> fh, 'solutions = [{"name":"top","url":"svn://example.com/top"}]'
fh.close()
subdir_fn = os.path.join(topdir, 'top')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
print >> fh, 'hooks = %s' % repr(hooks)
fh.close()
fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
print >> fh, 'bogus content'
fh.close()
os.chdir(topdir)
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
options.force = True
client = gclient.GClient.LoadCurrentConfig(options)
work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
for s in client.dependencies:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=options)
self.assertEqual(client.GetHooks(options), [x['action'] for x in hooks])
def testCustomHooks(self):
topdir = self.root_dir
gclient_fn = os.path.join(topdir, '.gclient')
fh = open(gclient_fn, 'w')
extra_hooks = [{'name': 'append', 'pattern':'.', 'action':['supercmd']}]
print >> fh, ('solutions = [{"name":"top","url":"svn://example.com/top",'
'"custom_hooks": %s},' ) % repr(extra_hooks + [{'name': 'skip'}])
print >> fh, '{"name":"bottom","url":"svn://example.com/bottom"}]'
fh.close()
subdir_fn = os.path.join(topdir, 'top')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
hooks.append({'pattern':'.', 'action':['cmd2', 'arg1', 'arg2']})
skip_hooks = [
{'name': 'skip', 'pattern':'.', 'action':['cmd3', 'arg1', 'arg2']}]
skip_hooks.append(
{'name': 'skip', 'pattern':'.', 'action':['cmd4', 'arg1', 'arg2']})
print >> fh, 'hooks = %s' % repr(hooks + skip_hooks)
fh.close()
# Make sure the custom hooks for that project don't affect the next one.
subdir_fn = os.path.join(topdir, 'bottom')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
sub_hooks = [{'pattern':'.', 'action':['response1', 'yes1', 'yes2']}]
sub_hooks.append(
{'name': 'skip', 'pattern':'.', 'action':['response2', 'yes', 'sir']})
print >> fh, 'hooks = %s' % repr(sub_hooks)
fh.close()
fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
print >> fh, 'bogus content'
fh.close()
os.chdir(topdir)
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
options.force = True
client = gclient.GClient.LoadCurrentConfig(options)
work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
for s in client.dependencies:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=options)
self.assertEqual(client.GetHooks(options),
[x['action'] for x in hooks + extra_hooks + sub_hooks])
def testTargetOS(self):
"""Verifies that specifying a target_os pulls in all relevant dependencies.
The target_os variable allows specifying the name of an additional OS which
should be considered when selecting dependencies from a DEPS' deps_os. The
value will be appended to the _enforced_os tuple.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os = ["baz"]')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
' "baz": { "foo/dir3": "/dir3", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEqual(['baz', 'unix'], sorted(obj.enforced_os))
def testTargetOsWithTargetOsOnly(self):
"""Verifies that specifying a target_os and target_os_only pulls in only
the relevant dependencies.
The target_os variable allows specifying the name of an additional OS which
should be considered when selecting dependencies from a DEPS' deps_os. With
target_os_only also set, the _enforced_os tuple will be set to only the
target_os value.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os = ["baz"]\n'
'target_os_only = True')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
' "baz": { "foo/dir3": "/dir3", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEqual(['baz'], sorted(obj.enforced_os))
def testTargetOsOnlyWithoutTargetOs(self):
"""Verifies that specifying a target_os_only without target_os_only raises
an exception.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os_only = True')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
exception_raised = False
try:
gclient.GClient.LoadCurrentConfig(options)
except gclient_utils.Error:
exception_raised = True
self.assertTrue(exception_raised)
def testTargetOsInDepsFile(self):
"""Verifies that specifying a target_os value in a DEPS file pulls in all
relevant dependencies.
The target_os variable in a DEPS file allows specifying the name of an
additional OS which should be considered when selecting dependencies from a
DEPS' deps_os. The value will be appended to the _enforced_os tuple.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' },\n'
' { "name": "bar",\n'
' "url": "svn://example.com/bar",\n'
' }]\n')
write(
os.path.join('foo', 'DEPS'),
'target_os = ["baz"]\n'
'deps_os = {\n'
' "unix": { "foo/unix": "/unix", },\n'
' "baz": { "foo/baz": "/baz", },\n'
' "jaz": { "foo/jaz": "/jaz", },\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps_os = {\n'
' "unix": { "bar/unix": "/unix", },\n'
' "baz": { "bar/baz": "/baz", },\n'
' "jaz": { "bar/jaz": "/jaz", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = 'unix'
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEqual(['unix'], sorted(obj.enforced_os))
self.assertEquals(
[
'svn://example.com/bar',
'svn://example.com/bar/unix',
'svn://example.com/foo',
'svn://example.com/foo/baz',
'svn://example.com/foo/unix',
],
sorted(self._get_processed()))
def testUpdateWithOsDeps(self):
"""Verifies that complicated deps_os constructs result in the
correct data also with multple operating systems. Also see
testDepsOsOverrideDepsInDepsFile."""
test_data = [
# Tuples of deps, deps_os, os_list and expected_deps.
(
# OS doesn't need module.
{'foo': 'default_foo'},
{'os1': { 'foo': None } },
['os1'],
{'foo': None}
),
(
# OS wants a different version of module.
{'foo': 'default_foo'},
{'os1': { 'foo': 'os1_foo'} },
['os1'],
{'foo': 'os1_foo'}
),
(
# OS with no overrides at all.
{'foo': 'default_foo'},
{'os1': { 'foo': None } },
['os2'],
{'foo': 'default_foo'}
),
(
# One OS doesn't need module, one OS wants the default.
{'foo': 'default_foo'},
{'os1': { 'foo': None },
'os2': {}},
['os1', 'os2'],
{'foo': 'default_foo'}
),
(
# One OS doesn't need module, another OS wants a special version.
{'foo': 'default_foo'},
{'os1': { 'foo': None },
'os2': { 'foo': 'os2_foo'}},
['os1', 'os2'],
{'foo': 'os2_foo'}
),
(
# One OS wants to add a module.
{'foo': 'default_foo'},
{'os1': { 'bar': 'os1_bar' }},
['os1'],
{'foo': 'default_foo',
'bar': 'os1_bar'}
),
(
# One OS wants to add a module. One doesn't care.
{'foo': 'default_foo'},
{'os1': { 'bar': 'os1_bar' }},
['os1', 'os2'],
{'foo': 'default_foo',
'bar': 'os1_bar'}
),
(
# Two OSes want to add a module with the same definition.
{'foo': 'default_foo'},
{'os1': { 'bar': 'os12_bar' },
'os2': { 'bar': 'os12_bar' }},
['os1', 'os2'],
{'foo': 'default_foo',
'bar': 'os12_bar'}
),
]
for deps, deps_os, target_os_list, expected_deps in test_data:
orig_deps = copy.deepcopy(deps)
result = gclient.Dependency.MergeWithOsDeps(deps, deps_os, target_os_list)
self.assertEqual(result, expected_deps)
self.assertEqual(deps, orig_deps)
def testLateOverride(self):
"""Verifies expected behavior of LateOverride."""
url = "[email protected]:dart-lang/spark.git"
d = gclient.Dependency(None, 'name', 'url',
None, None, None, None, None, '', True)
late_url = d.LateOverride(url)
self.assertEquals(url, late_url)
def testDepsOsOverrideDepsInDepsFile(self):
"""Verifies that a 'deps_os' path can override a 'deps' path. Also
see testUpdateWithOsDeps above.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' },]\n')
write(
os.path.join('foo', 'DEPS'),
'target_os = ["baz"]\n'
'deps = {\n'
' "foo/src": "/src",\n' # This path is to be overridden by similar path
# in deps_os['unix'].
'}\n'
'deps_os = {\n'
' "unix": { "foo/unix": "/unix",'
' "foo/src": "/src_unix"},\n'
' "baz": { "foo/baz": "/baz",\n'
' "foo/src": None},\n'
' "jaz": { "foo/jaz": "/jaz", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = 'unix'
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEqual(['unix'], sorted(obj.enforced_os))
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/foo/baz',
'svn://example.com/foo/src_unix',
'svn://example.com/foo/unix',
],
sorted(self._get_processed()))
def testRecursionOverride(self):
"""Verifies gclient respects the |recursion| var syntax.
We check several things here:
- |recursion| = 3 sets recursion on the foo dep to exactly 3
(we pull /fizz, but not /fuzz)
    - pulling foo/bar at recursion level 1 (in .gclient) is overridden by
a later pull of foo/bar at recursion level 2 (in the dep tree)
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursion = 3')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/bar',
'svn://example.com/foo/bar',
'svn://example.com/foo/bar/baz',
'svn://example.com/foo/bar/baz/fizz',
],
self._get_processed())
def testRecursedepsOverride(self):
"""Verifies gclient respects the |recursedeps| var syntax.
This is what we mean to check here:
- |recursedeps| = [...] on 2 levels means we pull exactly 3 deps
(up to /fizz, but not /fuzz)
    - pulling foo/bar with no recursion (in .gclient) is overridden by
      a later pull of foo/bar with recursion (in the dep tree)
    - pulling foo/tar with no recursion (in .gclient) is not recursively
      pulled (taz is left out)
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
' { "name": "foo/tar", "url": "svn://example.com/tar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = ["bar"]')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}\n'
'recursedeps = ["baz"]')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}')
write(
os.path.join('tar', 'DEPS'),
'deps = {\n'
' "taz": "/taz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/bar',
'svn://example.com/tar',
'svn://example.com/foo/bar',
'svn://example.com/foo/bar/baz',
'svn://example.com/foo/bar/baz/fizz',
],
self._get_processed())
def testRecursedepsOverrideWithRelativePaths(self):
"""Verifies gclient respects |recursedeps| with relative paths."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'use_relative_paths = True\n'
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = ["bar"]')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
# use_relative_paths means the following dep evaluates with 'foo'
# prepended.
'svn://example.com/foo/bar',
],
self._get_processed())
def testRecursionOverridesRecursedeps(self):
"""Verifies gclient respects |recursion| over |recursedeps|.
|recursion| is set in a top-level DEPS file. That value is meant
to affect how many subdeps are parsed via recursion.
|recursedeps| is set in each DEPS file to control whether or not
to recurse into the immediate next subdep.
This test verifies that if both syntaxes are mixed in a DEPS file,
we disable |recursedeps| support and only obey |recursion|.
Since this setting is evaluated per DEPS file, recursed DEPS
files will each be re-evaluated according to the per DEPS rules.
So a DEPS that only contains |recursedeps| could then override any
previous |recursion| setting. There is extra processing to ensure
this does not happen.
For this test to work correctly, we need to use a DEPS chain that
only contains recursion controls in the top DEPS file.
In foo, |recursion| and |recursedeps| are specified. When we see
|recursion|, we stop trying to use |recursedeps|.
There are 2 constructions of DEPS here that are key to this test:
(1) In foo, if we used |recursedeps| instead of |recursion|, we
would also pull in bar. Since bar's DEPS doesn't contain any
recursion statements, we would stop processing at bar.
(2) In fizz, if we used |recursedeps| at all, we should pull in
fuzz.
We expect to keep going past bar (satisfying 1) and we don't
expect to pull in fuzz (satisfying 2).
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursion = 3\n'
'recursedeps = ["bar"]')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}\n'
'recursedeps = ["fuzz"]')
write(
os.path.join('fuzz', 'DEPS'),
'deps = {\n'
' "tar": "/tar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/bar',
'svn://example.com/foo/bar',
# Deps after this would have been skipped if we were obeying
# |recursedeps|.
'svn://example.com/foo/bar/baz',
'svn://example.com/foo/bar/baz/fizz',
# And this dep would have been picked up if we were obeying
# |recursedeps|.
# 'svn://example.com/foo/bar/baz/fuzz',
],
self._get_processed())
def testGitDeps(self):
"""Verifies gclient respects a .DEPS.git deps file.
    Along the way, we also test that if both DEPS and .DEPS.git are present,
    gclient does not read the DEPS file. This will reliably catch bugs
    where gclient is always hitting the wrong file (DEPS).
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', '.DEPS.git'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/foo/bar',
],
self._get_processed())
def testGitDepsFallback(self):
"""Verifies gclient respects fallback to DEPS upon missing deps file."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
'svn://example.com/foo',
'svn://example.com/foo/bar',
],
self._get_processed())
if __name__ == '__main__':
sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout, include_zero=True)
sys.stderr = gclient_utils.MakeFileAutoFlush(sys.stderr)
sys.stderr = gclient_utils.MakeFileAnnotated(sys.stderr, include_zero=True)
logging.basicConfig(
level=[logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG][
min(sys.argv.count('-v'), 3)],
format='%(relativeCreated)4d %(levelname)5s %(module)13s('
'%(lineno)d) %(message)s')
unittest.main()
|
|
"""
#;+
#; NAME:
#; fN.data
#; Version 2.0
#;
#; PURPOSE:
#; Module for fN data constraints
#; 12-Mar-2015 by JXP edited by Alix Feinsod
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, imp
from xastropy.xutils import xdebug as xdb
from xastropy.spec import abs_line, voigt
from xastropy.igm import tau_eff
#from xastropy.igm.fN.model import fN_Model
from astropy.io import fits
# Path for xastropy
xa_path = imp.find_module('xastropy')[1]
#class fN_Constraint(object):
class fN_Constraint(object):
"""A Class for fN constraints
Attributes:
fN_dtype: string
Constraint type for the fN
'fN' -- Standard f(N) evaluation
'MFP' -- MFP
'LLS' -- LLS incidence
'teff' -- tau effective
'beta' -- slope constraint
flavor: string
Specific type of constraint
comment: string
ref: string
Reference
cosm: string
Cosmology used (e.g. WMAP05)
zeval: float
Redshift where the constraint is evaluated
data: dict
Dictionary containing the constraints
"""
# Initialize with type
def __init__(self, fN_dtype, zeval=0., ref='', flavor=''):
self.fN_dtype = fN_dtype # Should probably check the choice
self.zeval = zeval
self.ref = ref
self.flavor = flavor
# Read from binary FITS table
def from_fits_table(self, row):
# Common items
common = ['REF','COSM','TYPE','COMMENT']
self.ref = row['REF']
self.cosm = row['COSM']
self.flavor = row['TYPE']
self.comment = row['COMMENT']
# zeval
if 'ZEVAL' in row.array.names: self.zeval = row['ZEVAL']
elif 'Z_LLS' in row.array.names: self.zeval = row['Z_LLS']
elif 'Z_MFP' in row.array.names: self.zeval = row['Z_MFP']
elif 'Z_TEFF' in row.array.names: self.zeval = row['Z_TEFF']
else:
raise ValueError('fN.data: No redshift info!')
# zip the rest
self.data = dict(zip(row.array.names,row))
for item in common: self.data.pop(item) # No need to duplicate
# Output
def __repr__(self):
return ('[%s: %s_%s z=%g, ref=%s]' %
(self.__class__.__name__,
self.fN_dtype, self.flavor,
self.zeval, self.ref) )
# ###################### ###############
# ###################### ###############
# Read from ASCII file
def fN_data_from_ascii_file(infile):
#makes new fN constraint with data type fN
fNc = fN_Constraint('fN')
ftype = fNc.fN_dtype.encode('ascii')
fNc.fN_dtype = ftype
fNc.ref=infile.encode('ascii')
# Open file
f = open(infile, 'r')
# Read and ignore header lines
firstline = f.readline()
    # get rid of the trailing newline \n
firstline =firstline.strip()
#get zeval and DX from first line
values = firstline.split()
fNc.zeval = float(values[0])
ZEVAL = float(values[0])
DX = float(values[1])
#declaration of variables
BINS1 =[]
BINS2 = []
fn = []
SIG_FN1 = []
SIG_FN2 = []
count = 0
numlines=0
# Loop over lines and extract info
for line in f:
line = line.strip()
columns = line.split()
BINS1.append(float(columns[0]))
BINS2.append(float(columns[1]))
fn.append(float(columns[2]))
SIG_FN1.append(float(columns[3]))
SIG_FN2.append(float(columns[3]))
numlines +=1
if (float(columns[0])!=0) or (float(columns[1])!=0) or (float(columns[2])!=0) or (float(columns[3])!=0):
count +=1
f.close()
NPT = int(count)
bins = []
bins.append(BINS1)
bins.append(BINS2)
sig_fn = []
sig_fn.append(SIG_FN1)
sig_fn.append(SIG_FN2)
BINS = np.ndarray(shape=(2, numlines), dtype=float, buffer=np.array(bins))
SIG_FN = np.ndarray(shape=(2, numlines), dtype=float, buffer=np.array(sig_fn))
FN = np.ndarray(shape=(numlines,), dtype=float, buffer=np.array(fn))
#makes array with names in ASCII not unicode
arrayofnames = ['BINS','FN','SIG_FN','DX','NPT','ZEVAL']
names = []
for name in arrayofnames:
newname = name.encode('ascii')
names.append(newname)
values = [BINS,FN,SIG_FN,DX,NPT,ZEVAL]
fNc.data = dict(zip(names, values))
return fNc
def fn_data_from_fits(fits_file):
""" Build up a list of fN constraints from a multi-extension FITS file
Parameters:
fits_file: string
Name of FITS file
Returns:
fN_list: list
List of fN_Constraint objects
JXP 07 Nov 2014
"""
# List of constraints
fN_cs = []
# Read
if isinstance(fits_file,list):
for ifile in fits_file:
tmp_cs = fn_data_from_fits(ifile)
for cs in tmp_cs: fN_cs.append(cs)
else:
hdus = fits.open(fits_file)
if len(hdus) == 1:
raise ValueError('fN.data: Expecting a multi-extension fits file -- %s' % fits_file)
# Loop through hdu
for hdu in hdus[1:]:
data = hdu.data
# Get ftype
if 'FN' in data.dtype.names: ftype = 'fN' # Standard f(N) data
elif 'TAU_LIM' in data.dtype.names: ftype = 'LLS' # LLS survey
elif 'MFP' in data.dtype.names: ftype = 'MFP' # MFP measurement
elif 'TEFF' in data.dtype.names: ftype = 'teff' # tau effective (Lya)
else:
raise ValueError('fN.data: Cannot figure out ftype')
# Loop on the Table
for row in data:
fNc = fN_Constraint(ftype)
fNc.from_fits_table(row)
fN_cs.append(fNc)
# Return
return fN_cs
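# A minimal usage sketch (defined but not executed here) showing how a reader
# might load the bundled constraint files; it simply mirrors the __main__
# block at the bottom of this module.
def _example_load_constraints():
    fn_file = xa_path+'/igm/fN/fn_constraints_z2.5_vanilla.fits'
    k13r13_file = xa_path+'/igm/fN/fn_constraints_K13R13_vanilla.fits'
    n12_file = xa_path+'/igm/fN/fn_constraints_N12_vanilla.fits'
    all_fN_cs = fn_data_from_fits([fn_file, k13r13_file, n12_file])
    for fN_c in all_fN_cs:
        print(fN_c)
    return all_fN_cs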
# Reproduce the main figure from P14 (data only)
def tst_fn_data(fN_model=None, model_two=None, data_list=None, outfil=None):
""" Make a plot like the final figure from P14
Parameters:
    fN_model, model_two: fN_Model, optional
      Models to over-plot on the data
    data_list: list, optional
      References to include (default: all except K02 and PW09)
    outfil: string, optional
      If given, save the figure to this file; otherwise show it
JXP 07 Nov 2014
"""
import matplotlib as mpl
mpl.rcParams['font.family'] = 'STIXGeneral-Regular' # Not for PDF
mpl.rcParams['lines.linewidth'] = 2
from matplotlib import pyplot as plt
#from matplotlib.backends.backend_pdf import PdfPages
# Output
#if outfil != None:
# pp = PdfPages(outfil)
#mpl.rcParams['font.family'] = 'stixgeneral' # For PDF
# fN data
#fn_file = os.environ.get('XIDL_DIR')+'IGM/fN_empirical/fn_constraints_z2.5_vanilla.fits'
#k13r13_file = os.environ.get('XIDL_DIR')+'IGM/fN_empirical/fn_constraints_K13R13_vanilla.fits'
#n12_file = os.environ.get('XIDL_DIR')+'IGM/fN_empirical/fn_constraints_N12_vanilla.fits'
fn_file = xa_path+'/igm/fN/fn_constraints_z2.5_vanilla.fits'
k13r13_file = xa_path+'/igm/fN/fn_constraints_K13R13_vanilla.fits'
n12_file = xa_path+'/igm/fN/fn_constraints_N12_vanilla.fits'
all_fN_cs = fn_data_from_fits([fn_file,k13r13_file, n12_file])
#ascii_file = xa_path+'/igm/fN/asciidatan12'
#ascii_data = fN_data_from_ascii_file(ascii_file)
#all_fN_cs.append(ascii_data)
# Remove K12
#data_list = ['K13R13','OPB07', 'N12']
#outfil = 'tmp.png'
if data_list is None:
fN_cs = [fN_c for fN_c in all_fN_cs if ((fN_c.ref != 'K02') & (fN_c.ref != 'PW09'))]
else:
fN_cs = [fN_c for fN_c in all_fN_cs if fN_c.ref in data_list]
fN_dtype = [fc.fN_dtype for fc in fN_cs]
fig = plt.figure(figsize=(8, 5))
fig.clf()
main = fig.add_axes( [0.1, 0.1, 0.8, 0.8] ) # xypos, xy-size
# f(N) data
main.set_ylabel(r'$\log f(N_{\rm HI})$')
main.set_xlabel(r'$\log N_{\rm HI}$')
main.set_ylim(-25., -9)
for fN_c in fN_cs:
if fN_c.fN_dtype == 'fN':
# Length
ip = range(fN_c.data['NPT'])
#xdb.set_trace()
val = np.where(fN_c.data['FN'][ip] > -90)[0]
#xdb.set_trace()
if len(val) > 0:
#xdb.set_trace()
ipv = np.array(ip)[val]
xval = np.median(fN_c.data['BINS'][:,ipv],0)
xerror = [ fN_c.data['BINS'][1,ipv]-xval, xval-fN_c.data['BINS'][0,ipv] ]
yerror = [ fN_c.data['SIG_FN'][1,ipv], fN_c.data['SIG_FN'][0,ipv] ] # Inverted!
main.errorbar(xval, fN_c.data['FN'][ipv], xerr=xerror, yerr=yerror, fmt='o',
label=fN_c.ref,capthick=2)
main.legend(loc='lower left', numpoints=1)
# Model?
#print(fN_model.param)
if fN_model is not None:
xplt = 12.01 + 0.01*np.arange(1100)
yplt = fN_model.eval(xplt, 2.4)
main.plot(xplt,yplt,'-',color='black')
print(xplt[0],yplt[0])
if model_two is not None:
xplt = 12.01 + 0.01*np.arange(1100)
yplt = model_two.eval(xplt, 2.4)
main.plot(xplt,yplt,'-',color='gray')
#xdb.set_trace()
# Extras
#mpl.rcParams['lines.capthick'] = 2
inset = fig.add_axes( [0.55, 0.6, 0.25, 0.25] ) # xypos, xy-size
inset.set_ylabel('Value') # LHS
inset.xaxis.set_major_locator(plt.FixedLocator(range(5)))
#lbl1 = r'$\tau_{\rm eff}^{\rm Ly\alpha}'
inset.xaxis.set_major_formatter(plt.FixedFormatter(['',r'$\tau_{\rm eff}^{\rm Ly\alpha}$',
r'$\ell(X)_{\rm LLS}$',
r'$\lambda_{\rm mfp}^{912}$', '']))
inset.set_ylim(0., 0.6)
## #######
# tau_eff
flg_teff = 1
try:
itau = fN_dtype.index('teff') # Passes back the first one
except:
#raise ValueError('fN.data: Missing teff type')
flg_teff = 0
#xdb.set_trace()
if flg_teff:
teff=float(fN_cs[itau].data['TEFF'])
D_A = 1. - np.exp(-1. * teff)
        SIGDA_LIMIT = 0.1 # Allows for systematics and b-value uncertainty
sig_teff = np.max([fN_cs[itau].data['SIG_TEFF'], (SIGDA_LIMIT*teff)])
# Plot
inset.errorbar(1, teff, sig_teff, fmt='_', capthick=2)
# Model
if fN_model != None:
model_teff = tau_eff.ew_teff_lyman(1215.6701*(1+fN_cs[itau].zeval), fN_cs[itau].zeval+0.1,
fN_model, NHI_MIN=fN_cs[itau].data['NHI_MNX'][0],
NHI_MAX=fN_cs[itau].data['NHI_MNX'][1])
inset.plot(1, model_teff, 'ko')
#xdb.set_trace()
## #######
# LLS constraint
flg_LLS = 1
try:
iLLS = fN_dtype.index('LLS') # Passes back the first one
except:
#raise ValueError('fN.data: Missing LLS type')
flg_LLS = 0
if flg_LLS:
inset.errorbar(2, fN_cs[iLLS].data['LX'], yerr=fN_cs[iLLS].data['SIG_LX'],
fmt='_', capthick=2)
# Model
if fN_model != None:
lX = fN_model.calc_lox(fN_cs[iLLS].zeval,
17.19+np.log10(fN_cs[iLLS].data['TAU_LIM']), 22.)
inset.plot(2, lX, 'ko')
## #######
# MFP constraint
flg_MFP = 1
try:
iMFP = fN_dtype.index('MFP') # Passes back the first one
except:
#raise ValueError('fN.data: Missing MFP type')
flg_MFP = 0
if flg_MFP:
inset2 = inset.twinx()
inset2.errorbar(3, fN_cs[iMFP].data['MFP'], yerr=fN_cs[iMFP].data['SIG_MFP'],
fmt='_', capthick=2)
inset2.set_xlim(0,4)
inset2.set_ylim(0,350)
inset2.set_ylabel('(Mpc)')
# Model
if fN_model != None:
#fN_model.zmnx = (0.1, 20.) # Reset for MFP calculation
mfp = fN_model.mfp(fN_cs[iMFP].zeval)
inset2.plot(3, mfp, 'ko')
# Show
if outfil != None:
plt.savefig(outfil,bbox_inches='tight')
else:
plt.show()
## #################################
## #################################
## TESTING
## #################################
if __name__ == '__main__':
# Read a dataset
fn_file = xa_path+'/igm/fN/fn_constraints_z2.5_vanilla.fits'
k13r13_file = xa_path+'/igm/fN/fn_constraints_K13R13_vanilla.fits'
n12_file = xa_path+'/igm/fN/fn_constraints_N12_vanilla.fits'
all_fN_cs = fn_data_from_fits([fn_file, k13r13_file, n12_file])
#ascii_file = xa_path+'/igm/fN/asciidatan12'
#ascii_data = fN_data_from_ascii_file(ascii_file)
#all_fN_cs.append(ascii_data)
print(all_fN_cs)
for fN_c in all_fN_cs: print(fN_c)
# Plot
tst_fn_data()
xdb.set_trace()
print('fN.data: All done testing..')
|
|
#!/usr/bin/python
# Naomi Hiebert coded this
#import our data structures
from accMacDict import accMac
from unaMacDict import unaMac
from binMacDict import binMac
from jmpMacDict import jmpMac
from asmDicts import opcodes, unaryOpcodes, dataTypes
#import global variables
import globalVars
# The real heart of the operation - identifies macros anywhere,
# including inside other macros! Tends to get called recursively
# since macros inside macros need to be expanded inside macros.
#Takes: a split line
#Returns: a list of (joined) lines
def expandline(splitline):
expLine = []
if isFallthroughLine(splitline): #the base case - encompasses several other cases
expLine.append(" ".join(splitline))
elif isINFLine(splitline):
getINFValue(splitline)
elif isIncludeStatement(splitline):
expLine.append(handleInclude(splitline))
elif isAccMacro(splitline):
expLine.extend(expandAccMacro(splitline))
elif isUnaryMacro(splitline):
expLine.extend(expandUnaryMacro(splitline))
elif isBinaryMacro(splitline):
expLine.extend(expandBinaryMacro(splitline))
elif isJumpMacro(splitline):
expLine.extend(expandJumpMacro(splitline))
else:
syntaxfail(splitline)
return expLine
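# Illustrative walk-through (the mnemonic "FOO" is hypothetical; the real macro
# names live in the imported dictionaries): a five-token line such as
#   FOO a b INTO c
# matches isBinaryMacro() and is expanded by expandBinaryMacro(); every line
# that expansion produces is fed back through expandline() until only
# fall-through lines (native ASM, labels, data, blanks/comments) remain.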
#boolean functions - for identifying macros and syntax errors
#all the base cases return true on this line
def isFallthroughLine(splitline):
if isBlankOrComment(splitline):
return True
elif isNativeASM(splitline):
return True
elif isSoleLabel(splitline):
return True
elif isData(splitline):
return True
else:
return False
#INF header lines get grabbed
def isINFLine(splitline):
if len(splitline) == 2 and splitline[0] == "INF":
return True
else:
return False
#blank or comment lines fall through unchanged
def isBlankOrComment(splitline):
if len(splitline) == 0 or splitline[0][0] == '#' or splitline[0][0] == ";":
return True
else:
return False
#checks if it's native ASM.
def isNativeASM(splitline):
if len(splitline) == 2 and splitline[0] in opcodes:
if globalVars.DataFields:
structurefail(splitline)
return True
elif len(splitline) == 1 and splitline[0] in unaryOpcodes:
if globalVars.DataFields:
structurefail(splitline)
return True
else:
return False
#checks if it fits the standard label syntax, alone on a line
def isSoleLabel(splitline):
if len(splitline) == 1 and splitline[0][-1] == ':':
return True
else:
return False
#checks if it's a data declaration, possibly with label
def isData(splitline):
if len(splitline) > 1 and splitline[0] in dataTypes:
globalVars.DataFields = True
return True
elif len(splitline) > 2 and splitline[1] in dataTypes:
globalVars.DataFields = True
return True
else:
return False
#detects INCL statements
def isIncludeStatement(splitline):
if splitline[0] == "INCL" and len(splitline) == 2:
return True
else:
return False
#detects accumulator-based macros
def isAccMacro(splitline):
if len(splitline) == 2 and splitline[0] in accMac and splitline[1] == "ACC":
return True
else:
return False
#detects unary operation macros
def isUnaryMacro(splitline):
if len(splitline) == 4 and splitline[0] in unaMac and splitline[2] == "INTO":
return True
elif len(splitline) == 2 and splitline[0] in unaMac and splitline[1] != "ACC":
return True
else:
return False
#detects binary operation macros
def isBinaryMacro(splitline):
if len(splitline) == 5 and splitline[0] in binMac and splitline[3] == "INTO":
return True
elif len(splitline) == 3 and splitline[0] in binMac:
return True
else:
return False
#detects jump macros
def isJumpMacro(splitline):
if len(splitline) == 5 and splitline[0] in jmpMac and splitline[3] == "TO":
return True
else:
return False
#complains when it can't figure out what you're saying
def syntaxfail(errorline):
raise Exception("Syntax Error!", " ".join(errorline))
#complains when you put data in front of instructions
def structurefail(errorline):
raise Exception("Structural Error: Instructions cannot be placed after data fields!",
" ".join(errorline))
#complains when you ask for the fifth or greater nibble of an address
def addroffsetfail(errortoken):
raise Exception("Addressing Error: Addresses are only four nibbles long!", errortoken)
#replacement functions - expand those macros!
# The simplest expansion function, since no acc macro
# takes any arguments. Some might contain other macros
# though, so we still need to check
#Takes: a split line (as list of single-word strings)
#Returns: a list of (joined) lines (possibly a single-element list)
def expandAccMacro(inMac):
outlines = []
for line in accMac[inMac[0]].splitlines():
countMacroUsage(line.split())
outlines.extend(expandline(line.split()))
return outlines
# Really the only difference between unary and binary is
# the number of arguments. That's why the functions are
# almost identical.
#Takes: a split line
#Returns: a list of (joined) lines
def expandUnaryMacro(inMac):
outlines = []
op1 = inMac[1]
#Assume in-place operation if no dest given
if len(inMac) == 4:
dest = inMac[3]
else:
dest = op1
for line in unaMac[inMac[0]].splitlines():
splitline = line.split()
#replace our placeholder labels with the input ones
splitline = replaceLabels(splitline, "$op1", op1)
splitline = replaceLabels(splitline, "&op1", op1)
splitline = replaceLabels(splitline, "$dest", dest)
countMacroUsage(splitline)
#recursively expand the resulting line
outlines.extend(expandline(splitline))
return outlines
#Takes: a split line
#Returns: a list of lines
def expandBinaryMacro(inMac):
outlines = []
op1 = inMac[1]
op2 = inMac[2]
if len(inMac) == 5:
dest = inMac[4]
else:
dest = op1
for line in binMac[inMac[0]].splitlines():
splitline = line.split()
#replace our placeholder labels with the input ones
splitline = replaceLabels(splitline, "$op1", op1)
splitline = replaceLabels(splitline, "$op2", op2)
splitline = replaceLabels(splitline, "$dest", dest)
countMacroUsage(splitline)
#recursively expand the resulting line
outlines.extend(expandline(splitline))
return outlines
# Frankly, this is no different from the operation macros.
# I just split them into different dictionaries for ease of
# coding and maintenance. The only cost of that decision was
# having to write this function, which is basically identical
# to the functions above.
#Takes: a split line
#Returns: a list of lines
def expandJumpMacro(inMac):
outlines = []
op1 = inMac[1]
op2 = inMac[2]
dest = inMac[4]
for line in jmpMac[inMac[0]].splitlines():
splitline = line.split()
#replace our placeholder labels with the input ones
splitline = replaceLabels(splitline, "$op1", op1)
splitline = replaceLabels(splitline, "$op2", op2)
splitline = replaceLabels(splitline, "$dest", dest)
countMacroUsage(splitline)
#recursively expand the resulting line
outlines.extend(expandline(splitline))
return outlines
# One of the more complex bits of code in this script, if only
# because of the amount of string operations involved. Takes
# macros and part of their context, and replaces the $-marked
# placeholder tokens in the macros with the actual labels they
# should hold. Also does math on memory offsets, so we don't
# have to define a new label for each nibble of memory. Finally,
# keeps the counter of memory used internally by
# the macros we're using. This allows us to declare only as much
# macro scratch space as we need.
#Takes: a split line,
# the placeholder (starts with $ or maybe &) label to replace
# the new label (maybe with [offset]) to replace it with
#Also note that the placeholder in the line may also have an offset
#Returns: a split line
#Edits: global "memUsed" variable, if necessary
def replaceLabels(splitline, oldlabel, replabel):
outline = []
for token in splitline:
#put it in the output line, adapted
if token.startswith(oldlabel) and "$" in oldlabel:
outline.append(reptoken(token, replabel))
elif token.startswith(oldlabel) and "&" in oldlabel:
outline.append(repaddress(token, replabel))
else:
#not the label we're looking for
outline.append(token)
return outline
def countMacroUsage(outline):
#check if we're using macro memory. If so, we might need to
#expand our macro memory bank.
#We can get away with only checking the last token on the line
#because macro memory is always assigned to before it is used,
#and assignment is always to the last label on a line.
if "macro[" in outline[-1]:
macoffset = outline[-1][outline[-1].index('[') + 1 : outline[-1].index(']')]
macoffset = int(macoffset, 16)
macoffset += 1
if macoffset > globalVars.memUsed:
globalVars.memUsed = macoffset
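# Illustrative example (hypothetical token): if a line ends in "macro[2]", the
# hex offset 2 is parsed, bumped to 3, and globalVars.memUsed is raised to at
# least 3 so enough macro scratch space gets declared.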
#Takes: The token to replace (maybe with offset, starts with $)
# The new label to replace things with
#Returns:
# A new token, with calculated labels
#Assumes:
# If replabel or token uses the "&" syntax, it already has
# the trailing [] present, as &(label[A])[B] but definitely
# not &(label[A]). This is always the case if this program
# applied the "&" syntax itself; users might break things.
def reptoken(token, replabel):
#default values, if no offset found
oldoffset = 0
repoffset = 0
#get the offset from the replacement, if necessary
if '[' in replabel and ']' in replabel:
repoffset = replabel[replabel.rindex('[') + 1 : replabel.rindex(']')]
repoffset = hexSmartInt(repoffset)
#and from the old label, if necessary
if '[' in token and ']' in token:
oldoffset = token[token.rindex('[') + 1 : token.rindex(']')]
oldoffset = hexSmartInt(oldoffset)
#add them together
newoffset = oldoffset + repoffset
#smash together the new token
if '[' in replabel:
newtoken = replabel[:replabel.rfind('[')] + '[' + hex(newoffset)[2:] + ']'
else:
newtoken = replabel + '[' + hex(newoffset)[2:] + ']'
if '&' in newtoken and newoffset > 3:
addroffsetfail(newtoken)
return newtoken
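# Illustrative example (hypothetical labels): reptoken("$op1[2]", "buffer[1]")
# returns "buffer[3]" -- the placeholder's offset and the replacement's offset
# are summed and re-emitted as a hex offset.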
#Takes: A token to replace (maybe with offset in [0:4], starts with &)
# A label to replace it with (completely unrelated offset, not already using &)
#Returns: A token formed as &(replabel[repoffset])[tokenoffset]
def repaddress(token, replabel):
repoffset = 0
addroffset = 0
#get the offset from the replacement, if necessary
if '[' in replabel and ']' in replabel:
repoffset = replabel[replabel.index('[') + 1 : replabel.index(']')]
repoffset = int(repoffset, 16)
replabel = replabel[:replabel.find('[')]
#and from the old label, if necessary
if '[' in token and ']' in token:
addroffset = token[token.index('[') + 1 : token.index(']')]
addroffset = int(addroffset, 16)
token = token[:token.find('[')]
#assemble new token
newtoken = "&(" + replabel + '[' + hex(repoffset)[2:] + "])[" + hex(addroffset)[2:] + ']'
return newtoken
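# Illustrative example (hypothetical labels): repaddress("&op1[1]", "buffer[2]")
# returns "&(buffer[2])[1]" -- the replacement keeps its own offset inside the
# parentheses while the placeholder's nibble offset is applied outside them.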
#deal with include statements by adding them to the FList queue
def handleInclude(splitline):
if splitline[1] not in globalVars.FList:
globalVars.FList.append(splitline[1])
return ";Included " + splitline[1]
else:
return ";Ignored repeated include: " + splitline[1]
#deal with INF statements by, if they're in the first file,
#setting our output INF statement to have the given value.
#If there's no statement in the first file, use the default
#(1024).
def getINFValue(splitline):
if globalVars.FIndex != 0:
return
else:
globalVars.BAddr = int(splitline[1], 0)
#Like int(token, 0) but defaults to hexadecimal.
def hexSmartInt(token):
if token[0] == '0' and not token.isdigit():
if len(token) > 2 and token[1] == 'd':
return int(token[2:], 10)
else:
return int(token, 0)
else:
return int(token, 16)
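# Illustrative examples of the parsing rules above:
#   hexSmartInt("10")   -> 16  (bare digits default to hexadecimal)
#   hexSmartInt("0d10") -> 10  (explicit decimal prefix)
#   hexSmartInt("0x1f") -> 31  (standard Python prefixes handled by int(token, 0))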
|
|
# -*- coding: utf-8 -*-
from datetime import datetime as dt
import pytz
import logging
import re
import os
import json
import shutil
import requests
import tempfile
from django.core.management.base import BaseCommand
from osf.utils.permissions import ADMIN, WRITE
from osf.models import (
ApiOAuth2PersonalToken,
RegistrationSchema,
Node,
DraftRegistration,
OSFUser
)
from osf.models.nodelog import NodeLog
from website.project.metadata.schemas import ensure_schema_structure, from_json
from website.settings import WATERBUTLER_INTERNAL_URL
from framework.auth.core import Auth
from zipfile import ZipFile
logger = logging.getLogger(__name__)
HERE = os.path.dirname(os.path.abspath(__file__))
check_id = lambda item: re.match(r'(^[0-9]{8}[A-Z]{2})', item)
class EGAPUploadException(Exception):
pass
def ensure_egap_schema():
schema = ensure_schema_structure(from_json('egap-registration-3.json'))
schema_obj, created = RegistrationSchema.objects.update_or_create(
name=schema['name'],
schema_version=schema.get('version', 1),
defaults={
'schema': schema,
}
)
if created:
schema_obj.save()
return RegistrationSchema.objects.get(name='EGAP Registration', schema_version=3)
def get_creator_auth_header(creator_username):
creator = OSFUser.objects.get(username=creator_username)
token, created = ApiOAuth2PersonalToken.objects.get_or_create(name='egap_creator', owner=creator)
if created:
token.save()
return creator, {'Authorization': 'Bearer {}'.format(token.token_id)}
def create_node_from_project_json(egap_assets_path, egap_project_dir, creator):
with open(os.path.join(egap_assets_path, egap_project_dir, 'project.json'), 'r') as fp:
project_data = json.load(fp)
title = project_data['title']
node = Node(title=title, creator=creator)
node.save() # must save before adding contribs for auth reasons
for contributor in project_data['contributors']:
email = ''
if contributor.get('email'):
email = contributor.get('email').strip()
email = email.split('\\u00a0')[0].split(',')[0]
if '<' in email:
email = email.split('<')[1].replace('>', '')
node.add_contributor_registered_or_not(
Auth(creator),
full_name=contributor['name'],
email=email,
permissions=WRITE,
send_email='false'
)
node.set_visible(creator, visible=False, log=False, save=True)
return node
def rollback_node_from_project_json(egap_assets_path, egap_project_dir, creator):
with open(os.path.join(egap_assets_path, egap_project_dir, 'project.json'), 'r') as fp:
project_data = json.load(fp)
title = project_data['title']
try:
node = Node.objects.filter(title=title, creator=creator).get()
except Exception:
logger.error(
'Attempted rollback on Node titled {}. Node was not created.'.format(title)
)
return
node.delete()
return
def recursive_upload(auth, node, dir_path, parent='', metadata=None):
if metadata is None:
metadata = []
try:
for item in os.listdir(dir_path):
item_path = os.path.join(dir_path, item)
base_url = '{}/v1/resources/{}/providers/osfstorage/{}'.format(WATERBUTLER_INTERNAL_URL, node._id, parent)
if os.path.isfile(item_path):
with open(item_path, 'rb') as fp:
url = base_url + '?name={}&kind=file'.format(item)
resp = requests.put(url, data=fp.read(), headers=auth)
else:
url = base_url + '?name={}&kind=folder'.format(item)
resp = requests.put(url, headers=auth)
metadata = recursive_upload(auth, node, item_path, parent=resp.json()['data']['attributes']['path'], metadata=metadata)
if resp.status_code == 409: # if we retry something already uploaded just skip.
continue
if resp.status_code != 201:
raise EGAPUploadException('Error waterbutler response is {}, with {}'.format(resp.status_code, resp.content))
metadata.append(resp.json())
except EGAPUploadException as e:
logger.info(str(e))
metadata = recursive_upload(auth, node, dir_path, parent=parent, metadata=metadata)
return metadata
def get_egap_assets(guid, creator_auth):
node = Node.load(guid)
zip_file = node.files.first()
temp_path = tempfile.mkdtemp()
url = '{}/v1/resources/{}/providers/osfstorage/{}'.format(WATERBUTLER_INTERNAL_URL, guid, zip_file._id)
zip_file = requests.get(url, headers=creator_auth).content
egap_assets_path = os.path.join(temp_path, 'egap_assets.zip')
with open(egap_assets_path, 'wb') as fp:
fp.write(zip_file)
with ZipFile(egap_assets_path, 'r') as zipObj:
zipObj.extractall(temp_path)
zip_parent = [file for file in os.listdir(temp_path) if file not in ('__MACOSX', 'egap_assets.zip') and not check_id(file)]
if zip_parent:
zip_parent = zip_parent[0]
for i in os.listdir(os.path.join(temp_path, zip_parent)):
shutil.move(os.path.join(temp_path, zip_parent, i), os.path.join(temp_path, i))
if zip_parent:
os.rmdir(os.path.join(temp_path, zip_parent))
return temp_path
def register_silently(draft_registration, auth, sanction_type, external_registered_date, embargo_end_date):
registration = draft_registration.register(auth, save=True)
registration.external_registration = True
registration.registered_date = external_registered_date
registration.registered_from.add_log(
action=NodeLog.EXTERNAL_REGISTRATION_CREATED,
params={
'node': registration.registered_from._id,
'registration': registration._id
},
auth=auth,
log_date=external_registered_date)
registration.registered_from.add_log(
action=NodeLog.EXTERNAL_REGISTRATION_IMPORTED,
params={
'node': registration.registered_from._id,
'registration': registration._id
},
auth=auth,
log_date=dt.now().replace(tzinfo=pytz.utc))
if sanction_type == 'Embargo':
registration.embargo_registration(auth.user, embargo_end_date)
else:
registration.require_approval(auth.user)
registration.save()
def main(guid, creator_username):
egap_schema = ensure_egap_schema()
creator, creator_auth = get_creator_auth_header(creator_username)
egap_assets_path = get_egap_assets(guid, creator_auth)
# __MACOSX is a hidden folder created by macOS when zipping
directory_list = [directory for directory in os.listdir(egap_assets_path) if directory not in ('egap_assets.zip', '__MACOSX') and not directory.startswith('.')]
directory_list.sort()
for egap_project_dir in directory_list:
logger.info(
'Attempting to import the following directory: {}'.format(egap_project_dir)
)
# Node Creation
try:
node = create_node_from_project_json(egap_assets_path, egap_project_dir, creator=creator)
except Exception as err:
logger.error(
'There was an error attempting to create a node from the '
'{} directory. Attempting to rollback node and contributor creation'.format(egap_project_dir)
)
logger.error(str(err))
try:
rollback_node_from_project_json(egap_assets_path, egap_project_dir, creator=creator)
except Exception as err:
logger.error(str(err))
continue
# Node File Upload
non_anon_files = os.path.join(egap_assets_path, egap_project_dir, 'data', 'nonanonymous')
non_anon_metadata = recursive_upload(creator_auth, node, non_anon_files)
anon_files = os.path.join(egap_assets_path, egap_project_dir, 'data', 'anonymous')
if os.path.isdir(anon_files):
anon_metadata = recursive_upload(creator_auth, node, anon_files)
else:
anon_metadata = {}
# DraftRegistration Metadata Handling
with open(os.path.join(egap_assets_path, egap_project_dir, 'registration-schema.json'), 'r') as fp:
registration_metadata = json.load(fp)
# add selectedFileName just so filenames are listed in the UI
non_anon_metadata_dict = []
for data in non_anon_metadata:
if data['data']['attributes']['kind'] == 'folder':
continue
data['selectedFileName'] = data['data']['attributes']['name']
data['sha256'] = data['data']['attributes']['extra']['hashes']['sha256']
data['nodeId'] = node._id
non_anon_metadata_dict.append(data)
anon_metadata_dict = []
for data in anon_metadata:
if data['data']['attributes']['kind'] == 'folder':
continue
data['selectedFileName'] = data['data']['attributes']['name']
data['sha256'] = data['data']['attributes']['extra']['hashes']['sha256']
data['nodeId'] = node._id
anon_metadata_dict.append(data)
non_anon_titles = ', '.join([data['data']['attributes']['name'] for data in non_anon_metadata_dict])
registration_metadata['q37'] = {'comments': [], 'extra': non_anon_metadata_dict, 'value': non_anon_titles}
anon_titles = ', '.join([data['data']['attributes']['name'] for data in anon_metadata_dict])
registration_metadata['q38'] = {'comments': [], 'extra': anon_metadata_dict, 'value': anon_titles}
embargo_date = registration_metadata.pop('q12', None)
# DraftRegistration Creation
draft_registration = DraftRegistration.create_from_node(
node=node,
user=creator,
schema=egap_schema,
data=registration_metadata,
)
# Registration Creation
logger.info(
'Attempting to create a Registration for Project {}'.format(node._id)
)
# Retrieve EGAP registration date and potential embargo go-public date
if registration_metadata.get('q4'):
egap_registration_date_string = registration_metadata['q4']['value']
egap_registration_date = dt.strptime(egap_registration_date_string, '%m/%d/%Y - %H:%M').replace(tzinfo=pytz.UTC)
else:
logger.error(
'DraftRegistration associated with Project {} '
'does not have a valid registration date in registration_metadata'.format(node._id)
)
continue
if embargo_date:
if embargo_date.get('value'):
egap_embargo_public_date_string = embargo_date['value']
egap_embargo_public_date = dt.strptime(egap_embargo_public_date_string, '%m/%d/%y').replace(tzinfo=pytz.UTC)
else:
egap_embargo_public_date = None
else:
egap_embargo_public_date = None
sanction_type = 'RegistrationApproval'
if egap_embargo_public_date and (egap_embargo_public_date > dt.today().replace(tzinfo=pytz.UTC)):
sanction_type = 'Embargo'
logger.info(
'Attempting to register {} silently'.format(node._id)
)
try:
register_silently(draft_registration, Auth(creator), sanction_type, egap_registration_date, egap_embargo_public_date)
except Exception as err:
logger.error(
'Unexpected error raised when attempting to silently register '
'project {}. Continuing...'.format(node._id))
logger.info(str(err))
continue
# Update contributors on project to Admin
contributors = node.contributor_set.all()
for contributor in contributors:
if contributor.user == creator:
pass
else:
node.update_contributor(contributor.user, permission=ADMIN, visible=True, auth=Auth(creator), save=True)
shutil.rmtree(egap_assets_path)
class Command(BaseCommand):
"""Magically morphs csv data into lovable nodes with draft registrations attached
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'-c',
'--creator',
help='This should be the username of the initial administrator for the imported nodes',
required=True
)
parser.add_argument(
'-id',
'--guid',
help='This should be the guid of the private project with the directory structure',
required=True
)
def handle(self, *args, **options):
creator_username = options.get('creator', False)
guid = options.get('guid', False)
main(guid, creator_username)
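# Example invocation (illustrative only; the real command name comes from this
# module's filename and the creator/guid values are placeholders):
#   python manage.py <command_name> --creator [email protected] --guid abc12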
|
|
# Paper -> Font and Background Color
# Independent Text Binarization
# using edge boxes to isolate and identify
# characters; edge boxes with intensity values
# will be stored in one of the intermediate images
import cv2
import numpy as np
import sys
import os.path
# check args
if len(sys.argv) != 3:
print "%s [input_file] [output_file]" % (sys.argv[0])
sys.exit()
else:
input_file = sys.argv[1]
output_file = sys.argv[2]
if not os.path.isfile(input_file):
print "%s file not found" % input_file
sys.exit()
# debug flag
DEBUG = 0
# determine the pixel intensity
# ref: human eyes register colors differently.
# pixel intensity: 0.30R + 0.59G + 0.11B
# x,y are the coordinates of the image
# (named ii, matching how the rest of the script calls it)
def ii(x, y):
global img, img_y, img_x
if y >= img_y or x >= img_x:
# print "pixel out of bounds ("+str(y)+","+str(x)+")"
return 0
pixel = img[y][x]
return 0.30*pixel[2] + 0.59*pixel[1] + 0.11*pixel[0]
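# e.g. a pure white pixel (B, G, R) = (255, 255, 255) gives
# 0.30*255 + 0.59*255 + 0.11*255 = 255.0, while pure black gives 0.0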
# checks if contour is
# a connected shape
def isConnected(contour):
first = contour[0][0]
last = contour[len(contour)-1][0]
return abs(first[0] - last[0]) <= 1 and abs(first[1] - last[1]) <= 1
# helper function
# gets the contour at a given index
def getContour(index):
global contours
return contours[index]
# count the number of real children
def countChildren(index, h_, contour):
# no children
if h_[index][2] < 0:
return 0
else:
# if the first child is a contour
# we care about, then count it,
# otherwise don't
if keep(getContour(h_[index][2])):
count = 1
else:
count = 0
# count all of the child's siblings
# and their children
count += countSiblings(h_[index][2], h_, contour, True)
return count
# Count the number of relevant siblings of a contour
def countSiblings(index, h_, contour, inc_children=False):
# Include the children if necessary
if inc_children:
count = countChildren(index, h_, contour)
else:
count = 0
# Look ahead
p_ = h_[index][0]
while p_ > 0:
if keep(getContour(p_)):
count += 1
if inc_children:
count += countChildren(p_, h_, contour)
p_ = h_[p_][0]
# Look behind
n = h_[index][1]
while n > 0:
if keep(getContour(n)):
count += 1
if inc_children:
count += countChildren(n, h_, contour)
n = h_[n][1]
return count
# whether we care about this contour
def keep(contour):
return keepBox(contour) and isConnected(contour)
# Whether we should keep the containing box of this
# contour based on its shape
def keepBox(contour):
xx, yy, w_, h_ = cv2.boundingRect(contour)
# width and height need to be floats
w_ *= 1.0
h_ *= 1.0
# Test its shape - if it's too oblong or tall it's
# probably not a real character
if w_ / h_ < 0.1 or w_ / h_ > 10:
if DEBUG:
print "\t Rejected because of shape: (" + str(xx) + "," + str(yy) + "," + str(w_) + "," + str(h_) + ")" + \
str(w_ / h_)
return False
# check size of the box
if ((w_ * h_) > ((img_x * img_y) / 5)) or ((w_ * h_) < 15):
if DEBUG:
print "\t Rejected because of size"
return False
return True
# whether contour is a child
def isChild(index, h_):
return getParent(index, h_) > 0
# get contour's parent
def getParent(index, h_):
parent = h_[index][3]
while not keep(getContour(parent)) and parent > 0:
parent = h_[parent][3]
return parent
def includeBox(index, h_, contour):
if DEBUG:
print str(index) + ":"
if isChild(index, h_):
print "\tIs a child"
print "\tparent " + str(getParent(index, h_)) + " has " + str(
countChildren(getParent(index, h_), h_, contour)) + " children"
print "\thas " + str(countChildren(index, h_, contour)) + " children"
if isChild(index, h_) and countChildren(getParent(index, h_), h_, contour) <= 2:
if DEBUG:
print "\t skipping: is an interior to a letter"
return False
if countChildren(index, h_, contour) > 2:
if DEBUG:
print "\t skipping, is a container of letters"
return False
if DEBUG:
print "\t keeping"
return True
# load image
originalImg = cv2.imread(input_file)
# surround image with border
# ensuring bounds for processing
img = cv2.copyMakeBorder(originalImg, 50, 50, 50, 50, cv2.BORDER_CONSTANT)
# get width and height of image
img_y = len(img)
img_x = len(img[0])
if DEBUG:
print "Image is " + str(len(img)) + "x" + str(len(img[0]))
# split into (R,G,B)
blue, green, red = cv2.split(img)
# edge detection using canny edge detection algorithm
blue = cv2.Canny(blue, 200, 250)
green = cv2.Canny(green, 200, 250)
red = cv2.Canny(red, 200, 250)
# Join edges back into image
edges = blue | green | red
# find contours
img_ret, contours, hierarchy = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
hierarchy = hierarchy[0]
if DEBUG:
processed = edges.copy()
rejected = edges.copy()
# The contours and bounding boxes that we decide to keep
keepers = []
# For each contour, find the bounding rectangle and decide
# if it's one we care about
for index_, contour_ in enumerate(contours):
if DEBUG:
print "Processing #%d" % index_
x, y, w, h = cv2.boundingRect(contour_)
# Check the contour and its bounding box
if keep(contour_) and includeBox(index_, hierarchy, contour_):
# It's a winner!
keepers.append([contour_, [x, y, w, h]])
if DEBUG:
cv2.rectangle(processed, (x, y), (x + w, y + h), (100, 100, 100), 1)
cv2.putText(processed, str(index_), (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))
else:
if DEBUG:
cv2.rectangle(rejected, (x, y), (x + w, y + h), (100, 100, 100), 1)
cv2.putText(rejected, str(index_), (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))
# Make a white copy of our image
new_image = edges.copy()
new_image.fill(255)
boxes = []
# For each box, find the foreground and background intensities
for index_, (contour_, box) in enumerate(keepers):
# Find the average intensity of the edge pixels to
# determine the foreground intensity
fg_int = 0.0
for p in contour_:
fg_int += ii(p[0][0], p[0][1])
fg_int /= len(contour_)
if DEBUG:
print "FG Intensity for #%d = %d" % (index_, fg_int)
# Find the intensity of three pixels going around the
# outside of each corner of the bounding box to determine
# the background intensity
x_, y_, width, height = box
bg_int = \
[
# bottom left corner 3 pixels
ii(x_ - 1, y_ - 1),
ii(x_ - 1, y_),
ii(x_, y_ - 1),
# bottom right corner 3 pixels
ii(x_ + width + 1, y_ - 1),
ii(x_ + width, y_ - 1),
ii(x_ + width + 1, y_),
# top left corner 3 pixels
ii(x_ - 1, y_ + height + 1),
ii(x_ - 1, y_ + height),
ii(x_, y_ + height + 1),
# top right corner 3 pixels
ii(x_ + width + 1, y_ + height + 1),
ii(x_ + width, y_ + height + 1),
ii(x_ + width + 1, y_ + height)
]
# Find the median of the background
# pixels determined above
bg_int = np.median(bg_int)
if DEBUG:
print "BG Intensity for #%d = %s" % (index_, repr(bg_int))
# Determine if the box should be inverted
if fg_int >= bg_int:
fg = 255
bg = 0
else:
fg = 0
bg = 255
# Loop through every pixel in the box and color the
# pixel accordingly
for x in range(x_, x_ + width):
for y in range(y_, y_ + height):
if y >= img_y or x >= img_x:
if DEBUG:
print "pixel out of bounds (%d,%d)" % (y, x)
continue
if ii(x, y) > fg_int:
new_image[y][x] = bg
else:
new_image[y][x] = fg
# blur a bit to improve OCR accuracy
new_image = cv2.blur(new_image, (2, 2))
cv2.imwrite(output_file, new_image)
if DEBUG:
cv2.imwrite('edges.png', edges)
cv2.imwrite('processed.png', processed)
cv2.imwrite('rejected.png', rejected)
|
|
"""
Test the functions that load libgmt.
"""
import ctypes
import shutil
import subprocess
import sys
import types
from pathlib import PurePath
import pytest
from pygmt.clib.loading import check_libgmt, clib_full_names, clib_names, load_libgmt
from pygmt.exceptions import GMTCLibError, GMTCLibNotFoundError, GMTOSError
class FakedLibGMT: # pylint: disable=too-few-public-methods
"""
Class for faking a GMT library.
"""
def __init__(self, name):
self._name = name
def __str__(self):
return self._name
def test_check_libgmt():
"""
Make sure check_libgmt fails when given a bogus library.
"""
libgmt = FakedLibGMT("/path/to/libgmt.so")
msg = (
# pylint: disable=protected-access
f"Error loading '{libgmt._name}'. "
"Couldn't access function GMT_Create_Session. "
"Ensure that you have installed an up-to-date GMT version 6 library. "
"Please set the environment variable 'GMT_LIBRARY_PATH' to the "
"directory of the GMT 6 library."
)
with pytest.raises(GMTCLibError, match=msg):
check_libgmt(libgmt)
def test_clib_names():
"""
Make sure we get the correct library name for different OS names.
"""
for linux in ["linux", "linux2", "linux3"]:
assert clib_names(linux) == ["libgmt.so"]
assert clib_names("darwin") == ["libgmt.dylib"]
assert clib_names("win32") == ["gmt.dll", "gmt_w64.dll", "gmt_w32.dll"]
for freebsd in ["freebsd10", "freebsd11", "freebsd12"]:
assert clib_names(freebsd) == ["libgmt.so"]
with pytest.raises(GMTOSError):
clib_names("meh")
###############################################################################
# Tests for load_libgmt
def test_load_libgmt():
"""
Test that loading libgmt works and doesn't crash.
"""
check_libgmt(load_libgmt())
@pytest.mark.skipif(sys.platform == "win32", reason="run on UNIX platforms only")
def test_load_libgmt_fails(monkeypatch):
"""
Test that GMTCLibNotFoundError is raised when GMT's shared library cannot
be found.
"""
with monkeypatch.context() as mpatch:
mpatch.setattr(sys, "platform", "win32") # pretend to be on Windows
mpatch.setattr(
subprocess, "check_output", lambda cmd, encoding: "libfakegmt.so"
)
with pytest.raises(GMTCLibNotFoundError):
check_libgmt(load_libgmt())
def test_load_libgmt_with_a_bad_library_path(monkeypatch):
"""
Test that loading still works when given a bad library path.
"""
# Set a fake "GMT_LIBRARY_PATH"
monkeypatch.setenv("GMT_LIBRARY_PATH", "/not/a/real/path")
assert check_libgmt(load_libgmt()) is None
class TestLibgmtBrokenLibs:
"""
Test that load_libgmt still works when a broken library is found.
"""
# load the GMT library before mocking the ctypes.CDLL function
loaded_libgmt = load_libgmt()
invalid_path = "/invalid/path/to/libgmt.so"
faked_libgmt1 = FakedLibGMT("/path/to/faked/libgmt1.so")
faked_libgmt2 = FakedLibGMT("/path/to/faked/libgmt2.so")
def _mock_ctypes_cdll_return(self, libname):
"""
Mock the return value of ctypes.CDLL.
Parameters
----------
libname : str or FakedLibGMT or ctypes.CDLL
Path to the GMT library, a faked GMT library, or a working library
loaded as ctypes.CDLL.
Returns
-------
object
Either the loaded GMT library or the faked GMT library.
"""
if isinstance(libname, FakedLibGMT):
# libname is a faked GMT library, return the faked library
return libname
if isinstance(libname, str):
# libname is an invalid library path in string type,
# raise OSError like the original ctypes.CDLL
raise OSError(f"Unable to find '{libname}'")
# libname is a loaded GMT library
return self.loaded_libgmt
@pytest.fixture
def mock_ctypes(self, monkeypatch):
"""
Patch the ctypes.CDLL function.
"""
monkeypatch.setattr(ctypes, "CDLL", self._mock_ctypes_cdll_return)
def test_two_broken_libraries(self, mock_ctypes): # pylint: disable=unused-argument
"""
Case 1: two broken libraries.
Raise the GMTCLibNotFoundError exception. Error message should contain
information of both libraries that failed to load properly.
"""
# pylint: disable=protected-access
lib_fullnames = [self.faked_libgmt1, self.faked_libgmt2]
msg_regex = (
fr"Error loading GMT shared library at '{self.faked_libgmt1._name}'.\n"
fr"Error loading '{self.faked_libgmt1._name}'. Couldn't access.*\n"
fr"Error loading GMT shared library at '{self.faked_libgmt2._name}'.\n"
f"Error loading '{self.faked_libgmt2._name}'. Couldn't access.*"
)
with pytest.raises(GMTCLibNotFoundError, match=msg_regex):
load_libgmt(lib_fullnames=lib_fullnames)
def test_load_brokenlib_invalidpath(
self, mock_ctypes
): # pylint: disable=unused-argument
"""
Case 2: broken library + invalid path.
Raise the GMTCLibNotFoundError exception. Error message should contain
information of one library that failed to load and one invalid path.
"""
# pylint: disable=protected-access
lib_fullnames = [self.faked_libgmt1, self.invalid_path]
msg_regex = (
fr"Error loading GMT shared library at '{self.faked_libgmt1._name}'.\n"
fr"Error loading '{self.faked_libgmt1._name}'. Couldn't access.*\n"
fr"Error loading GMT shared library at '{self.invalid_path}'.\n"
f"Unable to find '{self.invalid_path}'"
)
with pytest.raises(GMTCLibNotFoundError, match=msg_regex):
load_libgmt(lib_fullnames=lib_fullnames)
def test_brokenlib_invalidpath_workinglib(
self, mock_ctypes
): # pylint: disable=unused-argument
"""
Case 3: broken library + invalid path + working library.
"""
lib_fullnames = [self.faked_libgmt1, self.invalid_path, self.loaded_libgmt]
assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None
def test_invalidpath_brokenlib_workinglib(
self, mock_ctypes
): # pylint: disable=unused-argument
"""
Case 4: invalid path + broken library + working library.
"""
lib_fullnames = [self.invalid_path, self.faked_libgmt1, self.loaded_libgmt]
assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None
def test_workinglib_brokenlib_invalidpath(
self, mock_ctypes
): # pylint: disable=unused-argument
"""
Case 5: working library + broken library + invalid path.
"""
lib_fullnames = [self.loaded_libgmt, self.faked_libgmt1, self.invalid_path]
assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None
def test_brokenlib_brokenlib_workinglib(
self, mock_ctypes
): # pylint: disable=unused-argument
"""
Case 6: repeating broken libraries + working library.
"""
lib_fullnames = [self.faked_libgmt1, self.faked_libgmt1, self.loaded_libgmt]
assert check_libgmt(load_libgmt(lib_fullnames=lib_fullnames)) is None
###############################################################################
# Tests for clib_full_names
@pytest.fixture(scope="module", name="gmt_lib_names")
def fixture_gmt_lib_names():
"""
Return a list of the library names for the current operating system.
"""
return clib_names(sys.platform)
@pytest.fixture(scope="module", name="gmt_bin_dir")
def fixture_gmt_bin_dir():
"""
Return GMT's bin directory.
"""
return str(PurePath(shutil.which("gmt")).parent)
@pytest.fixture(scope="module", name="gmt_lib_realpath")
def fixture_gmt_lib_realpath():
"""
Return the real path of the GMT library.
"""
lib_realpath = subprocess.check_output(
["gmt", "--show-library"], encoding="utf-8"
).rstrip("\n")
# On Windows, clib_full_names() returns paths with separator "\\",
# but "gmt --show-library" returns paths with separator "/".
# Use `str(PurePath(realpath)` to mimic the behavior of clib_full_names()
return str(PurePath(lib_realpath))
def test_clib_full_names_gmt_library_path_undefined_path_empty(
monkeypatch, gmt_lib_names
):
"""
Make sure that clib_full_names() returns a generator with expected names
when GMT_LIBRARY_PATH is undefined and PATH is empty.
"""
with monkeypatch.context() as mpatch:
mpatch.delenv("GMT_LIBRARY_PATH", raising=False)
mpatch.setenv("PATH", "")
lib_fullpaths = clib_full_names()
assert isinstance(lib_fullpaths, types.GeneratorType)
assert list(lib_fullpaths) == gmt_lib_names
def test_clib_full_names_gmt_library_path_defined_path_empty(
monkeypatch, gmt_lib_names, gmt_lib_realpath
):
"""
Make sure that clib_full_names() returns a generator with expected names
when GMT_LIBRARY_PATH is defined and PATH is empty.
"""
with monkeypatch.context() as mpatch:
mpatch.setenv("GMT_LIBRARY_PATH", str(PurePath(gmt_lib_realpath).parent))
mpatch.setenv("PATH", "")
lib_fullpaths = clib_full_names()
assert isinstance(lib_fullpaths, types.GeneratorType)
assert list(lib_fullpaths) == [gmt_lib_realpath] + gmt_lib_names
def test_clib_full_names_gmt_library_path_undefined_path_included(
monkeypatch, gmt_lib_names, gmt_lib_realpath, gmt_bin_dir
):
"""
Make sure that clib_full_names() returns a generator with expected names
when GMT_LIBRARY_PATH is undefined and PATH includes GMT's bin path.
"""
with monkeypatch.context() as mpatch:
mpatch.delenv("GMT_LIBRARY_PATH", raising=False)
mpatch.setenv("PATH", gmt_bin_dir)
lib_fullpaths = clib_full_names()
assert isinstance(lib_fullpaths, types.GeneratorType)
# Windows: find_library() searches the library in PATH, so one more
npath = 2 if sys.platform == "win32" else 1
assert list(lib_fullpaths) == [gmt_lib_realpath] * npath + gmt_lib_names
def test_clib_full_names_gmt_library_path_defined_path_included(
monkeypatch, gmt_lib_names, gmt_lib_realpath, gmt_bin_dir
):
"""
Make sure that clib_full_names() returns a generator with expected names
when GMT_LIBRARY_PATH is defined and PATH includes GMT's bin path.
"""
with monkeypatch.context() as mpatch:
mpatch.setenv("GMT_LIBRARY_PATH", str(PurePath(gmt_lib_realpath).parent))
mpatch.setenv("PATH", gmt_bin_dir)
lib_fullpaths = clib_full_names()
assert isinstance(lib_fullpaths, types.GeneratorType)
# Windows: find_library() searches the library in PATH, so one more
npath = 3 if sys.platform == "win32" else 2
assert list(lib_fullpaths) == [gmt_lib_realpath] * npath + gmt_lib_names
def test_clib_full_names_gmt_library_path_incorrect_path_included(
monkeypatch, gmt_lib_names, gmt_lib_realpath, gmt_bin_dir
):
"""
Make sure that clib_full_names() returns a generator with expected names
when GMT_LIBRARY_PATH is defined but incorrect and PATH includes GMT's bin
path.
"""
with monkeypatch.context() as mpatch:
mpatch.setenv("GMT_LIBRARY_PATH", "/not/a/valid/library/path")
mpatch.setenv("PATH", gmt_bin_dir)
lib_fullpaths = clib_full_names()
assert isinstance(lib_fullpaths, types.GeneratorType)
# Windows: find_library() searches the library in PATH, so one more
npath = 2 if sys.platform == "win32" else 1
assert list(lib_fullpaths) == [gmt_lib_realpath] * npath + gmt_lib_names
|
|
# Copyright (C) 2012
# See LICENSE.txt for details.
"""
Epytext (and general Python docstring) wrapper
==============================================
Utility for wrapping docstrings in Python; specifically, docstrings in U{Epytext
<http://epydoc.sourceforge.net/manual-epytext.html>} format, or those that are
close enough.
The wrapping herein generally adheres to all the conventions set forth by the
Twisted project U{http://twistedmatrix.com/}.
Currently (obviously) the only supported editor is U{Sublime Text 2
<http://www.sublimetext.com/>} but a sufficiently enterprising individual could
either use this file as a script (no dependencies!) by piping the contents of
the docstring to it, or call L{wrapPythonDocstring} and preserve point position.
"""
from __future__ import unicode_literals
import re
from uuid import uuid4
__all__ = [
"wrapPythonDocstring"
]
def isUnderline(expr):
return bool(re.match("[=]+$", expr) or re.match("[-]+$", expr))
def startslist(x):
return (x == '-' or (x.endswith(".") and x[:-1].isdigit()))
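# e.g. startslist("-") and startslist("2.") are True; startslist("word") is False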
class RegularParagraph(object):
otherIndent = ""
def __init__(self, pointTracker, fixedIndent="", hangIndent="",
followIndent=""):
self.words = []
self.fixedIndent = fixedIndent
self.hangIndent = hangIndent
self.followIndent = followIndent
self.more = None
self.pointTracker = pointTracker
self._unwrappedLines = 0
self._headingType = None
self._headingPoints = []
def matchesTag(self, other):
return False
def __nonzero__(self):
return bool(self.words)
def all(self):
while self is not None:
#print self.__class__.__name__
if self:
yield self
self = self.more
def setIsHeading(self, headingType):
self._headingType = headingType
def isHeading(self):
return bool(self._headingType)
def add(self, line):
clean = self.pointTracker.peek(line)
stripped = clean.strip()
if stripped:
self._unwrappedLines += 1
active = self
firstword = list(self.pointTracker.filterWords(line.split()))[0]
if stripped.startswith("@"):
fp = FieldParagraph(pointTracker=self.pointTracker)
fp.words.extend(line.split())
active = self.more = fp
elif isUnderline(stripped) and self._unwrappedLines == 2:
# This paragraph is actually a section heading.
active.setIsHeading(stripped[0])
self._headingPoints = self.pointTracker.extractPoints(line)
# FIXME: should respect leading indentation.
active = self.nextRegular()
elif startslist(firstword):
# Aesthetically I prefer a 2-space indent here, but the
# convention in the codebase seems to be 4 spaces.
LIST_INDENT = 4
# FIXME: this also needs to respect leading indentation so it
# can properly represent nested lists.
hangIndent = self.pointTracker.lengthOf(firstword) + 1
fi = self.fixedIndent
if not (self.words and startslist(self.words[0])):
fi += (" " * LIST_INDENT)
fp = RegularParagraph(
pointTracker=self.pointTracker,
fixedIndent=fi,
hangIndent=" " * hangIndent,
followIndent=self.followIndent,
)
fp.words.extend(line.split())
active = self.more = fp
else:
self.words.extend(line.split())
if stripped.endswith("::"):
active.more = PreFormattedParagraph(
active,
indentBegins=len(clean) - len(clean.lstrip())
)
active = active.more
return active
else:
rawstrip = line.strip()
if rawstrip:
self.words.append(rawstrip)
if len(list(self.pointTracker.filterWords(self.words))):
return self.nextRegular()
return self
def wrap(self, output, indentation, width):
if not self.words:
return
thisLine = self.firstIndent(indentation)
first = True
prevWord = ''
for word in self.words:
if not self.pointTracker.isWord(word):
thisLine += word
continue
if ((prevWord.endswith(".") or prevWord.endswith("?") or
prevWord.endswith("!")) and not prevWord[:-1].isdigit()):
words = prevWord.split(".")[:-1]
if ( len(words) > 1 and
[self.pointTracker.lengthOf(x) for x in words] ==
[1] * len(words) ):
# acronym
spaces = 1
else:
spaces = 2
else:
spaces = 1
prevWord = word
if ( self.pointTracker.lengthOf(thisLine) +
self.pointTracker.lengthOf(word) + spaces <= width ):
if first:
first = not first
else:
thisLine += (" " * spaces)
thisLine += word
else:
output.write(self.pointTracker.scan(thisLine, output.tell()))
output.write("\n")
thisLine = self.restIndent(indentation) + word
output.write(self.pointTracker.scan(thisLine, output.tell()))
output.write("\n")
if self.isHeading():
indentText = self.firstIndent(indentation)
lineSize = self.pointTracker.lengthOf(thisLine) - len(indentText)
output.write(self.pointTracker.scan(
indentText + ''.join(self._headingPoints) +
(self._headingType * lineSize), output.tell()
))
output.write("\n")
def firstIndent(self, indentation):
return indentation + self.fixedIndent
def restIndent(self, indentation):
return (indentation + self.fixedIndent + self.hangIndent +
self.otherIndent)
def genRegular(self):
return RegularParagraph(pointTracker=self.pointTracker,
fixedIndent=self.nextIndent(),
followIndent=self.nextIndent())
def nextRegular(self):
self.more = self.genRegular()
return self.more
def nextIndent(self):
return self.followIndent
class FieldParagraph(RegularParagraph):
otherIndent = " "
def nextIndent(self):
return " "
def matchesTag(self, other):
if isinstance(other, FieldParagraph):
myWords = list(self.pointTracker.filterWords(self.words))
theirWords = list(self.pointTracker.filterWords(other.words))
if ( set([myWords[0], theirWords[0]]) ==
set(["@return:", "@rtype:"]) ):
# matching @return and @rtype fields.
return True
elif len(myWords) > 1 and len(theirWords) > 1:
# matching @param and @type fields.
return myWords[1] == theirWords[1]
return False
else:
return False
class PreFormattedParagraph(object):
def __init__(self, before, indentBegins):
self.lines = []
self.before = before
pointTracker = before.pointTracker
fixedIndent = (before.fixedIndent + before.hangIndent +
before.otherIndent)
self.indentBegins = indentBegins
self.fixedIndent = fixedIndent
self.more = None
self.pointTracker = pointTracker
def matchesTag(self, other):
return False
def add(self, line):
actualLine = self.pointTracker.peek(line)
if actualLine.strip():
if len(actualLine) - len(actualLine.lstrip()) <= self.indentBegins:
next = self.more = self.before.genRegular()
return next.add(line)
self.lines.append(line.rstrip())
else:
self.lines.append(line.strip())
return self
def fixIndentation(self):
while self.lines and not self.lines[0].strip():
self.lines.pop(0)
while self.lines and not self.lines[-1].strip():
self.lines.pop()
if not self.lines:
return
cleanLines = map(self.pointTracker.peek, self.lines)
commonLeadingIndent = min([len(x) - len(x.lstrip()) for x in cleanLines
if x.strip()])
newLines = []
for actualLine, line in zip(cleanLines, self.lines):
if actualLine != line and line[:commonLeadingIndent].strip():
# There's a marker, and it's in the leading whitespace.
# Explicitly reposition the marker at the beginning of the fixed
# indentation.
line = (self.pointTracker.marker +
actualLine[commonLeadingIndent:])
else:
line = line.rstrip()[commonLeadingIndent:]
newLines.append(line)
self.lines = newLines
def wrap(self, output, indentation, width):
# OK, now we know about all the lines we're going to know about.
self.fixIndentation()
for line in self.lines:
if self.pointTracker.peek(line):
output.write(indentation + " " + self.fixedIndent)
output.write(self.pointTracker.scan(line, output.tell()))
output.write("\n")
class PointTracker(object):
"""
Object for keeping track of where the insertion points are.
"""
def __init__(self, point):
self.point = point
self.marker = "{" + unicode(uuid4()) + "}"
self.outPoints = []
def annotate(self, text):
"""
Add point references to a block of text.
"""
return text[:self.point] + self.marker + text[self.point:]
def filterWords(self, words):
for word in words:
if self.isWord(word):
yield self.peek(word)
def isWord(self, text):
"""
Is the given word actually a word, or just an artifact of the
point-tracking process? If it's just the point marker by itself, then
no, it isn't, and don't insert additional whitespace after it.
"""
return not (text == self.marker)
def lengthOf(self, word):
"""
How long would this word be if it didn't have any point-markers in it?
"""
return len(self.peek(word))
def peek(self, word):
"""
What would this word look like if it didn't have any point-markers in
it?
"""
return word.replace(self.marker, "")
def extractPoints(self, text):
"""
Return a C{list} of all point markers contained in the text.
"""
if self.marker in text:
return [self.marker]
return []
def scan(self, text, offset):
"""
Scan some text for point markers, remember them, and remove them.
"""
idx = text.find(self.marker)
if idx == -1:
return text
self.outPoints.append(idx + offset)
return self.peek(text)
def wrapPythonDocstring(docstring, output, indentation=" ",
width=79, point=0):
"""
Wrap a given Python docstring.
@param docstring: the docstring itself (just the stuff between the quotes).
@type docstring: unicode
@param output: The unicode output file to write the wrapped docstring to.
@type output: L{file}-like (C{write} takes unicode.)
@param indentation: a string (consisting only of spaces) indicating the
amount of space to shift by. Don't adjust this. It's always 4 spaces.
PEP8 says so.
@type indentation: L{unicode}
@param width: The maximum number of characters allowed in a wrapped line.
@type width: L{int}
@param point: The location of the cursor in the text, as an offset from the
beginning of the docstring. If this function is being used from within
a graphical editor, this parameter can be used (in addition to the
return value of this function) to reposition the cursor at the relative
position which the user will expect.
@return: The new location of the cursor.
"""
# TODO: multiple points; usable, for example, for start and end of a
# currently active selection.
pt = PointTracker(point)
start = paragraph = RegularParagraph(pt)
docstring = pt.annotate(docstring)
for line in docstring.split("\n"):
paragraph = paragraph.add(line)
prevp = None
for paragraph in start.all():
if not paragraph.matchesTag(prevp):
output.write("\n")
prevp = paragraph
paragraph.wrap(output, indentation, width)
output.write(indentation)
return pt.outPoints[0]
if __name__ == '__main__':
import sys
from cStringIO import StringIO
io = StringIO()
indata = sys.stdin.read()
firstline = [line for line in indata.split("\n") if line][0]
wrapPythonDocstring(indata, io,
indentation=" " * (len(firstline) - len(firstline.lstrip())))
sys.stdout.write(io.getvalue())
sys.stdout.flush()
|
|
# -*- coding: utf-8 -*-
"""
zine.pluginsystem
~~~~~~~~~~~~~~~~~
This module implements the plugin system.
Plugin Distribution
-------------------
The best way to distribute plugins are `.plugin` files. Those files are
simple zip files that are uncompressed when installed from the plugin
admin panel. You can easily create .plugin files yourself. Just finish
the plugin and use the `scripts/bundle-plugin` script or do it
programmatically::
app.plugins['<name of the plugin>'].dump('/target/filename.plugin')
This will save the plugin as `.plugin` package. The preferred filename
for plugin files is `<FILESYSTEM_NAME>-<VERSION>.plugin`. So if
you want to dump all the plugins you have into plugin files you can use
this snippet::
for plugin in app.plugins.itervalues():
plugin.dump('%s-%s.plugin' % (
plugin.filesystem_name,
plugin.version
))
It's only possible to create packages of plugins that are bound to an
application so just create a development instance for plugin development.
Plugin Metadata
---------------
Plugins are identified by their metadata. Zine requires a file
named `metadata.txt` to load some information about the plugin.
Zine currently supports the following metadata information:
:Name:
The full name of the plugin.
:Plugin URL:
The URL of the plugin (e.g download location).
:Description:
The full description of the plugin.
:Author:
The name of the author of the plugin.
Use this field in the form of ``Name <[email protected]>``
where `Name` is the full name of the author.
:Author URL:
The website of the plugin author.
:Contributors:
Add a list of all contributors separated by a comma.
Use this field in the form of ``Name1 <[email protected]>, Name2
<[email protected]>`` where `Name` is the full name of the author
and the email is optional.
:Version:
The version of the deployed plugin.
:Preview:
*For themes only*
A little preview of the theme deployed by the plugin.
:Depends:
A comma-separated list of plugins this plugin depends on. Each entry
must be named exactly like the plugin it refers to. All plugins in
this list will be activated if found, but if one is missing the
admin will be informed about that and the plugin won't be activated.
Each key can be suffixed with "[LANG_CODE]" for internationalization::
Title: Example Plugin
Title[de]: Beispielplugin
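A minimal `metadata.txt` built from the fields above might look like this
(illustrative values only)::
Name: Example Plugin
Author: Jane Doe <[email protected]>
Version: 0.1
Description: A short example plugin.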
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import __builtin__
import re
import sys
import inspect
from os import path, listdir, walk, makedirs
from types import ModuleType
from shutil import rmtree
from time import localtime, time
from urllib import quote
from werkzeug import cached_property, escape
from zine.application import get_application
from zine.utils import log
from zine.utils.mail import split_email, is_valid_email, check
from zine.utils.exceptions import UserException, summarize_exception
from zine.i18n import ZineTranslations as Translations, lazy_gettext, _
from zine.environment import BUILTIN_PLUGIN_FOLDER
_py_import = __builtin__.__import__
_i18n_key_re = re.compile(r'^(.*?)\[([^\]]+)\]$')
#: a dict of all managed applications by iid.
#: every application in this dict has a plugin space this module
#: controls. This is only used internally
_managed_applications = {}
PACKAGE_VERSION = 1
def get_object_name(obj):
"""Return a human readable name for the object."""
if inspect.isclass(obj) or inspect.isfunction(obj):
cls = obj
else:
cls = obj.__class__
if cls.__module__.startswith('zine.plugins.'):
prefix = cls.__module__.split('.', 2)[-1]
elif cls.__module__.startswith('zine.'):
prefix = cls.__module__
else:
prefix = 'external.' + cls.__module__
return prefix + '.' + cls.__name__
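# For example (illustrative class/module names): a class defined in
# `zine.plugins.example` is reported as `example.ClassName`, while anything
# outside the `zine` package gets an `external.` prefix.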
def find_plugins(app):
"""Return an iterator over all plugins available."""
enabled_plugins = set()
found_plugins = set()
for plugin in app.cfg['plugins']:
plugin = plugin.strip()
if plugin:
enabled_plugins.add(plugin)
for folder in app.plugin_searchpath:
if not path.isdir(folder):
continue
for filename in listdir(folder):
full_name = path.join(folder, filename)
if path.isdir(full_name) and \
path.isfile(path.join(full_name, 'metadata.txt')) and \
filename not in found_plugins:
found_plugins.add(filename)
yield Plugin(app, str(filename), path.abspath(full_name),
filename in enabled_plugins)
def install_package(app, package):
"""Install a plugin from a package to the instance plugin folder."""
from zipfile import ZipFile, error as BadZipFile
import py_compile
try:
f = ZipFile(package)
except (IOError, BadZipFile):
raise InstallationError('invalid')
# get the package version
try:
package_version = int(f.read('ZINE_PACKAGE'))
plugin_name = f.read('ZINE_PLUGIN')
except (KeyError, ValueError):
raise InstallationError('invalid')
# check if the package version is handleable
if package_version > PACKAGE_VERSION:
raise InstallationError('version')
# check if there is already a plugin with the same name
plugin_path = path.join(app.instance_folder, 'plugins', plugin_name)
if path.exists(plugin_path):
raise InstallationError('exists')
# make sure that we have a folder
try:
makedirs(plugin_path)
except (IOError, OSError):
pass
# now read all the files and write them to the folder
for filename in f.namelist():
if not filename.startswith('pdata/'):
continue
dst_filename = path.join(plugin_path, *filename[6:].split('/'))
try:
makedirs(path.dirname(dst_filename))
except (IOError, OSError):
pass
try:
dst = file(dst_filename, 'wb')
except IOError:
raise InstallationError('ioerror')
try:
dst.write(f.read(filename))
finally:
dst.close()
if filename.endswith('.py'):
py_compile.compile(dst_filename)
plugin = Plugin(app, plugin_name, plugin_path, False)
app.plugins[plugin_name] = plugin
app.cfg.touch()
return plugin
def get_package_metadata(package):
"""Get the metadata of a plugin in a package. Pass it a filepointer or
filename. Raises a `ValueError` if the package is not valid.
"""
from zipfile import ZipFile, error as BadZipFile
try:
f = ZipFile(package)
except (IOError, BadZipFile):
raise ValueError('not a valid package')
# get the package version and name
try:
package_version = int(f.read('ZINE_PACKAGE'))
plugin_name = f.read('ZINE_PLUGIN')
except (KeyError, ValueError):
raise ValueError('not a valid package')
if package_version > PACKAGE_VERSION:
raise ValueError('incompatible package version')
try:
metadata = parse_metadata(f.read('pdata/metadata.txt'))
except KeyError:
metadata = {}
metadata['uid'] = plugin_name
return metadata
def parse_metadata(string_or_fp):
"""Parse the metadata and return it as metadata object."""
result = {}
translations = {}
if isinstance(string_or_fp, basestring):
fileiter = iter(string_or_fp.splitlines(True))
else:
fileiter = iter(string_or_fp.readline, '')
fileiter = (line.decode('utf-8') for line in fileiter)
for line in fileiter:
line = line.strip()
if not line or line.startswith('#'):
continue
if ':' not in line:
key = line.strip()
value = ''
else:
key, value = line.split(':', 1)
while value.endswith('\\'):
try:
value = value[:-1] + fileiter.next().rstrip('\n')
except StopIteration:
pass
key = '_'.join(key.lower().split()).encode('ascii', 'ignore')
value = value.lstrip()
match = _i18n_key_re.match(key)
if match is not None:
key, lang = match.groups()
translations.setdefault(lang, {})[key] = value
else:
result[key] = value
return MetaData(result, translations)
class MetaData(object):
"""Holds metadata. This object has a dict like interface to the metadata
from the file and will return the values for the current language by
default. It's however possible to get an "untranslated" version of the
metadata by calling the `untranslated` method.
"""
def __init__(self, values, i18n_values=None):
self._values = values
self._i18n_values = i18n_values or {}
def untranslated(self):
"""Return a metadata object without translations."""
return MetaData(self._values)
def __getitem__(self, name):
locale = str(get_application().locale)
if name in self._i18n_values.get(locale, ()):
return self._i18n_values[locale][name]
if name in self._values:
return self._values[name]
raise KeyError(name)
def get(self, name, default=None):
"""Return a key or the default value if no value exists."""
try:
return self[name]
except KeyError:
return default
def __contains__(self, name):
try:
self[name]
except KeyError:
return False
return True
def _dict_method(name):
def proxy(self):
return getattr(self.as_dict(), name)()
proxy.__name__ = name
proxy.__doc__ = getattr(dict, name).__doc__
return proxy
__iter__ = iterkeys = _dict_method('iterkeys')
itervalues = _dict_method('itervalues')
iteritems = _dict_method('iteritems')
keys = _dict_method('keys')
values = _dict_method('values')
items = _dict_method('items')
del _dict_method
def as_dict(self):
result = self._values.copy()
result.update(self._i18n_values.get(str(get_application().locale), {}))
return result
class InstallationError(UserException):
"""Raised during plugin installation."""
MESSAGES = {
'invalid': lazy_gettext('Could not install the plugin because the '
'uploaded file is not a valid plugin file.'),
'version': lazy_gettext('The plugin uploaded has a newer package '
'version than this Zine installation '
'can handle.'),
'exists': lazy_gettext('A plugin with the same UID is already '
'installed. Aborted.'),
'ioerror': lazy_gettext('Could not install the package because the '
'installer wasn\'t able to write the package '
'information. Wrong permissions?')
}
def __init__(self, code):
UserException.__init__(self, self.MESSAGES[code])
self.code = code
class SetupError(UserException):
"""Raised by plugins if they want to stop their setup. If a plugin raises
a `SetupError` during the init, it will be disabled automatically.
"""
def make_setup_error(exc_info=None):
"""Create a new SetupError for the last exception and log it."""
if exc_info is None:
exc_info = sys.exc_info()
# log the exception
log.exception(_(u'Plugin setup error'), 'pluginsystem', exc_info)
exc_type, exc_value, tb = exc_info
# if the exception is already a SetupError we only
# have to return it unchanged.
if isinstance(exc_value, SetupError):
return exc_value
# otherwise create an error message for it and return a new
# exception.
error, (filename, line) = summarize_exception(exc_info)
    return SetupError(_(u'Exception happened on setup: '
u'%(error)s (%(file)s, line %(line)d)') % {
'error': escape(error),
'file': filename,
'line': line
})
class Plugin(object):
"""Wraps a plugin module."""
def __init__(self, app, name, path_, active):
self.app = app
self.name = name
self.path = path_
self.active = active
        self.instance_plugin = path.commonprefix([
            path.realpath(path_), path.realpath(app.plugin_folder)]) == \
            path.realpath(app.plugin_folder)
self.setup_error = None
def remove(self):
"""Remove the plugin from the instance folder."""
if not self.instance_plugin:
raise ValueError('cannot remove non instance-plugins')
if self.active:
raise ValueError('cannot remove active plugin')
rmtree(self.path)
del self.app.plugins[self.name]
def dump(self, fp):
"""Dump the plugin as package into the filepointer or file."""
from zipfile import ZipFile, ZipInfo
f = ZipFile(fp, 'w')
# write all files into a "pdata/" folder
offset = len(self.path) + 1
for dirpath, dirnames, filenames in walk(self.path):
# don't recurse into hidden dirs
for i in range(len(dirnames)-1, -1, -1):
if dirnames[i].startswith('.'):
del dirnames[i]
for filename in filenames:
if filename.endswith('.pyc') or \
filename.endswith('.pyo'):
continue
f.write(path.join(dirpath, filename),
path.join('pdata', dirpath[offset:], filename))
# add the package information files
for name, data in [('ZINE_PLUGIN', self.name),
('ZINE_PACKAGE', PACKAGE_VERSION)]:
zinfo = ZipInfo(name, localtime(time()))
zinfo.compress_type = f.compression
zinfo.external_attr = (33188 & 0xFFFF) << 16L
f.writestr(zinfo, str(data))
f.close()
@cached_property
def metadata(self):
try:
f = file(path.join(self.path, 'metadata.txt'))
except IOError:
return {}
try:
return parse_metadata(f)
finally:
f.close()
@cached_property
def translations(self):
"""The translations for this application."""
locale_path = path.join(self.path, 'i18n')
return Translations.load(locale_path, self.app.cfg['language'])
@cached_property
def is_documented(self):
"""This property is True if the plugin has documentation."""
for lang in self.app.cfg['language'], 'en':
if path.isfile(path.join(self.path, 'docs', lang, 'index.page')):
return True
return False
@cached_property
def is_bundled(self):
"""This property is True if the plugin is bundled with Zine."""
        return path.commonprefix([
            path.realpath(self.path), path.realpath(BUILTIN_PLUGIN_FOLDER)]) == \
            path.realpath(BUILTIN_PLUGIN_FOLDER)
@cached_property
def module(self):
"""The module of the plugin. The first access imports it."""
try:
# we directly import from the zine module space
return __import__('zine.plugins.%s' % self.name, None, None,
['setup'])
except:
if not self.app.cfg['plugin_guard']:
raise
self.setup_error = make_setup_error()
@property
def display_name(self):
"""The full name from the metadata."""
return self.metadata.get('name', self.name)
@property
def filesystem_name(self):
"""The human readable package name for the filesystem."""
string = self.metadata.untranslated().get('name', self.name)
return ''.join(string.split())
@property
def html_display_name(self):
"""The display name as HTML link."""
link = self.plugin_url
if link:
return u'<a href="%s">%s</a>' % (
escape(link),
escape(self.display_name)
)
return escape(self.display_name)
@property
def plugin_url(self):
"""Return the URL of the plugin."""
return self.metadata.get('plugin_url')
@property
def description(self):
"""Return the description of the plugin."""
return self.metadata.get('description', u'')
@property
def has_author(self):
"""Does the plugin has an author at all?"""
return 'author' in self.metadata
@property
def author_info(self):
"""The author, mail and author URL of the plugin."""
return split_email(self.metadata.get('author', u'Nobody')) + \
(self.metadata.get('author_url'),)
@property
def contributors(self):
"""The Contributors of the plugin."""
data = self.metadata.get('contributors', '')
if not data:
return []
return [split_email(c.strip()) for c in
self.metadata.get('contributors', '').split(',')]
@property
def html_contributors_info(self):
result = []
for contributor in self.contributors:
name, contact = contributor
if not contact:
result.append(escape(name))
else:
result.append('<a href="%s">%s</a>' % (
escape(check(is_valid_email, contact) and
'mailto:' + contact or contact),
escape(name)
))
return u', '.join(result)
@property
def html_author_info(self):
"""Return the author info as html link."""
name, email, url = self.author_info
if not url:
if not email:
return escape(name)
url = 'mailto:%s' % quote(email)
return u'<a href="%s">%s</a>' % (
escape(url),
escape(name)
)
@property
def author(self):
"""Return the author of the plugin."""
x = self.author_info
return x[0] or x[1]
@property
def author_email(self):
"""Return the author email address of the plugin."""
return self.author_info[1]
@property
def author_url(self):
"""Return the URL of the author of the plugin."""
return self.author_info[2]
@property
def version(self):
"""The version of the plugin."""
return self.metadata.get('version')
@property
def depends(self):
"""A list of depenencies for this plugin.
Plugins listed here won't be loaded automaticly.
"""
depends = self.metadata.get('depends', '').strip()
return filter(None, [x.strip() for x in depends.split(',')])
def setup(self):
"""Setup the plugin."""
try:
self.module.setup(self.app, self)
except:
if self.setup_error is None:
self.setup_error = make_setup_error()
if not self.app.cfg['plugin_guard']:
raise
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name
)
def set_plugin_searchpath(searchpath):
"""Set the plugin searchpath for the plugin pseudo package."""
_plugins.__path__ = searchpath
# the application imports this on setup and modifies it
sys.modules['zine.plugins'] = _plugins = ModuleType('zine.plugins')
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
from random import random
from time import time
from os.path import join
from swift import gettext_ as _
import hashlib
from eventlet import sleep, Timeout
from eventlet.greenpool import GreenPool
from swift.common.daemon import Daemon
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.utils import get_logger, dump_recon_cache, split_path
from swift.common.http import HTTP_NOT_FOUND, HTTP_CONFLICT, \
HTTP_PRECONDITION_FAILED
from swift.container.reconciler import direct_delete_container_entry
MAX_OBJECTS_TO_CACHE = 100000
class ObjectExpirer(Daemon):
"""
Daemon that queries the internal hidden expiring_objects_account to
discover objects that need to be deleted.
:param conf: The daemon configuration.
"""
def __init__(self, conf, logger=None, swift=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-expirer')
self.interval = int(conf.get('interval') or 300)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
request_tries = int(conf.get('request_tries') or 3)
self.swift = swift or InternalClient(
conf_path, 'Swift Object Expirer', request_tries)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
def report(self, final=False):
"""
        Emits a log line report of the progress so far, or of the final
        progress if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %ds; %d objects expired') %
(elapsed, self.report_objects))
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %ds; %d objects expired') %
(elapsed, self.report_objects))
self.report_last_time = time()
def iter_cont_objs_to_expire(self):
"""
        Yields (container, obj) tuples to be deleted; after all queue entries
        have been yielded, (container, None) is yielded once per container so
        the caller knows which (now possibly empty) containers to delete.
"""
obj_cache = {}
cnt = 0
all_containers = set()
for c in self.swift.iter_containers(self.expiring_objects_account):
container = str(c['name'])
timestamp = int(container)
if timestamp > int(time()):
break
all_containers.add(container)
for o in self.swift.iter_objects(self.expiring_objects_account,
container):
obj = o['name'].encode('utf8')
timestamp, actual_obj = obj.split('-', 1)
timestamp = int(timestamp)
if timestamp > int(time()):
break
try:
cust_account, cust_cont, cust_obj = \
split_path('/' + actual_obj, 3, 3, True)
cache_key = '%s/%s' % (cust_account, cust_cont)
except ValueError:
cache_key = None
if self.processes > 0:
obj_process = int(
hashlib.md5('%s/%s' % (container, obj)).
hexdigest(), 16)
if obj_process % self.processes != self.process:
continue
if cache_key not in obj_cache:
obj_cache[cache_key] = []
obj_cache[cache_key].append((container, obj))
cnt += 1
if cnt > MAX_OBJECTS_TO_CACHE:
while obj_cache:
for key in obj_cache.keys():
if obj_cache[key]:
yield obj_cache[key].pop()
cnt -= 1
else:
del obj_cache[key]
while obj_cache:
for key in obj_cache.keys():
if obj_cache[key]:
yield obj_cache[key].pop()
else:
del obj_cache[key]
for container in all_containers:
yield (container, None)
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
containers_to_delete = set([])
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug('Run begin')
containers, objects = \
self.swift.get_account_info(self.expiring_objects_account)
self.logger.info(_('Pass beginning; %s possible containers; %s '
'possible objects') % (containers, objects))
for container, obj in self.iter_cont_objs_to_expire():
containers_to_delete.add(container)
if not obj:
continue
timestamp, actual_obj = obj.split('-', 1)
timestamp = int(timestamp)
if timestamp > int(time()):
break
pool.spawn_n(
self.delete_object, actual_obj, timestamp,
container, obj)
pool.waitall()
for container in containers_to_delete:
try:
self.swift.delete_container(
self.expiring_objects_account,
container,
acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
except (Exception, Timeout) as err:
self.logger.exception(
_('Exception while deleting container %s %s') %
(container, str(err)))
self.logger.debug('Run end')
self.report(final=True)
except (Exception, Timeout):
self.logger.exception(_('Unhandled exception'))
def run_forever(self, *args, **kwargs):
"""
Executes passes forever, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon has no additional keyword args.
"""
sleep(random() * self.interval)
while True:
begin = time()
try:
self.run_once(*args, **kwargs)
except (Exception, Timeout):
self.logger.exception(_('Unhandled exception'))
elapsed = time() - begin
if elapsed < self.interval:
sleep(random() * (self.interval - elapsed))
def get_process_values(self, kwargs):
"""
        Sets self.processes and self.process from the kwargs if those
        values exist; otherwise, leaves those values as they were set in
        the config file.
:param kwargs: Keyword args passed into the run_forever(), run_once()
methods. They have values specified on the command
line when the daemon is run.
"""
if kwargs.get('processes') is not None:
self.processes = int(kwargs['processes'])
if kwargs.get('process') is not None:
self.process = int(kwargs['process'])
if self.process < 0:
raise ValueError(
'process must be an integer greater than or equal to 0')
if self.processes < 0:
raise ValueError(
'processes must be an integer greater than or equal to 0')
if self.processes and self.process >= self.processes:
raise ValueError(
'process must be less than or equal to processes')
def delete_object(self, actual_obj, timestamp, container, obj):
start_time = time()
try:
try:
self.delete_actual_object(actual_obj, timestamp)
except UnexpectedResponse as err:
if err.resp.status_int != HTTP_NOT_FOUND:
raise
if float(timestamp) > time() - self.reclaim_age:
# we'll have to retry the DELETE later
raise
self.pop_queue(container, obj)
self.report_objects += 1
self.logger.increment('objects')
except (Exception, Timeout) as err:
self.logger.increment('errors')
self.logger.exception(
_('Exception while deleting object %s %s %s') %
(container, obj, str(err)))
self.logger.timing_since('timing', start_time)
self.report()
def pop_queue(self, container, obj):
"""
Issue a delete object request to the container for the expiring object
queue entry.
"""
direct_delete_container_entry(self.swift.container_ring,
self.expiring_objects_account,
container, obj)
def delete_actual_object(self, actual_obj, timestamp):
"""
Deletes the end-user object indicated by the actual object name given
'<account>/<container>/<object>' if and only if the X-Delete-At value
of the object is exactly the timestamp given.
:param actual_obj: The name of the end-user object to delete:
'<account>/<container>/<object>'
:param timestamp: The timestamp the X-Delete-At value must match to
perform the actual delete.
"""
path = '/v1/' + urllib.quote(actual_obj.lstrip('/'))
self.swift.make_request('DELETE', path,
{'X-If-Delete-At': str(timestamp)},
(2, HTTP_PRECONDITION_FAILED))
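# Hedged usage sketch (not part of the original module): driving a single
# expiration pass with an already constructed InternalClient.  The conf keys
# mirror the ones read in ObjectExpirer.__init__ and the values are
# illustrative; ``client`` is an assumption standing in for a configured
# swift.common.internal_client.InternalClient.
def _example_run_single_pass(client):  # pragma: no cover - illustrative only
    conf = {
        'interval': '300',       # seconds between passes in run_forever()
        'concurrency': '2',      # greenthreads used to issue DELETEs
        'processes': '0',        # no multi-process sharding of the queue
        'process': '0',
        'reclaim_age': str(86400 * 7),
    }
    expirer = ObjectExpirer(conf, swift=client)
    expirer.run_once()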
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities to be used in Interactive Beam.
"""
from __future__ import absolute_import
import hashlib
import logging
import pandas as pd
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
def to_element_list(
reader, # type: Generator[Union[TestStreamPayload.Event, WindowedValueHolder]]
coder, # type: Coder
include_window_info # type: bool
):
# type: (...) -> List[WindowedValue]
"""Returns an iterator that properly decodes the elements from the reader.
"""
for e in reader:
if isinstance(e, TestStreamPayload.Event):
if (e.HasField('watermark_event') or e.HasField('processing_time_event')):
continue
else:
for tv in e.element_event.elements:
decoded = coder.decode(tv.encoded_element)
yield (
decoded.windowed_value
if include_window_info else decoded.windowed_value.value)
else:
yield e.windowed_value if include_window_info else e.windowed_value.value
def elements_to_df(elements, include_window_info=False):
# type: (List[WindowedValue], bool) -> DataFrame
"""Parses the given elements into a Dataframe.
If the elements are a list of WindowedValues, then it will break out the
elements into their own DataFrame and return it. If include_window_info is
True, then it will concatenate the windowing information onto the elements
DataFrame.
"""
rows = []
windowed_info = []
for e in elements:
rows.append(e.value)
if include_window_info:
windowed_info.append([e.timestamp.micros, e.windows, e.pane_info])
rows_df = pd.DataFrame(rows)
if include_window_info:
windowed_info_df = pd.DataFrame(
windowed_info, columns=['event_time', 'windows', 'pane_info'])
final_df = pd.concat([rows_df, windowed_info_df], axis=1)
else:
final_df = rows_df
return final_df
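# Hedged sketch (not part of the original module): building a tiny DataFrame
# from WindowedValue elements.  WindowedValue and GlobalWindow are real Beam
# classes, but the element values here are made up.
def _example_elements_to_df():  # pragma: no cover - illustrative only
  from apache_beam.transforms.window import GlobalWindow
  from apache_beam.utils.windowed_value import WindowedValue
  elements = [
      WindowedValue({'user': 'a', 'clicks': 1}, 0, [GlobalWindow()]),
      WindowedValue({'user': 'b', 'clicks': 2}, 1, [GlobalWindow()]),
  ]
  # With include_window_info=True, event_time/windows/pane_info columns are
  # concatenated onto the element columns.
  return elements_to_df(elements, include_window_info=True)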
def register_ipython_log_handler():
# type: () -> None
"""Adds the IPython handler to a dummy parent logger (named
'apache_beam.runners.interactive') of all interactive modules' loggers so that
if is_in_notebook, logging displays the logs as HTML in frontends.
"""
# apache_beam.runners.interactive is not a module, thus this "root" logger is
# a dummy one created to hold the IPython log handler. When children loggers
# have propagate as True (by default) and logging level as NOTSET (by default,
# so the "root" logger's logging level takes effect), the IPython log handler
# will be triggered at the "root"'s own logging level. And if a child logger
# sets its logging level, it can take control back.
interactive_root_logger = logging.getLogger('apache_beam.runners.interactive')
if any([isinstance(h, IPythonLogHandler)
for h in interactive_root_logger.handlers]):
return
interactive_root_logger.setLevel(logging.INFO)
interactive_root_logger.addHandler(IPythonLogHandler())
# Disable the propagation so that logs emitted from interactive modules should
# only be handled by loggers and handlers defined within interactive packages.
interactive_root_logger.propagate = False
class IPythonLogHandler(logging.Handler):
"""A logging handler to display logs as HTML in IPython backed frontends."""
# TODO(BEAM-7923): Switch to Google hosted CDN once
# https://code.google.com/archive/p/google-ajax-apis/issues/637 is resolved.
log_template = """
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<div class="alert alert-{level}">{msg}</div>"""
logging_to_alert_level_map = {
logging.CRITICAL: 'danger',
logging.ERROR: 'danger',
logging.WARNING: 'warning',
logging.INFO: 'info',
logging.DEBUG: 'dark',
logging.NOTSET: 'light'
}
def emit(self, record):
try:
from html import escape
from IPython.core.display import HTML
from IPython.core.display import display
display(
HTML(
self.log_template.format(
level=self.logging_to_alert_level_map[record.levelno],
msg=escape(record.msg % record.args))))
except ImportError:
pass # NOOP when dependencies are not available.
def obfuscate(*inputs):
# type: (*Any) -> str
"""Obfuscates any inputs into a hexadecimal string."""
str_inputs = [str(input) for input in inputs]
merged_inputs = '_'.join(str_inputs)
return hashlib.md5(merged_inputs.encode('utf-8')).hexdigest()
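# Hedged sketch (not part of the original module): obfuscate() simply joins
# the stringified inputs with '_' and md5-hashes the result, so equal inputs
# always yield the same opaque identifier.
def _example_obfuscate():  # pragma: no cover - illustrative only
  assert obfuscate('pipeline', 42) == obfuscate('pipeline', 42)
  return obfuscate('pipeline', 42)  # a 32-character hex digest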
class ProgressIndicator(object):
"""An indicator visualizing code execution in progress."""
# TODO(BEAM-7923): Switch to Google hosted CDN once
# https://code.google.com/archive/p/google-ajax-apis/issues/637 is resolved.
spinner_template = """
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<div id="{id}" class="spinner-border text-info" role="status">
</div>"""
spinner_removal_template = """
$("#{id}").remove();"""
def __init__(self, enter_text, exit_text):
# type: (str, str) -> None
self._id = 'progress_indicator_{}'.format(obfuscate(id(self)))
self._enter_text = enter_text
self._exit_text = exit_text
def __enter__(self):
try:
from IPython.core.display import HTML
from IPython.core.display import display
from apache_beam.runners.interactive import interactive_environment as ie
if ie.current_env().is_in_notebook:
display(HTML(self.spinner_template.format(id=self._id)))
else:
display(self._enter_text)
except ImportError:
pass # NOOP when dependencies are not available.
def __exit__(self, exc_type, exc_value, traceback):
try:
from IPython.core.display import Javascript
from IPython.core.display import display
from IPython.core.display import display_javascript
from apache_beam.runners.interactive import interactive_environment as ie
if ie.current_env().is_in_notebook:
script = self.spinner_removal_template.format(id=self._id)
display_javascript(
Javascript(
ie._JQUERY_WITH_DATATABLE_TEMPLATE.format(
customized_script=script)))
else:
display(self._exit_text)
except ImportError:
      pass  # NOOP when dependencies are not available.
def progress_indicated(func):
# type: (Callable[..., Any]) -> Callable[..., Any]
"""A decorator using a unique progress indicator as a context manager to
execute the given function within."""
def run_within_progress_indicator(*args, **kwargs):
with ProgressIndicator('Processing...', 'Done.'):
return func(*args, **kwargs)
return run_within_progress_indicator
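# Hedged usage sketch (not part of the original module): decorating a helper
# so that a spinner (in notebooks) or the plain enter/exit text is shown
# around its execution.  The function body is a placeholder.
@progress_indicated
def _example_long_running_step():  # pragma: no cover - illustrative only
  return sum(range(1000))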
|
|
"""Support for Mikrotik routers as device tracker."""
import logging
import ssl
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_PORT, CONF_SSL, CONF_METHOD)
_LOGGER = logging.getLogger(__name__)
MTK_DEFAULT_API_PORT = '8728'
MTK_DEFAULT_API_SSL_PORT = '8729'
CONF_LOGIN_METHOD = 'login_method'
MTK_LOGIN_PLAIN = 'plain'
MTK_LOGIN_TOKEN = 'token'
CONF_ENCODING = 'encoding'
DEFAULT_ENCODING = 'utf-8'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_METHOD): cv.string,
vol.Optional(CONF_LOGIN_METHOD):
vol.Any(MTK_LOGIN_PLAIN, MTK_LOGIN_TOKEN),
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string,
})
def get_scanner(hass, config):
"""Validate the configuration and return MTikScanner."""
scanner = MikrotikScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class MikrotikScanner(DeviceScanner):
"""This class queries a Mikrotik router."""
def __init__(self, config):
"""Initialize the scanner."""
self.last_results = {}
self.host = config[CONF_HOST]
self.ssl = config[CONF_SSL]
try:
self.port = config[CONF_PORT]
except KeyError:
if self.ssl:
self.port = MTK_DEFAULT_API_SSL_PORT
else:
self.port = MTK_DEFAULT_API_PORT
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.login_method = config.get(CONF_LOGIN_METHOD)
self.method = config.get(CONF_METHOD)
self.encoding = config[CONF_ENCODING]
self.connected = False
self.success_init = False
self.client = None
self.wireless_exist = None
self.success_init = self.connect_to_device()
if self.success_init:
_LOGGER.info("Start polling Mikrotik (%s) router...", self.host)
self._update_info()
else:
_LOGGER.error("Connection to Mikrotik (%s) failed", self.host)
def connect_to_device(self):
"""Connect to Mikrotik method."""
import librouteros
from librouteros.login import login_plain, login_token
if self.login_method == MTK_LOGIN_PLAIN:
login_method = (login_plain,)
elif self.login_method == MTK_LOGIN_TOKEN:
login_method = (login_token,)
else:
login_method = (login_plain, login_token)
try:
kwargs = {
'port': self.port,
'encoding': self.encoding,
'login_methods': login_method
}
if self.ssl:
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
kwargs['ssl_wrapper'] = ssl_context.wrap_socket
self.client = librouteros.connect(
self.host,
self.username,
self.password,
**kwargs
)
try:
routerboard_info = self.client(
cmd='/system/routerboard/getall')
except (librouteros.exceptions.TrapError,
librouteros.exceptions.MultiTrapError,
librouteros.exceptions.ConnectionError):
routerboard_info = None
raise
if routerboard_info:
_LOGGER.info(
"Connected to Mikrotik %s with IP %s",
routerboard_info[0].get('model', 'Router'), self.host)
self.connected = True
try:
self.capsman_exist = self.client(
cmd='/caps-man/interface/getall')
except (librouteros.exceptions.TrapError,
librouteros.exceptions.MultiTrapError,
librouteros.exceptions.ConnectionError):
self.capsman_exist = False
if not self.capsman_exist:
_LOGGER.info(
"Mikrotik %s: Not a CAPSman controller. Trying "
"local interfaces", self.host)
try:
self.wireless_exist = self.client(
cmd='/interface/wireless/getall')
except (librouteros.exceptions.TrapError,
librouteros.exceptions.MultiTrapError,
librouteros.exceptions.ConnectionError):
self.wireless_exist = False
if not self.wireless_exist and not self.capsman_exist \
or self.method == 'ip':
_LOGGER.info(
"Mikrotik %s: Wireless adapters not found. Try to "
"use DHCP lease table as presence tracker source. "
"Please decrease lease time as much as possible",
self.host)
if self.method:
_LOGGER.info(
"Mikrotik %s: Manually selected polling method %s",
self.host, self.method)
except (librouteros.exceptions.TrapError,
librouteros.exceptions.MultiTrapError,
librouteros.exceptions.ConnectionError) as api_error:
_LOGGER.error("Connection error: %s", api_error)
return self.connected
def scan_devices(self):
"""Scan for new devices and return a list with found device MACs."""
import librouteros
try:
self._update_info()
except (librouteros.exceptions.TrapError,
librouteros.exceptions.MultiTrapError,
librouteros.exceptions.ConnectionError) as api_error:
_LOGGER.error("Connection error: %s", api_error)
self.connect_to_device()
return [device for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
return self.last_results.get(device)
def _update_info(self):
"""Retrieve latest information from the Mikrotik box."""
if self.method:
devices_tracker = self.method
else:
if self.capsman_exist:
devices_tracker = 'capsman'
elif self.wireless_exist:
devices_tracker = 'wireless'
else:
devices_tracker = 'ip'
_LOGGER.debug(
"Loading %s devices from Mikrotik (%s) ...",
devices_tracker, self.host)
device_names = self.client(cmd='/ip/dhcp-server/lease/getall')
if devices_tracker == 'capsman':
devices = self.client(
cmd='/caps-man/registration-table/getall')
elif devices_tracker == 'wireless':
devices = self.client(
cmd='/interface/wireless/registration-table/getall')
else:
devices = device_names
if device_names is None and devices is None:
return False
mac_names = {device.get('mac-address'): device.get('host-name')
for device in device_names if device.get('mac-address')}
if devices_tracker in ('wireless', 'capsman'):
self.last_results = {
device.get('mac-address'):
mac_names.get(device.get('mac-address'))
for device in devices}
else:
self.last_results = {
device.get('mac-address'):
mac_names.get(device.get('mac-address'))
for device in device_names if device.get('active-address')}
return True
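# Hedged usage sketch (not part of the original platform): roughly how Home
# Assistant drives this scanner -- get_scanner() builds it from the validated
# config and scan_devices() is then polled for MAC addresses.  The host and
# credentials are placeholders, and a reachable RouterOS API is required.
def _example_poll_once():  # pragma: no cover - illustrative only
    config = {
        DOMAIN: {
            CONF_HOST: '192.0.2.1',
            CONF_USERNAME: 'admin',
            CONF_PASSWORD: 'secret',
            CONF_SSL: False,
            CONF_ENCODING: DEFAULT_ENCODING,
        }
    }
    scanner = get_scanner(None, config)
    if scanner is None:
        return []
    return [(mac, scanner.get_device_name(mac))
            for mac in scanner.scan_devices()]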
|
|
from django.utils.translation import ugettext
from livesettings import values
from livesettings.exceptions import SettingNotSet
from livesettings.utils import is_string_like
import logging
import warnings
log = logging.getLogger('configuration')
_NOTSET = object()
class ConfigurationSettings(object):
_instance = None
def __new__(cls, *args, **kwargs):
# for backwards compatibility, make this a singleton.
if ConfigurationSettings._instance is None:
instance = ConfigurationSettings._instance = super(ConfigurationSettings, cls).__new__(cls, *args, **kwargs)
instance.settings = values.SortedDotDict()
instance.prereg = {}
else:
warnings.warn("The ConfigurationSettings singleton is deprecated. Use livesettings.configuration_settings instead", DeprecationWarning)
return ConfigurationSettings._instance
def __getitem__(self, key):
"""Get an element either by ConfigurationGroup object or by its key"""
key = self._resolve_key(key)
return self.settings.get(key)
def __getattr__(self, key):
"""Get an element either by ConfigurationGroup object or by its key"""
try:
return self[key]
except KeyError:
raise AttributeError, key
def __iter__(self):
for v in self.groups():
yield v
def __len__(self):
return len(self.settings)
def __contains__(self, key):
key = self._resolve_key(key)
return key in self.settings
def _resolve_key(self, raw):
if is_string_like(raw):
key = raw
elif isinstance(raw, values.ConfigurationGroup):
key = raw.key
else:
group = self.groups()[raw]
key = group.key
return key
def get_config(self, group, key):
try:
if isinstance(group, values.ConfigurationGroup):
group = group.key
cg = self.settings.get(group, None)
if not cg:
raise SettingNotSet('%s config group does not exist' % group)
else:
return cg[key]
except KeyError:
raise SettingNotSet('%s.%s' % (group, key))
def groups(self):
"""Return ordered list"""
values = self.settings.values()
values.sort()
return values
def has_config(self, group, key):
if isinstance(group, values.ConfigurationGroup):
group = group.key
cfg = self.settings.get(group, None)
if cfg and key in cfg:
return True
else:
return False
def preregister_choice(self, group, key, choice):
"""Setup a choice for a group/key which hasn't been instantiated yet."""
k = (group, key)
if self.prereg.has_key(k):
self.prereg[k].append(choice)
else:
self.prereg[k] = [choice]
def register(self, value):
g = value.group
if not isinstance(g, values.ConfigurationGroup):
raise ValueError('value.group should be an instance of ConfigurationGroup')
groupkey = g.key
valuekey = value.key
k = (groupkey, valuekey)
if self.prereg.has_key(k):
for choice in self.prereg[k]:
value.add_choice(choice)
if not groupkey in self.settings:
self.settings[groupkey] = g
self.settings[groupkey][valuekey] = value
return value
def __unicode__(self):
return u"ConfigurationSettings: " + unicode(self.groups())
configuration_settings = ConfigurationSettings()
def config_exists(group, key):
"""Test to see if a setting has been registered"""
return configuration_settings.has_config(group, key)
def config_get(group, key):
"""Get a configuration setting"""
try:
return configuration_settings.get_config(group, key)
except SettingNotSet:
log.debug('SettingNotSet: %s.%s', group, key)
raise
def config_get_group(group):
return configuration_settings[group]
def config_collect_values(group, groupkey, key, unique=True, skip_missing=True):
"""Look up (group, groupkey) from config, then take the values returned and
use them as groups for a second-stage lookup.
For example:
config_collect_values(PAYMENT, MODULES, CREDITCHOICES)
Stage 1: ['PAYMENT_GOOGLE', 'PAYMENT_AUTHORIZENET']
Stage 2: config_value('PAYMENT_GOOGLE', 'CREDITCHOICES')
+ config_value('PAYMENT_AUTHORIZENET', 'CREDITCHOICES')
Stage 3: (if unique is true) remove dupes
"""
groups = config_value(group, groupkey)
ret = []
for g in groups:
try:
ret.append(config_value(g, key))
except KeyError:
if not skip_missing:
raise SettingNotSet('No config %s.%s' % (g, key))
if unique:
out = []
for x in ret:
if not x in out:
out.append(x)
ret = out
return ret
def config_register(value):
"""Register a value or values.
    Parameters:
        value: the Value instance to register
"""
return configuration_settings.register(value)
def config_register_list(*args):
for value in args:
config_register(value)
def config_value(group, key, default=_NOTSET):
"""Get a value from the configuration system"""
try:
return config_get(group, key).value
except SettingNotSet:
if default != _NOTSET:
return default
raise
def config_value_safe(group, key, default_value):
"""Get a config value with a default fallback, safe for use during SyncDB."""
raw = default_value
try:
raw = config_value(group, key)
except SettingNotSet:
pass
except ImportError:
log.warn("Error getting %s.%s, OK if you are in SyncDB.", group, key)
return raw
def config_choice_values(group, key, skip_missing=True, translate=False):
"""Get pairs of key, label from the setting."""
try:
cfg = config_get(group, key)
choices = cfg.choice_values
except SettingNotSet:
if skip_missing:
return []
else:
raise SettingNotSet('%s.%s' % (group, key))
if translate:
choices = [(k, ugettext(v)) for k, v in choices]
return choices
def config_add_choice(group, key, choice):
"""Add a choice to a value"""
if config_exists(group, key):
cfg = config_get(group, key)
cfg.add_choice(choice)
else:
configuration_settings.preregister_choice(group, key, choice)
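# Hedged usage sketch (not part of the original module): registering a group
# and a string setting, then reading it back.  ConfigurationGroup and
# StringValue live in livesettings.values; reading a value normally needs the
# livesettings tables to exist, so config_value_safe() is used with a
# fallback.  The group/key names are made up.
def _example_register_and_read():  # pragma: no cover - illustrative only
    group = values.ConfigurationGroup('EXAMPLE_GROUP', 'Example group')
    config_register(values.StringValue(group, 'GREETING',
                                       description='Greeting text',
                                       default='hello'))
    return config_value_safe('EXAMPLE_GROUP', 'GREETING', 'hello')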
|
|
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provisions Android devices with settings required for bots.
Usage:
./provision_devices.py [-d <device serial number>]
"""
import argparse
import datetime
import json
import logging
import os
import posixpath
import re
import subprocess
import sys
import time
from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_temp_file
from devil.android import device_utils
from devil.android.sdk import version_codes
from devil.utils import run_tests_helper
from devil.utils import timeout_retry
from pylib import constants
from pylib import device_settings
_SYSTEM_WEBVIEW_PATHS = ['/system/app/webview', '/system/app/WebViewGoogle']
_CHROME_PACKAGE_REGEX = re.compile('.*chrom.*')
_TOMBSTONE_REGEX = re.compile('tombstone.*')
class _DEFAULT_TIMEOUTS(object):
# L can take a while to reboot after a wipe.
LOLLIPOP = 600
PRE_LOLLIPOP = 180
HELP_TEXT = '{}s on L, {}s on pre-L'.format(LOLLIPOP, PRE_LOLLIPOP)
class _PHASES(object):
WIPE = 'wipe'
PROPERTIES = 'properties'
FINISH = 'finish'
ALL = [WIPE, PROPERTIES, FINISH]
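# Illustrative invocations (assumptions, not taken from the original
# docstring): --phase maps onto _PHASES.ALL above, so provisioning can be
# limited to individual steps, and phases can be combined by repeating the
# flag.  Serial numbers and file names below are placeholders.
#
#   ./provision_devices.py -d 0123456789ABCDEF --phase wipe --phase properties
#   ./provision_devices.py --skip-wipe --disable-network \
#       --blacklist-file bad_devices.json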
def ProvisionDevices(args):
blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file
else None)
devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
if args.device:
devices = [d for d in devices if d == args.device]
if not devices:
raise device_errors.DeviceUnreachableError(args.device)
parallel_devices = device_utils.DeviceUtils.parallel(devices)
parallel_devices.pMap(ProvisionDevice, blacklist, args)
if args.auto_reconnect:
_LaunchHostHeartbeat()
blacklisted_devices = blacklist.Read() if blacklist else []
if args.output_device_blacklist:
with open(args.output_device_blacklist, 'w') as f:
json.dump(blacklisted_devices, f)
if all(d in blacklisted_devices for d in devices):
raise device_errors.NoDevicesError
return 0
def ProvisionDevice(device, blacklist, options):
if options.reboot_timeout:
reboot_timeout = options.reboot_timeout
elif device.build_version_sdk >= version_codes.LOLLIPOP:
reboot_timeout = _DEFAULT_TIMEOUTS.LOLLIPOP
else:
reboot_timeout = _DEFAULT_TIMEOUTS.PRE_LOLLIPOP
def should_run_phase(phase_name):
return not options.phases or phase_name in options.phases
def run_phase(phase_func, reboot=True):
try:
device.WaitUntilFullyBooted(timeout=reboot_timeout, retries=0)
except device_errors.CommandTimeoutError:
logging.error('Device did not finish booting. Will try to reboot.')
device.Reboot(timeout=reboot_timeout)
phase_func(device, options)
if reboot:
device.Reboot(False, retries=0)
device.adb.WaitForDevice()
try:
CheckExternalStorage(device)
if should_run_phase(_PHASES.WIPE):
if options.chrome_specific_wipe:
run_phase(WipeChromeData)
else:
run_phase(WipeDevice)
if should_run_phase(_PHASES.PROPERTIES):
run_phase(SetProperties)
if should_run_phase(_PHASES.FINISH):
run_phase(FinishProvisioning, reboot=False)
if options.chrome_specific_wipe:
package = "com.google.android.gms"
version_name = device.GetApplicationVersion(package)
logging.info("Version name for %s is %s", package, version_name)
except device_errors.CommandTimeoutError:
logging.exception('Timed out waiting for device %s. Adding to blacklist.',
str(device))
blacklist.Extend([str(device)])
except device_errors.CommandFailedError:
logging.exception('Failed to provision device %s. Adding to blacklist.',
str(device))
blacklist.Extend([str(device)])
def CheckExternalStorage(device):
"""Checks that storage is writable and if not makes it writable.
Arguments:
device: The device to check.
"""
try:
with device_temp_file.DeviceTempFile(
device.adb, suffix='.sh', dir=device.GetExternalStoragePath()):
pass
except device_errors.CommandFailedError:
logging.info('External storage not writable. Remounting / as RW')
device.RunShellCommand(['mount', '-o', 'remount,rw', '/'],
check_return=True, as_root=True)
with device_temp_file.DeviceTempFile(
device.adb, suffix='.sh', dir=device.GetExternalStoragePath()):
pass
def WipeChromeData(device, options):
"""Wipes chrome specific data from device
(1) uninstall any app whose name matches *chrom*, except
com.android.chrome, which is the chrome stable package. Doing so also
removes the corresponding dirs under /data/data/ and /data/app/
(2) remove any dir under /data/app-lib/ whose name matches *chrom*
(3) remove any files under /data/tombstones/ whose name matches "tombstone*"
(4) remove /data/local.prop if there is any
(5) remove /data/local/chrome-command-line if there is any
(6) remove anything under /data/local/.config/ if the dir exists
(this is telemetry related)
(7) remove anything under /data/local/tmp/
Arguments:
device: the device to wipe
"""
if options.skip_wipe:
return
try:
device.EnableRoot()
_UninstallIfMatch(device, _CHROME_PACKAGE_REGEX,
constants.PACKAGE_INFO['chrome_stable'].package)
_WipeUnderDirIfMatch(device, '/data/app-lib/', _CHROME_PACKAGE_REGEX)
_WipeUnderDirIfMatch(device, '/data/tombstones/', _TOMBSTONE_REGEX)
_WipeFileOrDir(device, '/data/local.prop')
_WipeFileOrDir(device, '/data/local/chrome-command-line')
_WipeFileOrDir(device, '/data/local/.config/')
_WipeFileOrDir(device, '/data/local/tmp/')
device.RunShellCommand('rm -rf %s/*' % device.GetExternalStoragePath(),
check_return=True)
except device_errors.CommandFailedError:
logging.exception('Possible failure while wiping the device. '
'Attempting to continue.')
def WipeDevice(device, options):
"""Wipes data from device, keeping only the adb_keys for authorization.
After wiping data on a device that has been authorized, adb can still
communicate with the device, but after reboot the device will need to be
re-authorized because the adb keys file is stored in /data/misc/adb/.
Thus, adb_keys file is rewritten so the device does not need to be
re-authorized.
Arguments:
device: the device to wipe
"""
if options.skip_wipe:
return
try:
device.EnableRoot()
device_authorized = device.FileExists(constants.ADB_KEYS_FILE)
if device_authorized:
adb_keys = device.ReadFile(constants.ADB_KEYS_FILE,
as_root=True).splitlines()
device.RunShellCommand(['wipe', 'data'],
as_root=True, check_return=True)
device.adb.WaitForDevice()
if device_authorized:
adb_keys_set = set(adb_keys)
for adb_key_file in options.adb_key_files or []:
try:
with open(adb_key_file, 'r') as f:
adb_public_keys = f.readlines()
adb_keys_set.update(adb_public_keys)
except IOError:
logging.warning('Unable to find adb keys file %s.', adb_key_file)
_WriteAdbKeysFile(device, '\n'.join(adb_keys_set))
except device_errors.CommandFailedError:
logging.exception('Possible failure while wiping the device. '
'Attempting to continue.')
def _WriteAdbKeysFile(device, adb_keys_string):
dir_path = posixpath.dirname(constants.ADB_KEYS_FILE)
device.RunShellCommand(['mkdir', '-p', dir_path],
as_root=True, check_return=True)
device.RunShellCommand(['restorecon', dir_path],
as_root=True, check_return=True)
device.WriteFile(constants.ADB_KEYS_FILE, adb_keys_string, as_root=True)
device.RunShellCommand(['restorecon', constants.ADB_KEYS_FILE],
as_root=True, check_return=True)
def SetProperties(device, options):
try:
device.EnableRoot()
except device_errors.CommandFailedError as e:
logging.warning(str(e))
_ConfigureLocalProperties(device, options.enable_java_debug)
device_settings.ConfigureContentSettings(
device, device_settings.DETERMINISTIC_DEVICE_SETTINGS)
if options.disable_location:
device_settings.ConfigureContentSettings(
device, device_settings.DISABLE_LOCATION_SETTINGS)
else:
device_settings.ConfigureContentSettings(
device, device_settings.ENABLE_LOCATION_SETTINGS)
if options.disable_mock_location:
device_settings.ConfigureContentSettings(
device, device_settings.DISABLE_MOCK_LOCATION_SETTINGS)
else:
device_settings.ConfigureContentSettings(
device, device_settings.ENABLE_MOCK_LOCATION_SETTINGS)
device_settings.SetLockScreenSettings(device)
if options.disable_network:
device_settings.ConfigureContentSettings(
device, device_settings.NETWORK_DISABLED_SETTINGS)
if options.disable_system_chrome:
# The system chrome version on the device interferes with some tests.
device.RunShellCommand(['pm', 'disable', 'com.android.chrome'],
check_return=True)
if options.remove_system_webview:
if device.HasRoot():
# This is required, e.g., to replace the system webview on a device.
device.adb.Remount()
device.RunShellCommand(['stop'], check_return=True)
device.RunShellCommand(['rm', '-rf'] + _SYSTEM_WEBVIEW_PATHS,
check_return=True)
device.RunShellCommand(['start'], check_return=True)
else:
logging.warning('Cannot remove system webview from a non-rooted device')
def _ConfigureLocalProperties(device, java_debug=True):
"""Set standard readonly testing device properties prior to reboot."""
local_props = [
'persist.sys.usb.config=adb',
'ro.monkey=1',
'ro.test_harness=1',
'ro.audio.silent=1',
'ro.setupwizard.mode=DISABLED',
]
if java_debug:
local_props.append(
'%s=all' % device_utils.DeviceUtils.JAVA_ASSERT_PROPERTY)
local_props.append('debug.checkjni=1')
try:
device.WriteFile(
device.LOCAL_PROPERTIES_PATH,
'\n'.join(local_props), as_root=True)
# Android will not respect the local props file if it is world writable.
device.RunShellCommand(
['chmod', '644', device.LOCAL_PROPERTIES_PATH],
as_root=True, check_return=True)
except device_errors.CommandFailedError:
logging.exception('Failed to configure local properties.')
def FinishProvisioning(device, options):
if options.min_battery_level is not None:
try:
battery = battery_utils.BatteryUtils(device)
battery.ChargeDeviceToLevel(options.min_battery_level)
except device_errors.CommandFailedError:
logging.exception('Unable to charge device to specified level.')
if options.max_battery_temp is not None:
try:
battery = battery_utils.BatteryUtils(device)
battery.LetBatteryCoolToTemperature(options.max_battery_temp)
except device_errors.CommandFailedError:
logging.exception('Unable to let battery cool to specified temperature.')
def _set_and_verify_date():
if device.build_version_sdk >= version_codes.MARSHMALLOW:
date_format = '%m%d%H%M%Y.%S'
set_date_command = ['date']
else:
date_format = '%Y%m%d.%H%M%S'
set_date_command = ['date', '-s']
strgmtime = time.strftime(date_format, time.gmtime())
set_date_command.append(strgmtime)
device.RunShellCommand(set_date_command, as_root=True, check_return=True)
device_time = device.RunShellCommand(
['date', '+"%Y%m%d.%H%M%S"'], as_root=True,
single_line=True).replace('"', '')
device_time = datetime.datetime.strptime(device_time, "%Y%m%d.%H%M%S")
correct_time = datetime.datetime.strptime(strgmtime, date_format)
tdelta = (correct_time - device_time).seconds
if tdelta <= 1:
logging.info('Date/time successfully set on %s', device)
return True
else:
logging.error('Date mismatch. Device: %s Correct: %s',
device_time.isoformat(), correct_time.isoformat())
return False
# Sometimes the date is not set correctly on the devices. Retry on failure.
if not timeout_retry.WaitFor(_set_and_verify_date, wait_period=1,
max_tries=2):
raise device_errors.CommandFailedError(
'Failed to set date & time.', device_serial=str(device))
props = device.RunShellCommand('getprop', check_return=True)
for prop in props:
logging.info(' %s', prop)
if options.auto_reconnect:
_PushAndLaunchAdbReboot(device, options.target)
def _UninstallIfMatch(device, pattern, app_to_keep):
installed_packages = device.RunShellCommand(['pm', 'list', 'packages'])
for package_output in installed_packages:
package = package_output.split(":")[1]
if pattern.match(package) and not package == app_to_keep:
device.Uninstall(package)
def _WipeUnderDirIfMatch(device, path, pattern):
ls_result = device.Ls(path)
for (content, _) in ls_result:
if pattern.match(content):
_WipeFileOrDir(device, path + content)
def _WipeFileOrDir(device, path):
if device.PathExists(path):
device.RunShellCommand(['rm', '-rf', path], check_return=True)
def _PushAndLaunchAdbReboot(device, target):
"""Pushes and launches the adb_reboot binary on the device.
Arguments:
device: The DeviceUtils instance for the device to which the adb_reboot
binary should be pushed.
    target: The build target (e.g. Debug or Release) which helps in
locating the adb_reboot binary.
"""
logging.info('Will push and launch adb_reboot on %s', str(device))
# Kill if adb_reboot is already running.
device.KillAll('adb_reboot', blocking=True, timeout=2, quiet=True)
# Push adb_reboot
logging.info(' Pushing adb_reboot ...')
adb_reboot = os.path.join(constants.DIR_SOURCE_ROOT,
'out/%s/adb_reboot' % target)
device.PushChangedFiles([(adb_reboot, '/data/local/tmp/')])
# Launch adb_reboot
logging.info(' Launching adb_reboot ...')
device.RunShellCommand(
['/data/local/tmp/adb_reboot'],
check_return=True)
def _LaunchHostHeartbeat():
# Kill if existing host_heartbeat
KillHostHeartbeat()
# Launch a new host_heartbeat
logging.info('Spawning host heartbeat...')
subprocess.Popen([os.path.join(constants.DIR_SOURCE_ROOT,
'build/android/host_heartbeat.py')])
def KillHostHeartbeat():
ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
stdout, _ = ps.communicate()
matches = re.findall('\\n.*host_heartbeat.*', stdout)
for match in matches:
    logging.info('An instance of host_heartbeat is running... will kill it')
pid = re.findall(r'(\S+)', match)[1]
subprocess.call(['kill', str(pid)])
def main():
# Recommended options on perf bots:
# --disable-network
# TODO(tonyg): We eventually want network on. However, currently radios
# can cause perfbots to drain faster than they charge.
# --min-battery-level 95
# Some perf bots run benchmarks with USB charging disabled which leads
# to gradual draining of the battery. We must wait for a full charge
# before starting a run in order to keep the devices online.
parser = argparse.ArgumentParser(
description='Provision Android devices with settings required for bots.')
parser.add_argument('-d', '--device', metavar='SERIAL',
help='the serial number of the device to be provisioned'
' (the default is to provision all devices attached)')
parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
parser.add_argument('--phase', action='append', choices=_PHASES.ALL,
dest='phases',
help='Phases of provisioning to run. '
'(If omitted, all phases will be run.)')
parser.add_argument('--skip-wipe', action='store_true', default=False,
help="don't wipe device data during provisioning")
parser.add_argument('--reboot-timeout', metavar='SECS', type=int,
help='when wiping the device, max number of seconds to'
' wait after each reboot '
'(default: %s)' % _DEFAULT_TIMEOUTS.HELP_TEXT)
parser.add_argument('--min-battery-level', type=int, metavar='NUM',
help='wait for the device to reach this minimum battery'
' level before trying to continue')
parser.add_argument('--disable-location', action='store_true',
help='disable Google location services on devices')
parser.add_argument('--disable-mock-location', action='store_true',
default=False, help='Set ALLOW_MOCK_LOCATION to false')
parser.add_argument('--disable-network', action='store_true',
help='disable network access on devices')
parser.add_argument('--disable-java-debug', action='store_false',
dest='enable_java_debug', default=True,
help='disable Java property asserts and JNI checking')
parser.add_argument('--disable-system-chrome', action='store_true',
help='Disable the system chrome from devices.')
parser.add_argument('--remove-system-webview', action='store_true',
help='Remove the system webview from devices.')
parser.add_argument('-t', '--target', default='Debug',
help='the build target (default: %(default)s)')
parser.add_argument('-r', '--auto-reconnect', action='store_true',
help='push binary which will reboot the device on adb'
' disconnections')
parser.add_argument('--adb-key-files', type=str, nargs='+',
help='list of adb keys to push to device')
parser.add_argument('-v', '--verbose', action='count', default=1,
help='Log more information.')
parser.add_argument('--max-battery-temp', type=int, metavar='NUM',
help='Wait for the battery to have this temp or lower.')
parser.add_argument('--output-device-blacklist',
help='Json file to output the device blacklist.')
parser.add_argument('--chrome-specific-wipe', action='store_true',
help='only wipe chrome specific data during provisioning')
args = parser.parse_args()
constants.SetBuildType(args.target)
run_tests_helper.SetLogLevel(args.verbose)
return ProvisionDevices(args)
if __name__ == '__main__':
sys.exit(main())
|